diff --git a/cmake/anakin_subgraph.cmake b/cmake/anakin_subgraph.cmake
index 4a7d32a63553df31e0928e7b30249ff3e809cba1..b5437e776d31e4d4fec5a79bf505202d192cd5ca 100644
--- a/cmake/anakin_subgraph.cmake
+++ b/cmake/anakin_subgraph.cmake
@@ -25,8 +25,9 @@ endif()
 if(ANAKIN_FOUND)
   message(STATUS "Current ANAKIN header is ${ANAKIN_INCLUDE_DIR}/anakin_config.h. ")
+  include_directories(${ANAKIN_ROOT})
   include_directories(${ANAKIN_ROOT}/include)
-  include_directories(${ANAKIN_ROOT}/include/saber)
+  include_directories(${ANAKIN_ROOT}/saber)
   link_directories(${ANAKIN_ROOT})
   add_definitions(-DPADDLE_WITH_ANAKIN)
 endif()
diff --git a/cmake/external/warpctc.cmake b/cmake/external/warpctc.cmake
index 6f2af8670f25c00ac0970fe4ae2b0c5b03aa0d9e..012283c6ea7762f3932ce55d5f86c16623679e75 100644
--- a/cmake/external/warpctc.cmake
+++ b/cmake/external/warpctc.cmake
@@ -77,6 +77,7 @@ else(WIN32)
 ENDIF(WIN32)
 MESSAGE(STATUS "warp-ctc library: ${WARPCTC_LIBRARIES}")
+get_filename_component(WARPCTC_LIBRARY_PATH ${WARPCTC_LIBRARIES} DIRECTORY)
 INCLUDE_DIRECTORIES(${WARPCTC_INCLUDE_DIR})  # For warpctc code to include its headers.
 INCLUDE_DIRECTORIES(${THIRD_PARTY_PATH}/install)  # For Paddle code to include warpctc headers.
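The new WARPCTC_LIBRARY_PATH variable only extracts the directory portion of the library path; nothing in this hunk consumes it yet. A minimal sketch of how a later consumer might use it, assuming some target links against warp-ctc (the link/rpath handling below is illustrative, not part of the patch):

    # Illustrative consumer of WARPCTC_LIBRARY_PATH (not in this patch).
    # get_filename_component(... DIRECTORY) strips the file name, leaving the
    # directory that holds libwarpctc; that directory can then seed the link
    # search path and the install rpath.
    get_filename_component(WARPCTC_LIBRARY_PATH ${WARPCTC_LIBRARIES} DIRECTORY)
    link_directories(${WARPCTC_LIBRARY_PATH})
    set(CMAKE_INSTALL_RPATH "${CMAKE_INSTALL_RPATH};${WARPCTC_LIBRARY_PATH}")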
diff --git a/cmake/version.cmake b/cmake/version.cmake
index dd57d4ab9969ce530f93ca1694350b1a26b5b543..f7b065b582c52d7e45d2260b1db304d182f9066c 100644
--- a/cmake/version.cmake
+++ b/cmake/version.cmake
@@ -3,6 +3,8 @@ set(PADDLE_VERSION $ENV{PADDLE_VERSION})
 set(tmp_version "HEAD")
 set(TAG_VERSION_REGEX "[0-9]+\\.[0-9]+\\.[0-9]+(\\.(a|b|rc)\\.[0-9]+)?")
 set(COMMIT_VERSION_REGEX "[0-9a-f]+[0-9a-f]+[0-9a-f]+[0-9a-f]+[0-9a-f]+")
+set(LATEST_PADDLE_VERSION "latest")
+
 while ("${PADDLE_VERSION}" STREQUAL "")
   # Check current branch name
   execute_process(
@@ -23,8 +25,8 @@ while ("${PADDLE_VERSION}" STREQUAL "")
   if (${GIT_BRANCH_NAME} MATCHES "release/${TAG_VERSION_REGEX}")
     # Check the tag is a correct version
     if (${GIT_TAG_NAME} MATCHES "${COMMIT_VERSION_REGEX}")
-      # if no tag was found, set PADDLE_VERSION to 0.0.0 to represent latest
-      set(PADDLE_VERSION "0.0.0")
+      # if no tag was found, set PADDLE_VERSION to "latest"
+      set(PADDLE_VERSION "${LATEST_PADDLE_VERSION}")
     elseif (${GIT_TAG_NAME} MATCHES "v${TAG_VERSION_REGEX}")
       string(REPLACE "v" "" PADDLE_VERSION ${GIT_TAG_NAME})
     else() # otherwise, get the previous git tag name.
@@ -42,19 +44,19 @@ while ("${PADDLE_VERSION}" STREQUAL "")
       if (${GIT_EXACT_TAG_NAME} MATCHES "v${TAG_VERSION_REGEX}")
         string(REPLACE "v" "" PADDLE_VERSION ${GIT_EXACT_TAG_NAME})
       else()
-        set(PADDLE_VERSION "0.0.0")
+        set(PADDLE_VERSION "${LATEST_PADDLE_VERSION}")
       endif()
     else()
-      # otherwise, we always set PADDLE_VERSION to 0.0.0 to represent latest
-      set(PADDLE_VERSION "0.0.0")
+      # otherwise, we always set PADDLE_VERSION to "latest"
+      set(PADDLE_VERSION "${LATEST_PADDLE_VERSION}")
     endif()
   endif()
   else()
-    set(PADDLE_VERSION "0.0.0")
+    set(PADDLE_VERSION "${LATEST_PADDLE_VERSION}")
     message(WARNING "Cannot add paddle version from git tag")
   endif()
 else()
-  set(PADDLE_VERSION "0.0.0")
+  set(PADDLE_VERSION "${LATEST_PADDLE_VERSION}")
   message(WARNING "Cannot add paddle version for wrong git branch result")
 endif()
 endwhile()
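The version.cmake change swaps the literal "0.0.0" placeholder for a named LATEST_PADDLE_VERSION constant set to "latest", so every untagged fallback path now yields the same string. A standalone sketch of the resulting behavior, runnable with `cmake -P` from inside a git checkout (this reduction collapses the branch checks and keeps only the tag match, so it is illustrative, not the real logic):

    # Reduced illustration of the fallback (assumes it runs inside a git repo).
    set(LATEST_PADDLE_VERSION "latest")
    set(TAG_VERSION_REGEX "[0-9]+\\.[0-9]+\\.[0-9]+(\\.(a|b|rc)\\.[0-9]+)?")
    execute_process(
      COMMAND git describe --tags --exact-match
      OUTPUT_VARIABLE GIT_TAG_NAME
      OUTPUT_STRIP_TRAILING_WHITESPACE
      ERROR_QUIET)
    if(GIT_TAG_NAME MATCHES "v${TAG_VERSION_REGEX}")
      string(REPLACE "v" "" PADDLE_VERSION ${GIT_TAG_NAME})  # e.g. v1.4.1 -> 1.4.1
    else()
      set(PADDLE_VERSION "${LATEST_PADDLE_VERSION}")         # untagged HEAD -> "latest"
    endif()
    message(STATUS "PADDLE_VERSION resolved to ${PADDLE_VERSION}")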
diff --git a/paddle/fluid/API.spec b/paddle/fluid/API.spec
index 03c7f32a1261a184e6bdf4689aa411aa99ea8e68..fd9567dd6517e756b2c1e83ee502c92bd4a440cf 100644
--- a/paddle/fluid/API.spec
+++ b/paddle/fluid/API.spec
@@ -8,19 +8,19 @@ paddle.fluid.Program.parse_from_string (ArgSpec(args=['binary_str'], varargs=Non
 paddle.fluid.Program.to_string (ArgSpec(args=['self', 'throw_on_error', 'with_details'], varargs=None, keywords=None, defaults=(False,)), ('document', 'faec17e5a04af28e3776160e34504d15'))
 paddle.fluid.default_startup_program (ArgSpec(args=[], varargs=None, keywords=None, defaults=None), ('document', '99e5d53d92d82797093332719c9e3ccd'))
 paddle.fluid.default_main_program (ArgSpec(args=[], varargs=None, keywords=None, defaults=None), ('document', '5430f54ab4895f9f47db6bebbaf71659'))
-paddle.fluid.program_guard (ArgSpec(args=['main_program', 'startup_program'], varargs=None, keywords=None, defaults=(None,)), ('document', 'b54f403e57825a1592aece03afe3afb6'))
+paddle.fluid.program_guard (ArgSpec(args=['main_program', 'startup_program'], varargs=None, keywords=None, defaults=(None,)), ('document', 'ae5f806f082cfaeaa5194cacc253a5e4'))
 paddle.fluid.name_scope (ArgSpec(args=['prefix'], varargs=None, keywords=None, defaults=(None,)), ('document', '61660461e1f44e0480ca22fa8a482c41'))
-paddle.fluid.cuda_places (ArgSpec(args=['device_ids'], varargs=None, keywords=None, defaults=(None,)), ('document', '7d9a51fc9cf3c5245b5227080a8064c3'))
-paddle.fluid.cpu_places (ArgSpec(args=['device_count'], varargs=None, keywords=None, defaults=(None,)), ('document', '4c0cd83f0b401fc2ff84c70974e5d210'))
-paddle.fluid.cuda_pinned_places (ArgSpec(args=['device_count'], varargs=None, keywords=None, defaults=(None,)), ('document', 'd0c3ebd813c39958c92b78e3eef7e912'))
-paddle.fluid.in_dygraph_mode (ArgSpec(args=[], varargs=None, keywords=None, defaults=None), ('document', 'f06314a1cb30c96b5808dde2219c2dae'))
+paddle.fluid.cuda_places (ArgSpec(args=['device_ids'], varargs=None, keywords=None, defaults=(None,)), ('document', '7f3068b82fc427bfa04b1af953610992'))
+paddle.fluid.cpu_places (ArgSpec(args=['device_count'], varargs=None, keywords=None, defaults=(None,)), ('document', '8b674e9a7ac7944c27fd853b675c2cb2'))
+paddle.fluid.cuda_pinned_places (ArgSpec(args=['device_count'], varargs=None, keywords=None, defaults=(None,)), ('document', 'cc83b6c5ba8be38ff3ee87e9cec9de5f'))
+paddle.fluid.in_dygraph_mode (ArgSpec(args=[], varargs=None, keywords=None, defaults=None), ('document', 'eddb7a1f0083dcc70e9f6c71ee003cb9'))
 paddle.fluid.Executor.__init__ (ArgSpec(args=['self', 'place'], varargs=None, keywords=None, defaults=None), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
-paddle.fluid.Executor.close (ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None), ('document', 'f5369953dd0c443961cf79f7a00e1a03'))
-paddle.fluid.Executor.infer_from_dataset (ArgSpec(args=['self', 'program', 'dataset', 'scope', 'thread', 'debug', 'fetch_list', 'fetch_info', 'print_period'], varargs=None, keywords=None, defaults=(None, None, None, 0, False, None, None, 100)), ('document', '9c7decb955b9c4f718114179c8985581'))
-paddle.fluid.Executor.run (ArgSpec(args=['self', 'program', 'feed', 'fetch_list', 'feed_var_name', 'fetch_var_name', 'scope', 'return_numpy', 'use_program_cache'], varargs=None, keywords=None, defaults=(None, None, None, 'feed', 'fetch', None, True, False)), ('document', 'f482e93b38b4018796969a2e1dde479d'))
-paddle.fluid.Executor.train_from_dataset (ArgSpec(args=['self', 'program', 'dataset', 'scope', 'thread', 'debug', 'fetch_list', 'fetch_info', 'print_period'], varargs=None, keywords=None, defaults=(None, None, None, 0, False, None, None, 100)), ('document', 'd521011d79e71080fe9b5bb179b43518'))
-paddle.fluid.global_scope (ArgSpec(args=[], varargs=None, keywords=None, defaults=None), ('document', 'e148d3ab1ed8edf3e928212a375959c0'))
-paddle.fluid.scope_guard (ArgSpec(args=['scope'], varargs=None, keywords=None, defaults=None), ('document', 'b94d1f6bcc29c4fb58fc0058561250c2'))
+paddle.fluid.Executor.close (ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None), ('document', '3a584496aa1343f36eebf3c46b323a74'))
+paddle.fluid.Executor.infer_from_dataset (ArgSpec(args=['self', 'program', 'dataset', 'scope', 'thread', 'debug', 'fetch_list', 'fetch_info', 'print_period'], varargs=None, keywords=None, defaults=(None, None, None, 0, False, None, None, 100)), ('document', 'bedc29ad01c1b911e99032ee1e19ac59'))
+paddle.fluid.Executor.run (ArgSpec(args=['self', 'program', 'feed', 'fetch_list', 'feed_var_name', 'fetch_var_name', 'scope', 'return_numpy', 'use_program_cache'], varargs=None, keywords=None, defaults=(None, None, None, 'feed', 'fetch', None, True, False)), ('document', '4cfcd9c15b766a51b584cc46d38f1ad8'))
+paddle.fluid.Executor.train_from_dataset (ArgSpec(args=['self', 'program', 'dataset', 'scope', 'thread', 'debug', 'fetch_list', 'fetch_info', 'print_period'], varargs=None, keywords=None, defaults=(None, None, None, 0, False, None, None, 100)), ('document', '28f50904a0213f110947a30e0438529c'))
+paddle.fluid.global_scope (ArgSpec(args=[], varargs=None, keywords=None, defaults=None), ('document', 'f65788d9ead293ada47551339df12203'))
+paddle.fluid.scope_guard (ArgSpec(args=['scope'], varargs=None, keywords=None, defaults=None), ('document', '6e19f92e2f185320a3a86b77e85eb3b3'))
 paddle.fluid.DistributeTranspiler.__init__ (ArgSpec(args=['self', 'config'], varargs=None, keywords=None, defaults=(None,)), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
 paddle.fluid.DistributeTranspiler.get_pserver_program (ArgSpec(args=['self', 'endpoint'], varargs=None, keywords=None, defaults=None), ('document', '292ab72977afbe58e6a3bde175452680'))
 paddle.fluid.DistributeTranspiler.get_pserver_programs (ArgSpec(args=['self', 'endpoint'], varargs=None, keywords=None, defaults=None), ('document', '78f4949aedf317666a89ca74b3748ba8'))
@@ -31,16 +31,17 @@ paddle.fluid.memory_optimize (ArgSpec(args=['input_program', 'skip_opt_set', 'pr
 paddle.fluid.release_memory (ArgSpec(args=['input_program', 'skip_opt_set'], varargs=None, keywords=None, defaults=(None,)), ('document', 'ac4114d3df16264f1946deb3a8434a6f'))
 paddle.fluid.DistributeTranspilerConfig.__init__
 paddle.fluid.ParallelExecutor.__init__ (ArgSpec(args=['self', 'use_cuda', 'loss_name', 'main_program', 'share_vars_from', 'exec_strategy', 'build_strategy', 'num_trainers', 'trainer_id', 'scope'], varargs=None, keywords=None, defaults=(None, None, None, None, None, 1, 0, None)), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
-paddle.fluid.ParallelExecutor.run (ArgSpec(args=['self', 'fetch_list', 'feed', 'feed_dict', 'return_numpy'], varargs=None, keywords=None, defaults=(None, None, True)), ('document', '2cb4bd74481861345c70228a0f57620c'))
-paddle.fluid.create_lod_tensor (ArgSpec(args=['data', 'recursive_seq_lens', 'place'], varargs=None, keywords=None, defaults=None), ('document', '8e7bb21e83ff4604f5b379672e285b94'))
-paddle.fluid.create_random_int_lodtensor (ArgSpec(args=['recursive_seq_lens', 'base_shape', 'place', 'low', 'high'], varargs=None, keywords=None, defaults=None), ('document', '368f638b99f1dfe59e9b02aa6f077752'))
+paddle.fluid.ParallelExecutor.drop_local_exe_scopes (ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None), ('document', '80d857dc626612e2b2460d0154551e95'))
+paddle.fluid.ParallelExecutor.run (ArgSpec(args=['self', 'fetch_list', 'feed', 'feed_dict', 'return_numpy'], varargs=None, keywords=None, defaults=(None, None, True)), ('document', '33ce6ec50f8eeb05d340e6b114b026fd'))
+paddle.fluid.create_lod_tensor (ArgSpec(args=['data', 'recursive_seq_lens', 'place'], varargs=None, keywords=None, defaults=None), ('document', 'b82ea20e2dc5ff2372e0643169ca47ff'))
+paddle.fluid.create_random_int_lodtensor (ArgSpec(args=['recursive_seq_lens', 'base_shape', 'place', 'low', 'high'], varargs=None, keywords=None, defaults=None), ('document', '74dc6d23185d90a7a50fbac19f5b65fb'))
 paddle.fluid.DataFeedDesc.__init__ (ArgSpec(args=['self', 'proto_file'], varargs=None, keywords=None, defaults=None), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
 paddle.fluid.DataFeedDesc.desc (ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None), ('document', '4294493e31c4bc9fc4bd48753044235f'))
 paddle.fluid.DataFeedDesc.set_batch_size (ArgSpec(args=['self', 'batch_size'], varargs=None, keywords=None, defaults=None), ('document', '8d9f44601e0a99dd431f14fd9250cd21'))
 paddle.fluid.DataFeedDesc.set_dense_slots (ArgSpec(args=['self', 'dense_slots_name'], varargs=None, keywords=None, defaults=None), ('document', 'eb894b464bbcd1b4bc8038398954f766'))
 paddle.fluid.DataFeedDesc.set_use_slots (ArgSpec(args=['self', 'use_slots_name'], varargs=None, keywords=None, defaults=None), ('document', '415c56600ce4e198c071cad01409a690'))
 paddle.fluid.CompiledProgram.__init__ (ArgSpec(args=['self', 'program_or_graph'], varargs=None, keywords=None, defaults=None), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
-paddle.fluid.CompiledProgram.with_data_parallel (ArgSpec(args=['self', 'loss_name', 'build_strategy', 'exec_strategy', 'share_vars_from', 'places'], varargs=None, keywords=None, defaults=(None, None, None, None, None)), ('document', 'a8c7793803cf976680d9478e378fa356'))
+paddle.fluid.CompiledProgram.with_data_parallel (ArgSpec(args=['self', 'loss_name', 'build_strategy', 'exec_strategy', 'share_vars_from', 'places'], varargs=None, keywords=None, defaults=(None, None, None, None, None)), ('document', '0e17773521634ef798fddd7d2ea3ef96'))
 paddle.fluid.CompiledProgram.with_inference_optimize (ArgSpec(args=['self', 'config'], varargs=None, keywords=None, defaults=None), ('document', '9e5b009d850191a010e859189c127fd8'))
 paddle.fluid.ExecutionStrategy.__init__ __init__(self: paddle.fluid.core.ParallelExecutor.ExecutionStrategy) -> None
 paddle.fluid.BuildStrategy.GradientScaleStrategy.__init__ __init__(self: paddle.fluid.core.ParallelExecutor.BuildStrategy.GradientScaleStrategy, arg0: int) -> None
@@ -52,14 +53,14 @@ paddle.fluid.io.save_persistables (ArgSpec(args=['executor', 'dirname', 'main_pr
 paddle.fluid.io.load_vars (ArgSpec(args=['executor', 'dirname', 'main_program', 'vars', 'predicate', 'filename'], varargs=None, keywords=None, defaults=(None, None, None, None)), ('document', '0a5308f496632ab1ec3ba1f1377e6f95'))
 paddle.fluid.io.load_params (ArgSpec(args=['executor', 'dirname', 'main_program', 'filename'], varargs=None, keywords=None, defaults=(None, None)), ('document', '41779819cef32f2246e83aebc5a002e2'))
 paddle.fluid.io.load_persistables (ArgSpec(args=['executor', 'dirname', 'main_program', 'filename'], varargs=None, keywords=None, defaults=(None, None)), ('document', '28df5bfe26ca7a077f91156abb0fe6d2'))
-paddle.fluid.io.save_inference_model (ArgSpec(args=['dirname', 'feeded_var_names', 'target_vars', 'executor', 'main_program', 'model_filename', 'params_filename', 'export_for_deployment'], varargs=None, keywords=None, defaults=(None, None, None, True)), ('document', '70f4f53f13572436ac72d1c8b5efeb9d'))
-paddle.fluid.io.load_inference_model (ArgSpec(args=['dirname', 'executor', 'model_filename', 'params_filename', 'pserver_endpoints'], varargs=None, keywords=None, defaults=(None, None, None)), ('document', '7a5255386075dac3c75b7058254fcdcb'))
+paddle.fluid.io.save_inference_model (ArgSpec(args=['dirname', 'feeded_var_names', 'target_vars', 'executor', 'main_program', 'model_filename', 'params_filename', 'export_for_deployment'], varargs=None, keywords=None, defaults=(None, None, None, True)), ('document', 'af82e1b5fe5764029905a191b987f63d'))
+paddle.fluid.io.load_inference_model (ArgSpec(args=['dirname', 'executor', 'model_filename', 'params_filename', 'pserver_endpoints'], varargs=None, keywords=None, defaults=(None, None, None)), ('document', '648f64d8fd81572eef34a69e533459ef'))
 paddle.fluid.io.PyReader.__init__ (ArgSpec(args=['self', 'feed_list', 'capacity', 'use_double_buffer', 'iterable'], varargs=None, keywords=None, defaults=(True, False)), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
-paddle.fluid.io.PyReader.decorate_batch_generator (ArgSpec(args=['self', 'reader', 'places'], varargs=None, keywords=None, defaults=(None,)), ('document', 'a3fefec8bacd6ce83f49906a9d05e779'))
-paddle.fluid.io.PyReader.decorate_sample_generator (ArgSpec(args=['self', 'sample_generator', 'batch_size', 'drop_last', 'places'], varargs=None, keywords=None, defaults=(True, None)), ('document', '7abd9cf7d695bab5bb6cf7ded5903cb2'))
-paddle.fluid.io.PyReader.decorate_sample_list_generator (ArgSpec(args=['self', 'reader', 'places'], varargs=None, keywords=None, defaults=(None,)), ('document', 'faef298f73e91aedcfaf5d184f3109b7'))
-paddle.fluid.io.PyReader.reset (ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None), ('document', 'ff1cc1e2beb8824d453656c72c28ddfb'))
-paddle.fluid.io.PyReader.start (ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None), ('document', 'b7ea0a548991924e4cfe61a577b8e56d'))
+paddle.fluid.io.PyReader.decorate_batch_generator (ArgSpec(args=['self', 'reader', 'places'], varargs=None, keywords=None, defaults=(None,)), ('document', '4a072de39998ee4e0de33fcec11325a6'))
+paddle.fluid.io.PyReader.decorate_sample_generator (ArgSpec(args=['self', 'sample_generator', 'batch_size', 'drop_last', 'places'], varargs=None, keywords=None, defaults=(True, None)), ('document', '3db4b24d33fe4f711e303f9673dc5c6a'))
+paddle.fluid.io.PyReader.decorate_sample_list_generator (ArgSpec(args=['self', 'reader', 'places'], varargs=None, keywords=None, defaults=(None,)), ('document', '94adc0fb71c4b2ae6c3c74886c9cb898'))
+paddle.fluid.io.PyReader.reset (ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None), ('document', 'd83714baf29f58d1605547e23d471fc7'))
+paddle.fluid.io.PyReader.start (ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None), ('document', 'ac8d2fd0a8581a01616c6458ef3c04cb'))
 paddle.fluid.initializer.ConstantInitializer.__init__ (ArgSpec(args=['self', 'value', 'force_cpu'], varargs=None, keywords=None, defaults=(0.0, False)), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
 paddle.fluid.initializer.UniformInitializer.__init__ (ArgSpec(args=['self', 'low', 'high', 'seed'], varargs=None, keywords=None, defaults=(-1.0, 1.0, 0)), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
 paddle.fluid.initializer.NormalInitializer.__init__ (ArgSpec(args=['self', 'loc', 'scale', 'seed'], varargs=None, keywords=None, defaults=(0.0, 1.0, 0)), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
@@ -78,8 +79,8 @@ paddle.fluid.layers.dynamic_gru (ArgSpec(args=['input', 'size', 'param_attr', 'b
 paddle.fluid.layers.gru_unit (ArgSpec(args=['input', 'hidden', 'size', 'param_attr', 'bias_attr', 'activation', 'gate_activation', 'origin_mode'], varargs=None, keywords=None, defaults=(None, None, 'tanh', 'sigmoid', False)), ('document', 'e0e2439f7af069b57badca18a6ba60b8'))
 paddle.fluid.layers.linear_chain_crf (ArgSpec(args=['input', 'label', 'param_attr'], varargs=None, keywords=None, defaults=(None,)), ('document', '7c49ef4bbf0adfd4b9a1d98e2e5f3fea'))
 paddle.fluid.layers.crf_decoding (ArgSpec(args=['input', 'param_attr', 'label'], varargs=None, keywords=None, defaults=(None,)), ('document', '462ddf2435e3392334e0c05ae57a01c4'))
-paddle.fluid.layers.cos_sim (ArgSpec(args=['X', 'Y'], varargs=None, keywords=None, defaults=None), ('document', 'd740824aa7316b807c4b4a3c6c8c0bbe'))
-paddle.fluid.layers.cross_entropy (ArgSpec(args=['input', 'label', 'soft_label', 'ignore_index'], varargs=None, keywords=None, defaults=(False, -100)), ('document', '025b364dafb4b7975c801eb33e7831a1'))
+paddle.fluid.layers.cos_sim (ArgSpec(args=['X', 'Y'], varargs=None, keywords=None, defaults=None), ('document', 'cefab7c23ee5582727e8b22dffbafac8'))
+paddle.fluid.layers.cross_entropy (ArgSpec(args=['input', 'label', 'soft_label', 'ignore_index'], varargs=None, keywords=None, defaults=(False, -100)), ('document', '535f1f6213dd7ca0fe5ed7cb4718c0e3'))
 paddle.fluid.layers.bpr_loss (ArgSpec(args=['input', 'label', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', '30add751a0f99347a6257634c03ff254'))
 paddle.fluid.layers.square_error_cost (ArgSpec(args=['input', 'label'], varargs=None, keywords=None, defaults=None), ('document', 'f273bb26833ee88b349c4b8083e1dc67'))
 paddle.fluid.layers.chunk_eval (ArgSpec(args=['input', 'label', 'chunk_scheme', 'num_chunk_types', 'excluded_chunk_types'], varargs=None, keywords=None, defaults=(None,)), ('document', 'ee152a7ba3036e7b9ede9184545179b4'))
@@ -93,7 +94,7 @@ paddle.fluid.layers.pool2d (ArgSpec(args=['input', 'pool_size', 'pool_type', 'po
 paddle.fluid.layers.pool3d (ArgSpec(args=['input', 'pool_size', 'pool_type', 'pool_stride', 'pool_padding', 'global_pooling', 'use_cudnn', 'ceil_mode', 'name', 'exclusive'], varargs=None, keywords=None, defaults=(-1, 'max', 1, 0, False, True, False, None, True)), ('document', '043de7333b79ee0ac55053c14ed81625'))
 paddle.fluid.layers.adaptive_pool2d (ArgSpec(args=['input', 'pool_size', 'pool_type', 'require_index', 'name'], varargs=None, keywords=None, defaults=('max', False, None)), ('document', '859b887174d06f361658f69cb7c06d95'))
 paddle.fluid.layers.adaptive_pool3d (ArgSpec(args=['input', 'pool_size', 'pool_type', 'require_index', 'name'], varargs=None, keywords=None, defaults=('max', False, None)), ('document', '120f4323a3d7ed9c0916f15a59f0e497'))
-paddle.fluid.layers.batch_norm (ArgSpec(args=['input', 'act', 'is_test', 'momentum', 'epsilon', 'param_attr', 'bias_attr', 'data_layout', 'in_place', 'name', 'moving_mean_name', 'moving_variance_name', 'do_model_average_for_mean_and_var', 'fuse_with_relu', 'use_global_stats'], varargs=None, keywords=None, defaults=(None, False, 0.9, 1e-05, None, None, 'NCHW', False, None, None, None, False, False, False)), ('document', '320c6973b02ea179fa89fecc80796464'))
+paddle.fluid.layers.batch_norm (ArgSpec(args=['input', 'act', 'is_test', 'momentum', 'epsilon', 'param_attr', 'bias_attr', 'data_layout', 'in_place', 'name', 'moving_mean_name', 'moving_variance_name', 'do_model_average_for_mean_and_var', 'fuse_with_relu', 'use_global_stats'], varargs=None, keywords=None, defaults=(None, False, 0.9, 1e-05, None, None, 'NCHW', False, None, None, None, False, False, False)), ('document', '581f9f99cd7f4b0cab9e0aad5fa0ea24'))
 paddle.fluid.layers.data_norm (ArgSpec(args=['input', 'act', 'epsilon', 'param_attr', 'data_layout', 'in_place', 'name', 'moving_mean_name', 'moving_variance_name', 'do_model_average_for_mean_and_var'], varargs=None, keywords=None, defaults=(None, 1e-05, None, 'NCHW', False, None, None, None, False)), ('document', 'e45e09e65a2658e07cad987222f0d9ab'))
 paddle.fluid.layers.beam_search_decode (ArgSpec(args=['ids', 'scores', 'beam_size', 'end_id', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', 'b0b8d53821716cd50c42e09b593f3feb'))
 paddle.fluid.layers.conv2d_transpose (ArgSpec(args=['input', 'num_filters', 'output_size', 'filter_size', 'padding', 'stride', 'dilation', 'groups', 'param_attr', 'bias_attr', 'use_cudnn', 'act', 'name'], varargs=None, keywords=None, defaults=(None, None, 0, 1, 1, None, None, None, True, None, None)), ('document', '03993955ab1e6d3044c44e6f17fc85e9'))
@@ -115,7 +116,7 @@ paddle.fluid.layers.sequence_last_step (ArgSpec(args=['input'], varargs=None, ke
 paddle.fluid.layers.sequence_slice (ArgSpec(args=['input', 'offset', 'length', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', 'fdcea0e8b5bc7d8d4b1b072c521014e6'))
 paddle.fluid.layers.dropout (ArgSpec(args=['x', 'dropout_prob', 'is_test', 'seed', 'name', 'dropout_implementation'], varargs=None, keywords=None, defaults=(False, None, None, 'downgrade_in_infer')), ('document', 'f1dd22f7351f7f9853212958e0d8aa7a'))
 paddle.fluid.layers.split (ArgSpec(args=['input', 'num_or_sections', 'dim', 'name'], varargs=None, keywords=None, defaults=(-1, None)), ('document', '652625345c2acb900029c78cc75f8aa6'))
-paddle.fluid.layers.ctc_greedy_decoder (ArgSpec(args=['input', 'blank', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', 'ebbf2adbd79683dc93db03454dfa18c2'))
+paddle.fluid.layers.ctc_greedy_decoder (ArgSpec(args=['input', 'blank', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', '2bc3a59efa9d52b628a6255422d9f0e8'))
 paddle.fluid.layers.edit_distance (ArgSpec(args=['input', 'label', 'normalized', 'ignored_tokens'], varargs=None, keywords=None, defaults=(True, None)), ('document', '97f0262f97602644c83142789d784571'))
 paddle.fluid.layers.l2_normalize (ArgSpec(args=['x', 'axis', 'epsilon', 'name'], varargs=None, keywords=None, defaults=(1e-12, None)), ('document', '35c6a241bcc1a1fc89508860d82ad62b'))
 paddle.fluid.layers.matmul (ArgSpec(args=['x', 'y', 'transpose_x', 'transpose_y', 'alpha', 'name'], varargs=None, keywords=None, defaults=(False, False, 1.0, None)), ('document', 'b4cbe1ac451005df6dad12e9ffdccca9'))
@@ -133,7 +134,7 @@ paddle.fluid.layers.multiplex (ArgSpec(args=['inputs', 'index'], varargs=None, k
 paddle.fluid.layers.layer_norm (ArgSpec(args=['input', 'scale', 'shift', 'begin_norm_axis', 'epsilon', 'param_attr', 'bias_attr', 'act', 'name'], varargs=None, keywords=None, defaults=(True, True, 1, 1e-05, None, None, None, None)), ('document', 'de6a906950bae9f3c245cb744d22b94e'))
 paddle.fluid.layers.group_norm (ArgSpec(args=['input', 'groups', 'epsilon', 'param_attr', 'bias_attr', 'act', 'data_layout', 'name'], varargs=None, keywords=None, defaults=(1e-05, None, None, None, 'NCHW', None)), ('document', '419c3a24a83cc89219a029cf4092788b'))
 paddle.fluid.layers.spectral_norm (ArgSpec(args=['weight', 'dim', 'power_iters', 'eps', 'name'], varargs=None, keywords=None, defaults=(0, 1, 1e-12, None)), ('document', '3f536aafba30d793287b52d231baff1b'))
-paddle.fluid.layers.softmax_with_cross_entropy (ArgSpec(args=['logits', 'label', 'soft_label', 'ignore_index', 'numeric_stable_mode', 'return_softmax'], varargs=None, keywords=None, defaults=(False, -100, True, False)), ('document', 'bce1b75e3d95b75cacd1099655cbb3c3'))
+paddle.fluid.layers.softmax_with_cross_entropy (ArgSpec(args=['logits', 'label', 'soft_label', 'ignore_index', 'numeric_stable_mode', 'return_softmax', 'axis'], varargs=None, keywords=None, defaults=(False, -100, True, False, -1)), ('document', '8b074f9c56b4233a2b65d03254eb309e'))
 paddle.fluid.layers.smooth_l1 (ArgSpec(args=['x', 'y', 'inside_weight', 'outside_weight', 'sigma'], varargs=None, keywords=None, defaults=(None, None, None)), ('document', 'c6b175d253c55baf4b9c0eca9b1dda88'))
 paddle.fluid.layers.one_hot (ArgSpec(args=['input', 'depth'], varargs=None, keywords=None, defaults=None), ('document', '960fc799549c202da1e85d626cb2c962'))
 paddle.fluid.layers.autoincreased_step_counter (ArgSpec(args=['counter_name', 'begin', 'step'], varargs=None, keywords=None, defaults=(None, 1, 1)), ('document', '67afefa80b6cc38801bd5b631fed8a4a'))
@@ -141,13 +142,13 @@ paddle.fluid.layers.reshape (ArgSpec(args=['x', 'shape', 'actual_shape', 'act',
 paddle.fluid.layers.squeeze (ArgSpec(args=['input', 'axes', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', '3229d06517f794e86ca3da14c38b1465'))
 paddle.fluid.layers.unsqueeze (ArgSpec(args=['input', 'axes', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', 'bbd62da391b1df984a1909d069a759b2'))
 paddle.fluid.layers.lod_reset (ArgSpec(args=['x', 'y', 'target_lod'], varargs=None, keywords=None, defaults=(None, None)), ('document', 'f122194c562bd674f6ecdccf33785f99'))
-paddle.fluid.layers.lrn (ArgSpec(args=['input', 'n', 'k', 'alpha', 'beta', 'name'], varargs=None, keywords=None, defaults=(5, 1.0, 0.0001, 0.75, None)), ('document', '0795e9940e42dcd62953514ff7e09f77'))
-paddle.fluid.layers.pad (ArgSpec(args=['x', 'paddings', 'pad_value', 'name'], varargs=None, keywords=None, defaults=(0.0, None)), ('document', '2f28153bdd2d5ea6f7bad5867bd03eeb'))
-paddle.fluid.layers.pad_constant_like (ArgSpec(args=['x', 'y', 'pad_value', 'name'], varargs=None, keywords=None, defaults=(0.0, None)), ('document', 'd2e1f45fef51b2c214e3f2aa8976c46c'))
+paddle.fluid.layers.lrn (ArgSpec(args=['input', 'n', 'k', 'alpha', 'beta', 'name'], varargs=None, keywords=None, defaults=(5, 1.0, 0.0001, 0.75, None)), ('document', '330241f0bc57e9d16973ec322a6aef71'))
+paddle.fluid.layers.pad (ArgSpec(args=['x', 'paddings', 'pad_value', 'name'], varargs=None, keywords=None, defaults=(0.0, None)), ('document', '2f189f8ef61f1c23779e1593b78755c0'))
+paddle.fluid.layers.pad_constant_like (ArgSpec(args=['x', 'y', 'pad_value', 'name'], varargs=None, keywords=None, defaults=(0.0, None)), ('document', '95aa1972983f30fe9b5a3713e523e20f'))
 paddle.fluid.layers.label_smooth (ArgSpec(args=['label', 'prior_dist', 'epsilon', 'dtype', 'name'], varargs=None, keywords=None, defaults=(None, 0.1, 'float32', None)), ('document', '70c113658102a11cc5d8e3d45145737a'))
 paddle.fluid.layers.roi_pool (ArgSpec(args=['input', 'rois', 'pooled_height', 'pooled_width', 'spatial_scale'], varargs=None, keywords=None, defaults=(1, 1, 1.0)), ('document', 'c317aa595deb31649083c8faa91cdb97'))
 paddle.fluid.layers.roi_align (ArgSpec(args=['input', 'rois', 'pooled_height', 'pooled_width', 'spatial_scale', 'sampling_ratio', 'name'], varargs=None, keywords=None, defaults=(1, 1, 1.0, -1, None)), ('document', '3d8f4891c1d5e890a4e574371027dd35'))
-paddle.fluid.layers.dice_loss (ArgSpec(args=['input', 'label', 'epsilon'], varargs=None, keywords=None, defaults=(1e-05,)), ('document', '1ba0508d573f65feecf3564dce22aa1d'))
+paddle.fluid.layers.dice_loss (ArgSpec(args=['input', 'label', 'epsilon'], varargs=None, keywords=None, defaults=(1e-05,)), ('document', '7e8e4bf1f0f8612961ed113e8af8f0c5'))
 paddle.fluid.layers.image_resize (ArgSpec(args=['input', 'out_shape', 'scale', 'name', 'resample', 'actual_shape', 'align_corners', 'align_mode'], varargs=None, keywords=None, defaults=(None, None, None, 'BILINEAR', None, True, 1)), ('document', 'f1bc5eb7198175d2b79197a681d98b43'))
 paddle.fluid.layers.image_resize_short (ArgSpec(args=['input', 'out_short_len', 'resample'], varargs=None, keywords=None, defaults=('BILINEAR',)), ('document', '099b9f051e6247ae661e4a7b4fd3f89a'))
 paddle.fluid.layers.resize_bilinear (ArgSpec(args=['input', 'out_shape', 'scale', 'name', 'actual_shape', 'align_corners', 'align_mode'], varargs=None, keywords=None, defaults=(None, None, None, None, True, 1)), ('document', '746bf58fdb1bd475f8c5f996b05b0e52'))
@@ -157,10 +158,10 @@ paddle.fluid.layers.scatter (ArgSpec(args=['input', 'index', 'updates', 'name'],
 paddle.fluid.layers.sequence_scatter (ArgSpec(args=['input', 'index', 'updates', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', '15b522457dfef103f0c20ca9d397678b'))
 paddle.fluid.layers.random_crop (ArgSpec(args=['x', 'shape', 'seed'], varargs=None, keywords=None, defaults=(None,)), ('document', 'c9ab9e460ef0a1823249935a30e82c66'))
 paddle.fluid.layers.mean_iou (ArgSpec(args=['input', 'label', 'num_classes'], varargs=None, keywords=None, defaults=None), ('document', '35cbbdfa585d027bb490707c95a176b9'))
-paddle.fluid.layers.relu (ArgSpec(args=['x', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', '866ffa1cc93f29e23662b526a7596537'))
+paddle.fluid.layers.relu (ArgSpec(args=['x', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', 'bf1676268df8ef100b8ab01d51336b25'))
 paddle.fluid.layers.selu (ArgSpec(args=['x', 'scale', 'alpha', 'name'], varargs=None, keywords=None, defaults=(None, None, None)), ('document', '9044c7fe667b76cb2d9264f2db11f417'))
 paddle.fluid.layers.log (ArgSpec(args=['x', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', '98247c59d1c9b40af6730001b2aea73d'))
-paddle.fluid.layers.crop (ArgSpec(args=['x', 'shape', 'offsets', 'name'], varargs=None, keywords=None, defaults=(None, None, None)), ('document', '883104791204d3127e24234bb630b2e7'))
+paddle.fluid.layers.crop (ArgSpec(args=['x', 'shape', 'offsets', 'name'], varargs=None, keywords=None, defaults=(None, None, None)), ('document', 'ddf9837ee83e549119210a3d714d5f44'))
 paddle.fluid.layers.rank_loss (ArgSpec(args=['label', 'left', 'right', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', 'c542e39ac6add24a6bef6e79bf5617e2'))
 paddle.fluid.layers.margin_rank_loss (ArgSpec(args=['label', 'left', 'right', 'margin', 'name'], varargs=None, keywords=None, defaults=(0.1, None)), ('document', '99b3fee0daee04911d2bee8871b26435'))
 paddle.fluid.layers.elu (ArgSpec(args=['x', 'alpha', 'name'], varargs=None, keywords=None, defaults=(1.0, None)), ('document', '463258ee9f8b60760eb1e26357cc9bfa'))
@@ -182,13 +183,15 @@ paddle.fluid.layers.sequence_enumerate (ArgSpec(args=['input', 'win_size', 'pad_
 paddle.fluid.layers.expand (ArgSpec(args=['x', 'expand_times', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', '117d3607d1ffa0571835bbaebc7857ff'))
 paddle.fluid.layers.sequence_concat (ArgSpec(args=['input', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', '3a1d155dd1bf6e72a0a3e3e1519591d1'))
 paddle.fluid.layers.scale (ArgSpec(args=['x', 'scale', 'bias', 'bias_after_scale', 'act', 'name'], varargs=None, keywords=None, defaults=(1.0, 0.0, True, None, None)), ('document', '30190413b2fa442e7466d6cf2ce5ea07'))
-paddle.fluid.layers.elementwise_add (ArgSpec(args=['x', 'y', 'axis', 'act', 'name'], varargs=None, keywords=None, defaults=(-1, None, None)), ('document', '6bfbe72cbadc95ac7ab88c05ed5bf9f0'))
-paddle.fluid.layers.elementwise_div (ArgSpec(args=['x', 'y', 'axis', 'act', 'name'], varargs=None, keywords=None, defaults=(-1, None, None)), ('document', 'cc6e6cc1cb942a152dde3ef08d5f165c'))
-paddle.fluid.layers.elementwise_sub (ArgSpec(args=['x', 'y', 'axis', 'act', 'name'], varargs=None, keywords=None, defaults=(-1, None, None)), ('document', 'a12abdab09c3e57af5a6e1e9f138684a'))
-paddle.fluid.layers.elementwise_mul (ArgSpec(args=['x', 'y', 'axis', 'act', 'name'], varargs=None, keywords=None, defaults=(-1, None, None)), ('document', '422c77dbfcff355a57b5fdd4ec876daa'))
-paddle.fluid.layers.elementwise_max (ArgSpec(args=['x', 'y', 'axis', 'act', 'name'], varargs=None, keywords=None, defaults=(-1, None, None)), ('document', 'f0bb0b2c454541cfafa761021a5cc776'))
-paddle.fluid.layers.elementwise_min (ArgSpec(args=['x', 'y', 'axis', 'act', 'name'], varargs=None, keywords=None, defaults=(-1, None, None)), ('document', '8a9cdefefbccbf9f6b0991c0946a21e9'))
-paddle.fluid.layers.elementwise_pow (ArgSpec(args=['x', 'y', 'axis', 'act', 'name'], varargs=None, keywords=None, defaults=(-1, None, None)), ('document', '1aea4e197c552a284f83888a3c67a32e'))
+paddle.fluid.layers.elementwise_add (ArgSpec(args=['x', 'y', 'axis', 'act', 'name'], varargs=None, keywords=None, defaults=(-1, None, None)), ('document', '210ee7e597f429f836a21b298991ef85'))
+paddle.fluid.layers.elementwise_div (ArgSpec(args=['x', 'y', 'axis', 'act', 'name'], varargs=None, keywords=None, defaults=(-1, None, None)), ('document', '9ce91719cf4a05de9a817e9ff2387ee8'))
+paddle.fluid.layers.elementwise_sub (ArgSpec(args=['x', 'y', 'axis', 'act', 'name'], varargs=None, keywords=None, defaults=(-1, None, None)), ('document', 'c66c50b550bc547b6c61d15c1f3ee2ab'))
+paddle.fluid.layers.elementwise_mul (ArgSpec(args=['x', 'y', 'axis', 'act', 'name'], varargs=None, keywords=None, defaults=(-1, None, None)), ('document', 'e6919013e5369c7b0d486b8604da6b2f'))
+paddle.fluid.layers.elementwise_max (ArgSpec(args=['x', 'y', 'axis', 'act', 'name'], varargs=None, keywords=None, defaults=(-1, None, None)), ('document', 'f839de1318c794f26b9f5aafcd2ad92f'))
+paddle.fluid.layers.elementwise_min (ArgSpec(args=['x', 'y', 'axis', 'act', 'name'], varargs=None, keywords=None, defaults=(-1, None, None)), ('document', 'c37aa719815585f2c20623f92e738d54'))
+paddle.fluid.layers.elementwise_pow (ArgSpec(args=['x', 'y', 'axis', 'act', 'name'], varargs=None, keywords=None, defaults=(-1, None, None)), ('document', '984e0e72db2a3b4241a694499f8d76c8'))
+paddle.fluid.layers.elementwise_mod (ArgSpec(args=['x', 'y', 'axis', 'act', 'name'], varargs=None, keywords=None, defaults=(-1, None, None)), ('document', '4aa6b682b8676a2f3adf9f58790e327d'))
+paddle.fluid.layers.elementwise_floordiv (ArgSpec(args=['x', 'y', 'axis', 'act', 'name'], varargs=None, keywords=None, defaults=(-1, None, None)), ('document', '638ca44932743bda05caf3fcc15f1f0d'))
 paddle.fluid.layers.uniform_random_batch_size_like (ArgSpec(args=['input', 'shape', 'dtype', 'input_dim_idx', 'output_dim_idx', 'min', 'max', 'seed'], varargs=None, keywords=None, defaults=('float32', 0, 0, -1.0, 1.0, 0)), ('document', '129e0a3257f1d532a948eedf9d5bf671'))
 paddle.fluid.layers.gaussian_random (ArgSpec(args=['shape', 'mean', 'std', 'seed', 'dtype'], varargs=None, keywords=None, defaults=(0.0, 1.0, 0, 'float32')), ('document', '389dafe36e099841b6a7fb18d11f1b4c'))
 paddle.fluid.layers.sampling_id (ArgSpec(args=['x', 'min', 'max', 'seed', 'dtype'], varargs=None, keywords=None, defaults=(0.0, 1.0, 0, 'float32')), ('document', '35428949368cad5121dd37f8522ef8b0'))
@@ -201,18 +204,18 @@ paddle.fluid.layers.logical_and (ArgSpec(args=['x', 'y', 'out', 'name'], varargs
 paddle.fluid.layers.logical_or (ArgSpec(args=['x', 'y', 'out', 'name'], varargs=None, keywords=None, defaults=(None, None)), ('document', '0eae3f726a4afe590757552fa3ced012'))
 paddle.fluid.layers.logical_xor (ArgSpec(args=['x', 'y', 'out', 'name'], varargs=None, keywords=None, defaults=(None, None)), ('document', 'b0daaa3fa4a0aa62f9b58c43d959eb25'))
 paddle.fluid.layers.logical_not (ArgSpec(args=['x', 'out', 'name'], varargs=None, keywords=None, defaults=(None, None)), ('document', 'cd1c8cf31e040427d4e05711044caeb6'))
-paddle.fluid.layers.clip (ArgSpec(args=['x', 'min', 'max', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', 'b020b7aab59719be98a4ae229a76deba'))
+paddle.fluid.layers.clip (ArgSpec(args=['x', 'min', 'max', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', '0ce33756573c572da67302499455dbcd'))
 paddle.fluid.layers.clip_by_norm (ArgSpec(args=['x', 'max_norm', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', 'a1ea0bc5a926f427458c4254ca022749'))
 paddle.fluid.layers.mean (ArgSpec(args=['x', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', 'd638d915195ce86a8d7963b81110d4c8'))
 paddle.fluid.layers.mul (ArgSpec(args=['x', 'y', 'x_num_col_dims', 'y_num_col_dims', 'name'], varargs=None, keywords=None, defaults=(1, 1, None)), ('document', 'ccd37fa6b53f074adbfb732d738c4c2d'))
 paddle.fluid.layers.sigmoid_cross_entropy_with_logits (ArgSpec(args=['x', 'label', 'ignore_index', 'name', 'normalize'], varargs=None, keywords=None, defaults=(-100, None, False)), ('document', '180c284317ea45ef89a460d8d79c0b72'))
 paddle.fluid.layers.maxout (ArgSpec(args=['x', 'groups', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', '71426e02d240d0daedae81a02ca1c191'))
 paddle.fluid.layers.space_to_depth (ArgSpec(args=['x', 'blocksize', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', 'a9221eaef53884a00654e028551b78e2'))
-paddle.fluid.layers.affine_grid (ArgSpec(args=['theta', 'out_shape', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', '51def402b8910e163cbace9d0c0526ed'))
+paddle.fluid.layers.affine_grid (ArgSpec(args=['theta', 'out_shape', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', 'f85b263b7b6698d000977529a28f202b'))
 paddle.fluid.layers.sequence_reverse (ArgSpec(args=['x', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', '77a6d80aa5551ca70324fc975c44507f'))
 paddle.fluid.layers.affine_channel (ArgSpec(args=['x', 'scale', 'bias', 'data_layout', 'name', 'act'], varargs=None, keywords=None, defaults=(None, None, 'NCHW', None, None)), ('document', 'ab84fdc6dc60f3ad9aa397e6007e3bf9'))
 paddle.fluid.layers.similarity_focus (ArgSpec(args=['input', 'axis', 'indexes', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', '6f90d6ff76bf4f5e592332c1ef28494e'))
-paddle.fluid.layers.hash (ArgSpec(args=['input', 'hash_size', 'num_hash', 'name'], varargs=None, keywords=None, defaults=(1, None)), ('document', '9bb77f8dc002dd2ce75d4769eaaf5007'))
+paddle.fluid.layers.hash (ArgSpec(args=['input', 'hash_size', 'num_hash', 'name'], varargs=None, keywords=None, defaults=(1, None)), ('document', '97bf4353bb046a5629308a38f98ac204'))
 paddle.fluid.layers.grid_sampler (ArgSpec(args=['x', 'grid', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', 'd256cba1c41a5ed92ce3f31e24a2ca6d'))
 paddle.fluid.layers.log_loss (ArgSpec(args=['input', 'label', 'epsilon', 'name'], varargs=None, keywords=None, defaults=(0.0001, None)), ('document', 'af541e9263be61ce0e40df58d1b69294'))
 paddle.fluid.layers.add_position_encoding (ArgSpec(args=['input', 'alpha', 'beta', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', '4b9c2e8af5817937d831820874b5aa77'))
@@ -223,7 +226,7 @@ paddle.fluid.layers.lstm (ArgSpec(args=['input', 'init_h', 'init_c', 'max_len',
 paddle.fluid.layers.shuffle_channel (ArgSpec(args=['x', 'group', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', '2fa6782d43d02ae64482d21235a82949'))
 paddle.fluid.layers.temporal_shift (ArgSpec(args=['x', 'seg_num', 'shift_ratio', 'name'], varargs=None, keywords=None, defaults=(0.25, None)), ('document', 'fe4481fb31363b09cfdd228fc6776ddf'))
 paddle.fluid.layers.py_func (ArgSpec(args=['func', 'x', 'out', 'backward_func', 'skip_vars_in_backward_input'], varargs=None, keywords=None, defaults=(None, None)), ('document', '8404e472ac12b4a30a505d3d3a3e5fdb'))
-paddle.fluid.layers.psroi_pool (ArgSpec(args=['input', 'rois', 'output_channels', 'spatial_scale', 'pooled_height', 'pooled_width', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', '1546136806fef5c08f6918544bd9151d'))
+paddle.fluid.layers.psroi_pool (ArgSpec(args=['input', 'rois', 'output_channels', 'spatial_scale', 'pooled_height', 'pooled_width', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', '42d5155374f69786300d90d751956998'))
 paddle.fluid.layers.teacher_student_sigmoid_loss (ArgSpec(args=['input', 'label', 'soft_max_up_bound', 'soft_max_lower_bound'], varargs=None, keywords=None, defaults=(15.0, -15.0)), ('document', '2f6ff96864054a31aa4bb659c6722c99'))
 paddle.fluid.layers.huber_loss (ArgSpec(args=['input', 'label', 'delta'], varargs=None, keywords=None, defaults=None), ('document', '431a4301c35032166ec029f7432c80a7'))
 paddle.fluid.layers.kldiv_loss (ArgSpec(args=['x', 'target', 'reduction', 'name'], varargs=None, keywords=None, defaults=('mean', None)), ('document', '776d536cac47c89073abc7ee524d5aec'))
@@ -232,15 +235,16 @@ paddle.fluid.layers.npair_loss (ArgSpec(args=['anchor', 'positive', 'labels', 'l
 paddle.fluid.layers.pixel_shuffle (ArgSpec(args=['x', 'upscale_factor'], varargs=None, keywords=None, defaults=None), ('document', '132b6e74ff642a392bd6b14c10aedc65'))
 paddle.fluid.layers.fsp_matrix (ArgSpec(args=['x', 'y'], varargs=None, keywords=None, defaults=None), ('document', 'b76ccca3735bea4a58a0dbf0d77c5393'))
 paddle.fluid.layers.continuous_value_model (ArgSpec(args=['input', 'cvm', 'use_cvm'], varargs=None, keywords=None, defaults=(True,)), ('document', 'a07a44c2bacdcd09c1f5f35a96a0514e'))
-paddle.fluid.layers.data (ArgSpec(args=['name', 'shape', 'append_batch_size', 'dtype', 'lod_level', 'type', 'stop_gradient'], varargs=None, keywords=None, defaults=(True, 'float32', 0, VarType.LOD_TENSOR, True)), ('document', '33bbd42027d872b3818b3d64ec52e139'))
-paddle.fluid.layers.open_files (ArgSpec(args=['filenames', 'shapes', 'lod_levels', 'dtypes', 'thread_num', 'buffer_size', 'pass_num', 'is_test'], varargs=None, keywords=None, defaults=(None, None, 1, None)), ('document', 'b1ae2e1cc0750e58726374061ea90ecc'))
+paddle.fluid.layers.where (ArgSpec(args=['condition'], varargs=None, keywords=None, defaults=None), ('document', '3126e3039e752ce26077f1efaca355c6'))
+paddle.fluid.layers.data (ArgSpec(args=['name', 'shape', 'append_batch_size', 'dtype', 'lod_level', 'type', 'stop_gradient'], varargs=None, keywords=None, defaults=(True, 'float32', 0, VarType.LOD_TENSOR, True)), ('document', 'adf285346e23316097f7789b572491e9'))
+paddle.fluid.layers.open_files (ArgSpec(args=['filenames', 'shapes', 'lod_levels', 'dtypes', 'thread_num', 'buffer_size', 'pass_num', 'is_test'], varargs=None, keywords=None, defaults=(None, None, 1, None)), ('document', 'cf12066a3139026119f97f9d4381a1bd'))
 paddle.fluid.layers.read_file (ArgSpec(args=['reader'], varargs=None, keywords=None, defaults=None), ('document', 'b0a1c2fc51c27a106da28f3308c41f5e'))
 paddle.fluid.layers.shuffle (ArgSpec(args=['reader', 'buffer_size'], varargs=None, keywords=None, defaults=None), ('document', 'f967a73426db26f970bc70bfb03cffca'))
-paddle.fluid.layers.batch (ArgSpec(args=['reader', 'batch_size'], varargs=None, keywords=None, defaults=None), ('document', 'f563d376d35e1a4c4db100fd11b381a0'))
+paddle.fluid.layers.batch (ArgSpec(args=['reader', 'batch_size'], varargs=None, keywords=None, defaults=None), ('document', 'fcb24383c6eef2ca040ee824c26e22fd'))
 paddle.fluid.layers.double_buffer (ArgSpec(args=['reader', 'place', 'name'], varargs=None, keywords=None, defaults=(None, None)), ('document', '07e5b796674796eb1ef3fee9c10d24e3'))
 paddle.fluid.layers.random_data_generator (ArgSpec(args=['low', 'high', 'shapes', 'lod_levels', 'for_parallel'], varargs=None, keywords=None, defaults=(True,)), ('document', '9b7f0f86ec24bbc97643cadcb6499cff'))
-paddle.fluid.layers.py_reader (ArgSpec(args=['capacity', 'shapes', 'dtypes', 'lod_levels', 'name', 'use_double_buffer'], varargs=None, keywords=None, defaults=(None, None, True)), ('document', 'c67f756da46159328d23fca29f599d8b'))
-paddle.fluid.layers.create_py_reader_by_data (ArgSpec(args=['capacity', 'feed_list', 'name', 'use_double_buffer'], varargs=None, keywords=None, defaults=(None, True)), ('document', '8acfa165dc4306ac437cc2f10b51b8f5'))
+paddle.fluid.layers.py_reader (ArgSpec(args=['capacity', 'shapes', 'dtypes', 'lod_levels', 'name', 'use_double_buffer'], varargs=None, keywords=None, defaults=(None, None, True)), ('document', '5c54493d96c7e0760dc6758af1c8dd72'))
+paddle.fluid.layers.create_py_reader_by_data (ArgSpec(args=['capacity', 'feed_list', 'name', 'use_double_buffer'], varargs=None, keywords=None, defaults=(None, True)), ('document', 'b42332b894e1e0962c6a43f0151c2640'))
 paddle.fluid.layers.Preprocessor.__init__ (ArgSpec(args=['self', 'reader', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
 paddle.fluid.layers.Preprocessor.block (ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
 paddle.fluid.layers.Preprocessor.inputs (ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
@@ -251,14 +255,14 @@ paddle.fluid.layers.create_parameter (ArgSpec(args=['shape', 'dtype', 'name', 'a
 paddle.fluid.layers.create_global_var (ArgSpec(args=['shape', 'value', 'dtype', 'persistable', 'force_cpu', 'name'], varargs=None, keywords=None, defaults=(False, False, None)), ('document', 'ab914fac893607e29ac6e52bbdbea1a4'))
 paddle.fluid.layers.cast (ArgSpec(args=['x', 'dtype'], varargs=None, keywords=None, defaults=None), ('document', '992eb42590fc1c380841a6db72ce78b3'))
 paddle.fluid.layers.tensor_array_to_tensor (ArgSpec(args=['input', 'axis', 'name'], varargs=None, keywords=None, defaults=(1, None)), ('document', 'b12717d3d4567e6119589f7f655b0cbb'))
-paddle.fluid.layers.concat (ArgSpec(args=['input', 'axis', 'name'], varargs=None, keywords=None, defaults=(0, None)), ('document', 'b19b79be4f05e85d1d6cec642c9fb535'))
+paddle.fluid.layers.concat (ArgSpec(args=['input', 'axis', 'name'], varargs=None, keywords=None, defaults=(0, None)), ('document', 'f9e905b48123914c78055a45fe23106a'))
 paddle.fluid.layers.sums (ArgSpec(args=['input', 'out'], varargs=None, keywords=None, defaults=(None,)), ('document', '42912092418620b4be07f36af31e7816'))
 paddle.fluid.layers.assign (ArgSpec(args=['input', 'output'], varargs=None, keywords=None, defaults=(None,)), ('document', 'b690184f3537df5501e4d9d8f31152a5'))
 paddle.fluid.layers.fill_constant_batch_size_like (ArgSpec(args=['input', 'shape', 'dtype', 'value', 'input_dim_idx', 'output_dim_idx'], varargs=None, keywords=None, defaults=(0, 0)), ('document', 'd4059a2f5763036b07018d76429f9acb'))
 paddle.fluid.layers.fill_constant (ArgSpec(args=['shape', 'dtype', 'value', 'force_cpu', 'out'], varargs=None, keywords=None, defaults=(False, None)), ('document', '1d8b14729639fa38509c79b9784740fa'))
-paddle.fluid.layers.argmin (ArgSpec(args=['x', 'axis'], varargs=None, keywords=None, defaults=(0,)), ('document', '2778a1d34be49263a51211885599ea37'))
-paddle.fluid.layers.argmax (ArgSpec(args=['x', 'axis'], varargs=None, keywords=None, defaults=(0,)), ('document', '04114996cfb98994ba222804a1a6109f'))
-paddle.fluid.layers.argsort (ArgSpec(args=['input', 'axis', 'name'], varargs=None, keywords=None, defaults=(-1, None)), ('document', '68ec45c6fb6b93e47de9c9a0945fb98e'))
+paddle.fluid.layers.argmin (ArgSpec(args=['x', 'axis'], varargs=None, keywords=None, defaults=(0,)), ('document', '677c09cc0fd7381974bfc845c4d9f0f2'))
+paddle.fluid.layers.argmax (ArgSpec(args=['x', 'axis'], varargs=None, keywords=None, defaults=(0,)), ('document', 'ef64ee883998e7e246a854a845e11e2c'))
+paddle.fluid.layers.argsort (ArgSpec(args=['input', 'axis', 'name'], varargs=None, keywords=None, defaults=(-1, None)), ('document', '0a85a9a145d2e24e05958a3f1322d68a'))
 paddle.fluid.layers.ones (ArgSpec(args=['shape', 'dtype', 'force_cpu'], varargs=None, keywords=None, defaults=(False,)), ('document', 'b402489c62e668df42e7daceb63c142b'))
 paddle.fluid.layers.zeros (ArgSpec(args=['shape', 'dtype', 'force_cpu'], varargs=None, keywords=None, defaults=(False,)), ('document', 'c155e2efc56ffa5ed4658cca0272e491'))
 paddle.fluid.layers.reverse (ArgSpec(args=['x', 'axis'], varargs=None, keywords=None, defaults=None), ('document', '8ee7cb6ca639e7460e825f953b65d94d'))
@@ -268,6 +272,7 @@ paddle.fluid.layers.isfinite (ArgSpec(args=['x'], varargs=None, keywords=None, d
 paddle.fluid.layers.range (ArgSpec(args=['start', 'end', 'step', 'dtype'], varargs=None, keywords=None, defaults=None), ('document', '2ec937ede953ded2fdff2675883900bb'))
 paddle.fluid.layers.linspace (ArgSpec(args=['start', 'stop', 'num', 'dtype'], varargs=None, keywords=None, defaults=None), ('document', '495e21e9a848c2d075a102802fc67756'))
 paddle.fluid.layers.zeros_like (ArgSpec(args=['x', 'out'], varargs=None, keywords=None, defaults=(None,)), ('document', 'c7e4cfffc93ae89c8f6f53b6d650f923'))
+paddle.fluid.layers.diag (ArgSpec(args=['diagonal'], varargs=None, keywords=None, defaults=None), ('document', '2964d07340e32e47efb6e5db619875c7'))
 paddle.fluid.layers.While.__init__ (ArgSpec(args=['self', 'cond', 'is_test', 'name'], varargs=None, keywords=None, defaults=(False, None)), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
 paddle.fluid.layers.While.block (ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
 paddle.fluid.layers.Switch.__init__ (ArgSpec(args=['self', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
@@ -306,27 +311,27 @@ paddle.fluid.layers.StaticRNN.update_memory (ArgSpec(args=['self', 'mem', 'var']
 paddle.fluid.layers.reorder_lod_tensor_by_rank (ArgSpec(args=['x', 'rank_table'], varargs=None, keywords=None, defaults=None), ('document', '3545f529ef04e8f6ecb76b47fa3df01a'))
 paddle.fluid.layers.Print (ArgSpec(args=['input', 'first_n', 'message', 'summarize', 'print_tensor_name', 'print_tensor_type', 'print_tensor_shape', 'print_tensor_lod', 'print_phase'], varargs=None, keywords=None, defaults=(-1, None, -1, True, True, True, True, 'both')), ('document', '5fef91b0e21c93610785f2b1f7161732'))
 paddle.fluid.layers.is_empty (ArgSpec(args=['x', 'cond'], varargs=None, keywords=None, defaults=(None,)), ('document', 'bbe578dbb49ad13e15b014e98c22b519'))
-paddle.fluid.layers.sigmoid (ArgSpec(args=['x', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', '29a25ba78de79152076cacfc5443137d'))
-paddle.fluid.layers.logsigmoid (ArgSpec(args=['x', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', '81ccb7acafd06c7728e11581f5d342e3'))
-paddle.fluid.layers.exp (ArgSpec(args=['x', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', 'e6b3e769413d96aab4176f96db25984b'))
-paddle.fluid.layers.tanh (ArgSpec(args=['x', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', 'e9d586a0b5bd05f67ee78048f9d503b6'))
-paddle.fluid.layers.atan (ArgSpec(args=['x', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', '3a46e0b5f9ce82348406478e610f14c9'))
-paddle.fluid.layers.tanh_shrink (ArgSpec(args=['x', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', '1e521554b9fdda9061ec6d306f0709b7'))
-paddle.fluid.layers.softshrink (ArgSpec(args=['x', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', '9eef31597bbafa2bd49691e072296e13'))
-paddle.fluid.layers.sqrt (ArgSpec(args=['x', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', 'e9e27491c39ac74d0b1ffe506aec0ebb'))
-paddle.fluid.layers.rsqrt (ArgSpec(args=['x', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', 'c445467ebe58b3c0d7f0bba7795b6f56'))
-paddle.fluid.layers.abs (ArgSpec(args=['x', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', '64650ac42cf82e9920cb0b172b1d29fd'))
-paddle.fluid.layers.ceil (ArgSpec(args=['x', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', 'c75d67dc5fe28f68e4cfffead4f698ad'))
-paddle.fluid.layers.floor (ArgSpec(args=['x', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', '647b16c5da5ef909649ae02abb434973'))
-paddle.fluid.layers.cos (ArgSpec(args=['x', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', '485f2686bcc2fe37a4bd893769c8a3e2'))
-paddle.fluid.layers.acos (ArgSpec(args=['x', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', '920a47734482276c069ba24c61c26b25'))
-paddle.fluid.layers.asin (ArgSpec(args=['x', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', 'cf4ee2c9b9d7293556f8c5173dfb5d2c'))
-paddle.fluid.layers.sin (ArgSpec(args=['x', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', '01f1766aa76eff1df30147505b59f7c4'))
-paddle.fluid.layers.round (ArgSpec(args=['x', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', 'b47f5da13913d3e56bdb1e612a73f3f2'))
-paddle.fluid.layers.reciprocal (ArgSpec(args=['x', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', 'cc6ac2f14f03c52aaa83a59bf83b8d26'))
-paddle.fluid.layers.square (ArgSpec(args=['x', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', '48dfb45d773dbc30126c3a7f777de5ee'))
-paddle.fluid.layers.softplus (ArgSpec(args=['x', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', '459c5781e9d1dd88283b7c5769d7872a'))
-paddle.fluid.layers.softsign (ArgSpec(args=['x', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', '80846bcd4bd457207457a6d5411f4148'))
+paddle.fluid.layers.sigmoid (ArgSpec(args=['x', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', 'a4e395ab004e7da34e94a0a1f9eee183'))
+paddle.fluid.layers.logsigmoid (ArgSpec(args=['x', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', '5f2508c52e0a797bb9bd5e29d79ede78'))
+paddle.fluid.layers.exp (ArgSpec(args=['x', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', '41c976b68542f4cbee178640f765d845'))
+paddle.fluid.layers.tanh (ArgSpec(args=['x', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', 'a64a80b8ab637e33fc5d0dd63fdbdc47'))
+paddle.fluid.layers.atan (ArgSpec(args=['x', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', 'fe8b8bf36a726362b2a8c1fa01fd2590'))
+paddle.fluid.layers.tanh_shrink (ArgSpec(args=['x', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', '7767e47ffee21281ed5e1f399ef4224b'))
+paddle.fluid.layers.softshrink (ArgSpec(args=['x', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', 'c6af2e21ce4fbc4d19dc51ab2acef6e1'))
+paddle.fluid.layers.sqrt (ArgSpec(args=['x', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', '33aa2c16c320406237f40aa44de5d6bc'))
+paddle.fluid.layers.rsqrt (ArgSpec(args=['x', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', 'fd30d2ab9df5e905832ac9ee31ca382f'))
+paddle.fluid.layers.abs (ArgSpec(args=['x', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', 'c66fcec5f16e4a1fe8c74d183446946e'))
+paddle.fluid.layers.ceil (ArgSpec(args=['x', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', '57435860b214ffafa9b05e8ebb7ced7a'))
+paddle.fluid.layers.floor (ArgSpec(args=['x', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', 'b3d6334262f7cc9f39cd4b1d10369ab0'))
+paddle.fluid.layers.cos (ArgSpec(args=['x', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', '3e78bba17de597f224d01f1f20e6fc63'))
+paddle.fluid.layers.acos (ArgSpec(args=['x', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', '3b24ef9e5aca6e0ebba3e473be589b00'))
+paddle.fluid.layers.asin (ArgSpec(args=['x', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', 'e691d4df018ef6bc05487e85714171c1'))
+paddle.fluid.layers.sin (ArgSpec(args=['x', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', 'e71a4f10c099159ccc0f5a69d443ad68'))
+paddle.fluid.layers.round (ArgSpec(args=['x', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', 'e0e36682b9717322fe111dda7d328d34'))
+paddle.fluid.layers.reciprocal (ArgSpec(args=['x', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', '3b6b463c0a01694f4322b5d4521c3944'))
+paddle.fluid.layers.square (ArgSpec(args=['x', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', 'fcc0d8ec2d2983f5d2ae0196fa83916b'))
+paddle.fluid.layers.softplus (ArgSpec(args=['x', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', 'a9bef6674dc20af1ae901656ed041cdf'))
+paddle.fluid.layers.softsign (ArgSpec(args=['x', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', '5c1e9c619db82d6392826d0c2908ea55'))
 paddle.fluid.layers.uniform_random (ArgSpec(args=['shape', 'dtype', 'min', 'max', 'seed'], varargs=None, keywords=None, defaults=('float32', -1.0, 1.0, 0)), ('document', 'a8c4e972b7d6742c838a37abf407ed9a'))
 paddle.fluid.layers.hard_shrink (ArgSpec(args=['x', 'threshold'], varargs=None, keywords=None, defaults=(None,)), ('document', 'c142f5884f3255e0d6075c286bbd531e'))
 paddle.fluid.layers.cumsum (ArgSpec(args=['x', 'axis', 'exclusive', 'reverse'], varargs=None, keywords=None, defaults=(None, None, None)), ('document', '944d7c03057f5fc88bc78acd4d82f926'))
@@ -341,7 +346,7 @@ paddle.fluid.layers.ssd_loss (ArgSpec(args=['location', 'confidence', 'gt_box',
 paddle.fluid.layers.detection_map (ArgSpec(args=['detect_res', 'label', 'class_num', 'background_label', 'overlap_threshold', 'evaluate_difficult', 'has_state', 'input_states', 'out_states', 'ap_version'], varargs=None, keywords=None, defaults=(0, 0.3, True, None, None, None, 'integral')), ('document', '1467d91b50c22cd52103b4aa1ee9d0a1'))
 paddle.fluid.layers.rpn_target_assign (ArgSpec(args=['bbox_pred', 'cls_logits', 'anchor_box', 'anchor_var', 'gt_boxes', 'is_crowd', 'im_info', 'rpn_batch_size_per_im', 'rpn_straddle_thresh', 'rpn_fg_fraction', 'rpn_positive_overlap', 'rpn_negative_overlap', 'use_random'], varargs=None, keywords=None, defaults=(256, 0.0, 0.5, 0.7, 0.3, True)), ('document', '1dddef3eb4b3cbd4df8e03ac480dbf97'))
 paddle.fluid.layers.anchor_generator (ArgSpec(args=['input', 'anchor_sizes', 'aspect_ratios', 'variance', 'stride', 'offset', 'name'], varargs=None, keywords=None, defaults=(None, None, [0.1, 0.1, 0.2, 0.2], None, 0.5, None)), ('document', '82b2aefeeb1b706bc4afec70928a259a'))
-paddle.fluid.layers.roi_perspective_transform (ArgSpec(args=['input', 'rois', 'transformed_height', 'transformed_width', 'spatial_scale'], varargs=None, keywords=None, defaults=(1.0,)), ('document', '5761f9ed83654314416e24372b33bb84'))
+paddle.fluid.layers.roi_perspective_transform (ArgSpec(args=['input', 'rois', 'transformed_height', 'transformed_width', 'spatial_scale'], varargs=None, keywords=None, defaults=(1.0,)), ('document', '9307c12b1d4e554279b9708f787cd019'))
 paddle.fluid.layers.generate_proposal_labels (ArgSpec(args=['rpn_rois', 'gt_classes', 'is_crowd', 'gt_boxes', 'im_info', 'batch_size_per_im', 'fg_fraction', 'fg_thresh', 'bg_thresh_hi', 'bg_thresh_lo', 'bbox_reg_weights', 'class_nums', 'use_random'], varargs=None, keywords=None, defaults=(256, 0.25, 0.25, 0.5, 0.0, [0.1, 0.1, 0.2, 0.2], None, True)), ('document', '87863717edeb7fe87a1268976cbc015d'))
 paddle.fluid.layers.generate_proposals (ArgSpec(args=['scores', 'bbox_deltas', 'im_info', 'anchors', 'variances', 'pre_nms_top_n', 'post_nms_top_n', 'nms_thresh', 'min_size', 'eta', 'name'], varargs=None, keywords=None, defaults=(6000, 1000, 0.5, 0.1, 1.0, None)), ('document', '57ab49f3f324f310b7eed322e7c1057a'))
 paddle.fluid.layers.generate_mask_labels (ArgSpec(args=['im_info', 'gt_classes', 'is_crowd', 'gt_segms', 'rois', 'labels_int32', 'num_classes', 'resolution'], varargs=None, keywords=None, defaults=None), ('document', 'f73706a65468e9ca3e0bee4a31521b0a'))
@@ -349,7 +354,7 @@ paddle.fluid.layers.iou_similarity (ArgSpec(args=['x', 'y', 'name'], varargs=Non
 paddle.fluid.layers.box_coder (ArgSpec(args=['prior_box', 'prior_box_var', 'target_box', 'code_type', 'box_normalized', 'name', 'axis'], varargs=None, keywords=None, defaults=('encode_center_size', True, None, 0)), ('document', '032d0f4b7d8f6235ee5d91e473344f0e'))
 paddle.fluid.layers.polygon_box_transform (ArgSpec(args=['input', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', '0e5ac2507723a0b5adec473f9556799b'))
 paddle.fluid.layers.yolov3_loss (ArgSpec(args=['x', 'gt_box', 'gt_label', 'anchors', 'anchor_mask', 'class_num', 'ignore_thresh', 'downsample_ratio', 'gt_score', 'use_label_smooth', 'name'], varargs=None, keywords=None, defaults=(None, True, None)), ('document', 'eb62b1ff7cc981f3483a62321a491f2e'))
-paddle.fluid.layers.yolo_box (ArgSpec(args=['x', 'img_size', 'anchors', 'class_num', 'conf_thresh', 'downsample_ratio', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', '5566169a5ab993d177792c023c7fb340'))
+paddle.fluid.layers.yolo_box (ArgSpec(args=['x', 'img_size', 'anchors', 'class_num', 'conf_thresh', 'downsample_ratio', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', 'f332fb8c5bb581bd1a6b5be450a99990'))
 paddle.fluid.layers.box_clip (ArgSpec(args=['input', 'im_info', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', '04384378ff00a42ade8fabd52e27cbc5'))
 paddle.fluid.layers.multiclass_nms (ArgSpec(args=['bboxes', 'scores', 'score_threshold', 'nms_top_k', 'keep_top_k', 'nms_threshold', 'normalized', 'nms_eta', 'background_label', 'name'], varargs=None, keywords=None, defaults=(0.3, True, 1.0, 0, None)), ('document', 'ca7d1107b6c5d2d6d8221039a220fde0'))
 paddle.fluid.layers.distribute_fpn_proposals (ArgSpec(args=['fpn_rois', 'min_level', 'max_level', 'refer_level', 'refer_scale', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', '7bb011ec26bace2bc23235aa4a17647d'))
@@ -359,9 +364,9 @@ paddle.fluid.layers.auc (ArgSpec(args=['input', 'label', 'curve', 'num_threshold
 paddle.fluid.layers.exponential_decay (ArgSpec(args=['learning_rate', 'decay_steps', 'decay_rate', 'staircase'], varargs=None, keywords=None, defaults=(False,)), ('document', '98a5050bee8522fcea81aa795adaba51'))
 paddle.fluid.layers.natural_exp_decay (ArgSpec(args=['learning_rate', 'decay_steps', 'decay_rate', 'staircase'], varargs=None, keywords=None, defaults=(False,)), ('document', '676a7bc2a218691db50bca233903d21e'))
 paddle.fluid.layers.inverse_time_decay (ArgSpec(args=['learning_rate', 'decay_steps', 'decay_rate', 'staircase'], varargs=None, keywords=None, defaults=(False,)), ('document', 'd07e767d59c4a5e6c930f3e6756d3f82'))
-paddle.fluid.layers.polynomial_decay (ArgSpec(args=['learning_rate', 'decay_steps', 'end_learning_rate', 'power', 'cycle'], varargs=None, keywords=None, defaults=(0.0001, 1.0, False)), ('document', '882634f420f626642f0874481263da40'))
-paddle.fluid.layers.piecewise_decay (ArgSpec(args=['boundaries', 'values'], varargs=None, keywords=None, defaults=None), ('document', 'c717d9d1d78a53c809d01b8bc56f3cae'))
-paddle.fluid.layers.noam_decay (ArgSpec(args=['d_model', 'warmup_steps'], varargs=None, keywords=None, defaults=None), ('document', 'd9a95746353fd574be36dc28d8726c28'))
+paddle.fluid.layers.polynomial_decay (ArgSpec(args=['learning_rate', 'decay_steps', 'end_learning_rate', 'power', 'cycle'], varargs=None, keywords=None, defaults=(0.0001, 1.0, False)), ('document', 'a343254c36c2e89512cd8cd8a1960ead'))
+paddle.fluid.layers.piecewise_decay (ArgSpec(args=['boundaries', 'values'], varargs=None, keywords=None, defaults=None), ('document', 'd9f654117542c6b702963dda107a247f'))
+paddle.fluid.layers.noam_decay (ArgSpec(args=['d_model', 'warmup_steps'], varargs=None, keywords=None, defaults=None), ('document', 'f96805b1a64f9a12f4627497e5fcb920'))
 paddle.fluid.layers.cosine_decay (ArgSpec(args=['learning_rate', 'step_each_epoch', 'epochs'], varargs=None, keywords=None, defaults=None), ('document', 'f8b2727bccf0f368c997d7cf05847e49'))
 paddle.fluid.layers.linear_lr_warmup (ArgSpec(args=['learning_rate', 'warmup_steps', 'start_lr', 'end_lr'], varargs=None, keywords=None, defaults=None), ('document', '2ef3f5ca5cd71ea4217c418e5a7a0565'))
 paddle.fluid.contrib.InitState.__init__ (ArgSpec(args=['self', 'init', 'shape', 'value', 'init_boot', 'need_reorder', 'dtype'], varargs=None, keywords=None, defaults=(None, None, 0.0, None, False, 'float32')), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
@@ -430,11 +435,11 @@ paddle.fluid.transpiler.RoundRobin.__init__ (ArgSpec(args=['self', 'pserver_endp
 paddle.fluid.transpiler.RoundRobin.dispatch (ArgSpec(args=['self', 'varlist'], varargs=None, keywords=None, defaults=None), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
 paddle.fluid.transpiler.RoundRobin.reset (ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
 paddle.fluid.transpiler.DistributeTranspilerConfig.__init__
-paddle.fluid.nets.simple_img_conv_pool (ArgSpec(args=['input', 'num_filters', 'filter_size', 'pool_size', 'pool_stride', 'pool_padding', 'pool_type', 'global_pooling', 'conv_stride', 'conv_padding', 'conv_dilation', 'conv_groups',
'param_attr', 'bias_attr', 'act', 'use_cudnn'], varargs=None, keywords=None, defaults=(0, 'max', False, 1, 0, 1, 1, None, None, None, True)), ('document', 'e0f67f35abf27f666f81003113b90244')) -paddle.fluid.nets.sequence_conv_pool (ArgSpec(args=['input', 'num_filters', 'filter_size', 'param_attr', 'act', 'pool_type', 'bias_attr'], varargs=None, keywords=None, defaults=(None, 'sigmoid', 'max', None)), ('document', '48c434dd7bb827f69d90e5135d77470f')) +paddle.fluid.nets.simple_img_conv_pool (ArgSpec(args=['input', 'num_filters', 'filter_size', 'pool_size', 'pool_stride', 'pool_padding', 'pool_type', 'global_pooling', 'conv_stride', 'conv_padding', 'conv_dilation', 'conv_groups', 'param_attr', 'bias_attr', 'act', 'use_cudnn'], varargs=None, keywords=None, defaults=(0, 'max', False, 1, 0, 1, 1, None, None, None, True)), ('document', '13f01ff80e8dfbd3427d90cf49bc62eb')) +paddle.fluid.nets.sequence_conv_pool (ArgSpec(args=['input', 'num_filters', 'filter_size', 'param_attr', 'act', 'pool_type', 'bias_attr'], varargs=None, keywords=None, defaults=(None, 'sigmoid', 'max', None)), ('document', 'd6a1e527b53f5cc15594fee307dfc5cf')) paddle.fluid.nets.glu (ArgSpec(args=['input', 'dim'], varargs=None, keywords=None, defaults=(-1,)), ('document', '6486b2595300fc3305b5a1f0ac363dce')) paddle.fluid.nets.scaled_dot_product_attention (ArgSpec(args=['queries', 'keys', 'values', 'num_heads', 'dropout_rate'], varargs=None, keywords=None, defaults=(1, 0.0)), ('document', '921714c9bfb351b41403418265393203')) -paddle.fluid.nets.img_conv_group (ArgSpec(args=['input', 'conv_num_filter', 'pool_size', 'conv_padding', 'conv_filter_size', 'conv_act', 'param_attr', 'conv_with_batchnorm', 'conv_batchnorm_drop_rate', 'pool_stride', 'pool_type', 'use_cudnn'], varargs=None, keywords=None, defaults=(1, 3, None, None, False, 0.0, 1, 'max', True)), ('document', '3802be78fbfb206dae64a2d9f8480970')) +paddle.fluid.nets.img_conv_group (ArgSpec(args=['input', 'conv_num_filter', 'pool_size', 'conv_padding', 'conv_filter_size', 'conv_act', 'param_attr', 'conv_with_batchnorm', 'conv_batchnorm_drop_rate', 'pool_stride', 'pool_type', 'use_cudnn'], varargs=None, keywords=None, defaults=(1, 3, None, None, False, 0.0, 1, 'max', True)), ('document', '5178bc1b4d302192597a5efbae13d902')) paddle.fluid.optimizer.SGDOptimizer.__init__ (ArgSpec(args=['self', 'learning_rate', 'regularization', 'name'], varargs=None, keywords=None, defaults=(None, None)), ('document', '6adf97f83acf6453d4a6a4b1070f3754')) paddle.fluid.optimizer.SGDOptimizer.apply_gradients (ArgSpec(args=['self', 'params_grads'], varargs=None, keywords=None, defaults=None), ('document', 'bfe7305918552aaecfdaa22411dbe871')) paddle.fluid.optimizer.SGDOptimizer.apply_optimize (ArgSpec(args=['self', 'loss', 'startup_program', 'params_grads'], varargs=None, keywords=None, defaults=None), ('document', '5c46d1926a40f1f873ffe9f37ac89dae')) @@ -509,7 +514,7 @@ paddle.fluid.optimizer.DGCMomentumOptimizer.apply_optimize (ArgSpec(args=['self' paddle.fluid.optimizer.DGCMomentumOptimizer.backward (ArgSpec(args=['self', 'loss', 'startup_program', 'parameter_list', 'no_grad_set', 'callbacks'], varargs=None, keywords=None, defaults=(None, None, None, None)), ('document', 'ba3a113d0229ff7bc9d39bda0a6d947f')) paddle.fluid.optimizer.DGCMomentumOptimizer.get_opti_var_name_list (ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None), ('document', '6adf97f83acf6453d4a6a4b1070f3754')) paddle.fluid.optimizer.DGCMomentumOptimizer.minimize (ArgSpec(args=['self', 'loss', 'startup_program', 
'parameter_list', 'no_grad_set'], varargs=None, keywords=None, defaults=(None, None, None)), ('document', '35fd5d3330c97903528c7e0dacc7f6ea')) -paddle.fluid.backward.append_backward (ArgSpec(args=['loss', 'parameter_list', 'no_grad_set', 'callbacks'], varargs=None, keywords=None, defaults=(None, None, None)), ('document', '1a79bd7d10ae54ca763ec81bca36ba24')) +paddle.fluid.backward.append_backward (ArgSpec(args=['loss', 'parameter_list', 'no_grad_set', 'callbacks'], varargs=None, keywords=None, defaults=(None, None, None)), ('document', '08a5dd9f6f376ff3d55e0b1d92115cbd')) paddle.fluid.regularizer.L1DecayRegularizer.__init__ (ArgSpec(args=['self', 'regularization_coeff'], varargs=None, keywords=None, defaults=(0.0,)), ('document', '6adf97f83acf6453d4a6a4b1070f3754')) paddle.fluid.regularizer.L2DecayRegularizer.__init__ (ArgSpec(args=['self', 'regularization_coeff'], varargs=None, keywords=None, defaults=(0.0,)), ('document', '6adf97f83acf6453d4a6a4b1070f3754')) paddle.fluid.LoDTensor.__init__ 1. __init__(self: paddle.fluid.core.LoDTensor, arg0: List[List[int]]) -> None 2. __init__(self: paddle.fluid.core.LoDTensor) -> None diff --git a/paddle/fluid/framework/data_feed.cc b/paddle/fluid/framework/data_feed.cc index 0291b6f66a9e8cb6a3c16530084d3e3e7a6c39c1..02e467e853e9c3e7a4d581043e6a8f7b70519521 100644 --- a/paddle/fluid/framework/data_feed.cc +++ b/paddle/fluid/framework/data_feed.cc @@ -455,6 +455,8 @@ void MultiSlotDataFeed::Init( all_slots_.resize(all_slot_num); all_slots_type_.resize(all_slot_num); use_slots_index_.resize(all_slot_num); + total_dims_without_inductive_.resize(all_slot_num); + inductive_shape_index_.resize(all_slot_num); use_slots_.clear(); use_slots_is_dense_.clear(); for (size_t i = 0; i < all_slot_num; ++i) { @@ -462,14 +464,20 @@ void MultiSlotDataFeed::Init( all_slots_[i] = slot.name(); all_slots_type_[i] = slot.type(); use_slots_index_[i] = slot.is_used() ? 
use_slots_.size() : -1; + total_dims_without_inductive_[i] = 1; + inductive_shape_index_[i] = -1; if (slot.is_used()) { use_slots_.push_back(all_slots_[i]); use_slots_is_dense_.push_back(slot.is_dense()); std::vector local_shape; if (slot.is_dense()) { - // for batch size holder if is_dense - if (slot.shape(0) > 0) { - local_shape.push_back(0); + for (size_t j = 0; j < slot.shape_size(); ++j) { + if (slot.shape(j) > 0) { + total_dims_without_inductive_[i] *= slot.shape(j); + } + if (slot.shape(j) == -1) { + inductive_shape_index_[i] = j; + } } } for (size_t i = 0; i < slot.shape_size(); ++i) { @@ -762,7 +770,10 @@ void MultiSlotDataFeed::PutToFeedVec( LoD data_lod{offset}; feed_vec_[i]->set_lod(data_lod); if (use_slots_is_dense_[i]) { - use_slots_shape_[i][0] = batch_size_; + if (inductive_shape_index_[i] != -1) { + use_slots_shape_[i][inductive_shape_index_[i]] = + total_instance / total_dims_without_inductive_[i]; + } feed_vec_[i]->Resize(framework::make_ddim(use_slots_shape_[i])); } } @@ -785,6 +796,8 @@ void MultiSlotInMemoryDataFeed::Init( all_slots_.resize(all_slot_num); all_slots_type_.resize(all_slot_num); use_slots_index_.resize(all_slot_num); + total_dims_without_inductive_.resize(all_slot_num); + inductive_shape_index_.resize(all_slot_num); use_slots_.clear(); use_slots_is_dense_.clear(); for (size_t i = 0; i < all_slot_num; ++i) { @@ -797,8 +810,13 @@ use_slots_is_dense_.push_back(slot.is_dense()); std::vector local_shape; if (slot.is_dense()) { - if (slot.shape(0) > 0) { - local_shape.push_back(0); + for (size_t j = 0; j < slot.shape_size(); ++j) { + if (slot.shape(j) > 0) { + total_dims_without_inductive_[i] *= slot.shape(j); + } + if (slot.shape(j) == -1) { + inductive_shape_index_[i] = j; + } } } for (size_t i = 0; i < slot.shape_size(); ++i) { @@ -960,7 +978,10 @@ void MultiSlotInMemoryDataFeed::PutToFeedVec( LoD data_lod{offset}; feed_vec_[i]->set_lod(data_lod); if (use_slots_is_dense_[i]) { + if (inductive_shape_index_[i] != -1) { + use_slots_shape_[i][inductive_shape_index_[i]] = + total_instance / total_dims_without_inductive_[i]; + } - use_slots_shape_[i][0] = batch_size_; feed_vec_[i]->Resize(framework::make_ddim(use_slots_shape_[i])); } } diff --git a/paddle/fluid/framework/data_feed.h b/paddle/fluid/framework/data_feed.h index d098c7858a98c644bd3cad78d3cf1e3b35ca026b..c141059a6d6b3420f02c1b6090cf67db7b7b4da8 100644 --- a/paddle/fluid/framework/data_feed.h +++ b/paddle/fluid/framework/data_feed.h @@ -143,6 +143,8 @@ class DataFeed { std::vector all_slots_; std::vector all_slots_type_; std::vector> use_slots_shape_; + std::vector inductive_shape_index_; + std::vector total_dims_without_inductive_; std::vector use_slots_index_; // -1: not used; >=0: the index of use_slots_ diff --git a/paddle/fluid/framework/ddim.cc b/paddle/fluid/framework/ddim.cc index e7a6df57e538164969bc101ced4b91de8f75ca56..bbc9982d9db4cd5bec872b44d2385afccd77ffd3 100644 --- a/paddle/fluid/framework/ddim.cc +++ b/paddle/fluid/framework/ddim.cc @@ -121,6 +121,16 @@ int64_t product(const DDim& ddim) { return ddim.apply_visitor(ProductVisitor()); } +bool contain_unknown_dim(const DDim& ddim) { + for (int i = 0; i < ddim.size(); ++i) { + if (ddim[i] < 0) { + return true; + } + } + + return false; +} + DDim slice_ddim(const DDim& dim, int begin, int end) { PADDLE_ENFORCE(begin >= 0 && end <= dim.size(), "[begin(%d), end(%d)) must be inside [0, %d) in ddim slice.", diff --git a/paddle/fluid/framework/ddim.h b/paddle/fluid/framework/ddim.h index
31a41dab2a1f1d6bad9fe697c5d367f32e219160..7d2e296b6c1a99180acc105eb73754233cfa15f4 100644 --- a/paddle/fluid/framework/ddim.h +++ b/paddle/fluid/framework/ddim.h @@ -182,6 +182,8 @@ std::vector vectorize2int(const DDim& ddim); int64_t product(const DDim& ddim); +bool contain_unknown_dim(const DDim& ddim); + /** * \brief Slice a ddim * diff --git a/paddle/fluid/framework/details/CMakeLists.txt b/paddle/fluid/framework/details/CMakeLists.txt index 2f6a816cbff327424dd5424a11ddce0bcac8537a..615cfaa4f31a2411685652c2a7581da6f361eaf3 100644 --- a/paddle/fluid/framework/details/CMakeLists.txt +++ b/paddle/fluid/framework/details/CMakeLists.txt @@ -1,22 +1,12 @@ cc_library(var_handle SRCS var_handle.cc DEPS place framework_proto node) cc_library(op_handle_base SRCS op_handle_base.cc DEPS var_handle device_context lod_tensor) -cc_library(op_graph_view SRCS op_graph_view.cc DEPS op_handle_base) + cc_library(scale_loss_grad_op_handle SRCS scale_loss_grad_op_handle.cc DEPS op_handle_base scope lod_tensor ddim memory) cc_library(fetch_op_handle SRCS fetch_op_handle.cc DEPS op_handle_base scope lod_tensor ddim memory) cc_library(computation_op_handle SRCS computation_op_handle.cc DEPS framework_proto scope place operator op_registry) cc_library(rpc_op_handle SRCS rpc_op_handle.cc DEPS framework_proto scope place operator op_registry) cc_library(fetch_barrier_op_handle SRCS fetch_barrier_op_handle.cc DEPS framework_proto scope place operator op_registry) - cc_library(multi_devices_helper SRCS multi_devices_helper.cc DEPS graph graph_helper) -cc_library(multi_devices_graph_print_pass SRCS multi_devices_graph_print_pass.cc DEPS multi_devices_helper) -cc_library(multi_devices_graph_check_pass SRCS multi_devices_graph_check_pass.cc DEPS multi_devices_helper) - -cc_library(alloc_continuous_space_for_grad_pass SRCS alloc_continuous_space_for_grad_pass.cc DEPS graph graph_helper) -cc_library(fuse_adam_op_pass SRCS fuse_adam_op_pass.cc fuse_optimizer_op_pass.cc DEPS graph graph_helper) -cc_library(fuse_sgd_op_pass SRCS fuse_sgd_op_pass.cc fuse_optimizer_op_pass.cc DEPS graph graph_helper) -cc_library(fuse_momentum_op_pass SRCS fuse_momentum_op_pass.cc fuse_optimizer_op_pass.cc DEPS graph graph_helper) - -cc_library(record_skip_memory_opt_vars_pass SRCS record_skip_memory_opt_vars_pass.cc DEPS graph graph_helper) cc_library(variable_visitor SRCS variable_visitor.cc DEPS lod_tensor selected_rows) @@ -27,7 +17,7 @@ if(WITH_DISTRIBUTE) endif() endif() -set(all_reduce_deps all_reduce_op_handle) + if(WITH_GPU) nv_library(all_reduce_op_handle SRCS all_reduce_op_handle.cc DEPS op_handle_base scope lod_tensor ddim memory dynload_cuda variable_visitor) @@ -37,7 +27,6 @@ if(WITH_GPU) if(WITH_DGC) nv_library(sparse_all_reduce_op_handle SRCS sparse_all_reduce_op_handle.cc DEPS op_handle_base scope lod_tensor ddim memory dynload_cuda variable_visitor dgc all_reduce_op_handle) - set(all_reduce_deps sparse_all_reduce_op_handle) endif() if(WITH_DISTRIBUTE) @@ -68,34 +57,12 @@ endif() cc_library(gather_op_handle SRCS gather_op_handle.cc DEPS op_handle_base scope ddim memory variable_visitor) -if(WITH_GPU) -cc_library(memory_optimize_helper SRCS memory_optimize_helper.cc DEPS graph graph_helper gpu_info) -else() -cc_library(memory_optimize_helper SRCS memory_optimize_helper.cc DEPS graph graph_helper cpu_info) -endif() - -cc_library(memory_optimize_pass SRCS memory_optimize_pass.cc DEPS memory_optimize_helper pass) -cc_library(inplace_op_pass SRCS inplace_op_pass.cc DEPS memory_optimize_pass op_info) 
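Note on the data_feed/ddim changes above: a dense slot may now declare one inductive dimension as -1, and the feed path resolves it from the number of instances using the product of the known dimensions. A minimal standalone sketch of that resolution rule (hypothetical helper, not part of the patch):

```cpp
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <vector>

// Resolve an inductive (-1) dimension: the product of the known dims
// (cf. total_dims_without_inductive_) must divide the total instance count.
// E.g. shape {-1, 128} with 1024 total elements resolves to {8, 128}.
std::vector<int64_t> ResolveInductiveShape(std::vector<int64_t> shape,
                                           int64_t total_instance) {
  int64_t known_product = 1;
  int inductive_index = -1;
  for (std::size_t j = 0; j < shape.size(); ++j) {
    if (shape[j] > 0) known_product *= shape[j];
    if (shape[j] == -1) inductive_index = static_cast<int>(j);
  }
  if (inductive_index != -1) {
    assert(total_instance % known_product == 0);
    shape[inductive_index] = total_instance / known_product;
  }
  return shape;
}
```

contain_unknown_dim(ddim) above is the matching query for a DDim that still carries a negative entry before resolution.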
-cc_library(modify_op_lock_and_record_event_pass SRCS modify_op_lock_and_record_event_pass.cc DEPS computation_op_handle op_graph_view multi_devices_helper) -cc_library(reference_count_pass_helper SRCS reference_count_pass_helper.cc DEPS garbage_collector computation_op_handle) cc_library(eager_deletion_op_handle SRCS eager_deletion_op_handle.cc DEPS lod_tensor selected_rows reference_count_pass_helper) -cc_library(while_op_eager_deletion_pass SRCS while_op_eager_deletion_pass.cc DEPS while_op_helper graph_helper pass computation_op_handle) -cc_library(eager_deletion_pass SRCS eager_deletion_pass.cc DEPS computation_op_handle eager_deletion_op_handle graph graph_helper pass while_op_eager_deletion_pass) -cc_library(reference_count_pass SRCS reference_count_pass.cc DEPS computation_op_handle graph graph_helper pass op_graph_view reference_count_pass_helper) - -cc_library(sequential_execution_pass SRCS sequential_execution_pass.cc DEPS graph graph_helper pass) -cc_library(all_reduce_deps_pass SRCS all_reduce_deps_pass.cc DEPS graph graph_helper pass) - -cc_library(multi_devices_graph_pass SRCS multi_devices_graph_pass.cc DEPS multi_devices_helper computation_op_handle - scale_loss_grad_op_handle rpc_op_handle fetch_barrier_op_handle ${all_reduce_deps} reduce_op_handle broadcast_op_handle fused_broadcast_op_handle) - -cc_library(fuse_all_reduce_op_pass SRCS fuse_all_reduce_op_pass.cc DEPS graph graph_helper fused_all_reduce_op_handle) set(SSA_GRAPH_EXECUTOR_DEPS graph framework_proto sequential_execution_pass modify_op_lock_and_record_event_pass all_reduce_deps_pass reference_count_pass eager_deletion_pass memory_optimize_pass inplace_op_pass) if (WITH_GPU) list(APPEND SSA_GRAPH_EXECUTOR_DEPS reference_count_pass) endif() -cc_test(memory_optimize_helper_test SRCS memory_optimize_helper_test.cc memory_optimize_helper.cc DEPS framework_proto graph graph_helper op_registry) cc_library(ssa_graph_executor SRCS ssa_graph_executor.cc DEPS ${SSA_GRAPH_EXECUTOR_DEPS}) cc_library(threaded_ssa_graph_executor SRCS threaded_ssa_graph_executor.cc DEPS fetch_op_handle ssa_graph_executor scope diff --git a/paddle/fluid/framework/details/alloc_continuous_space_for_grad_pass.cc b/paddle/fluid/framework/details/alloc_continuous_space_for_grad_pass.cc deleted file mode 100644 index 58ec427859e9f0ec4d29cc419f5bfe382e245852..0000000000000000000000000000000000000000 --- a/paddle/fluid/framework/details/alloc_continuous_space_for_grad_pass.cc +++ /dev/null @@ -1,411 +0,0 @@ -// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -#include "paddle/fluid/framework/details/alloc_continuous_space_for_grad_pass.h" -#include -#include -#include -#include -#include -#include "paddle/fluid/framework/details/build_strategy.h" -#include "paddle/fluid/framework/details/multi_devices_helper.h" -#include "paddle/fluid/framework/ir/graph_helper.h" -#include "paddle/fluid/framework/op_registry.h" - -DEFINE_uint64(fuse_parameter_memory_size, 0, // 0 KB - "fuse_parameter_memory_size is up limited memory size " - "of one group parameters' gradient which is the input " - "of communication calling(e.g NCCLAllReduce). " - "The default value is 0, it means that " - "not set group according to memory_size."); -DEFINE_int32( - fuse_parameter_groups_size, 3, - "fuse_parameter_groups_size is the size of one group parameters' gradient. " - "The default value is a experimental result. If the " - "fuse_parameter_groups_size is 1, it means that the groups size is " - "the number of parameters' gradient. If the fuse_parameter_groups_size is " - "-1, it means that there are only one group. The default value is 3, it is " - "an experimental value."); - -namespace paddle { -namespace framework { -namespace details { -// SetFuseParameterGroupsSize and SetFuseParameterMemorySize are used in unit -// test, because it is invalid that seting 'FLAGS_fuse_parameter_memory_size' -// and 'FLAGS_fuse_parameter_groups_size' in unit test. -void SetFuseParameterGroupsSize(int group_size) { - FLAGS_fuse_parameter_groups_size = group_size; -} - -int GetFuseParameterGroupsSize() { return FLAGS_fuse_parameter_groups_size; } - -void SetFuseParameterMemorySize(uint64_t memory_size) { - FLAGS_fuse_parameter_memory_size = memory_size; -} - -uint64_t GetFuseParameterMemorySize() { - return FLAGS_fuse_parameter_memory_size; -} - -static const char kUnKnow[] = "@UNKNOW@"; -static framework::proto::VarType::Type kDefaultDtype = - framework::proto::VarType::Type::VarType_Type_BOOL; - -void AllocContinuousSpaceForGradPass::ApplyImpl(ir::Graph *graph) const { - ir::Graph &result = *graph; - - auto &places = Get>(kPlaces); - auto &local_scopes = Get>(kLocalScopes); - - ResetAttribute(kParamsAndGrads, &result); - ResetAttribute(kGroupGradsAndParams, &result); - - // NOTE: The operator nodes should be in topology order. - std::vector topo_nodes = ir::TopologySortOperations(result); - auto ¶ms_grads = result.Get(kParamsAndGrads); - for (auto &node : topo_nodes) { - RecordParamsAndGrads(node, ¶ms_grads); - } - - if (params_grads.size() == 0) { - VLOG(10) << "Doesn't find gradients"; - return; - } - - std::unordered_map vars; - for (ir::Node *node : result.Nodes()) { - if (node->IsVar() && node->Var()) { - // Note: The graph may have the same name node. For example, parameter - // is the input of operator and it also is the output of optimizer; - vars.emplace(node->Var()->Name(), node); - } - } - - auto &group_grads_params = - result.Get(kGroupGradsAndParams); - - // Note: the order of params_grads may be changed by SetGroupGradsAndParams. - SetGroupGradsAndParams(vars, params_grads, &group_grads_params); - - params_grads.clear(); - for (auto &group_p_g : group_grads_params) { - params_grads.insert(params_grads.begin(), group_p_g.begin(), - group_p_g.end()); - } - for (auto &p_g : params_grads) { - std::swap(p_g.first, p_g.second); - } - - // Set Gradients as Persistable to prevent this var becoming reusable. 
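The reordering step just above rebuilds params_grads from the grouped (grad, param) pairs and swaps each pair back into (param, grad) order. A standalone sketch of that reshaping, with simplified string pairs:

```cpp
#include <string>
#include <utility>
#include <vector>

using PairOfNames = std::pair<std::string, std::string>;

// Flatten grouped (grad, param) pairs into a single list, prepending each
// group (mirroring the insert-at-begin loop above), then swap every pair
// so the result is in (param, grad) order.
std::vector<PairOfNames> FlattenAndSwap(
    const std::vector<std::vector<PairOfNames>>& groups) {
  std::vector<PairOfNames> flat;
  for (const auto& group : groups) {
    flat.insert(flat.begin(), group.begin(), group.end());
  }
  for (auto& pair : flat) {
    std::swap(pair.first, pair.second);
  }
  return flat;
}
```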
- auto dtype = kDefaultDtype; - for (auto &p_g : params_grads) { - // Get gradient var - auto iter = vars.find(p_g.second); - PADDLE_ENFORCE(iter != vars.end(), "%s is not found.", p_g.second); - iter->second->Var()->SetPersistable(true); - - PADDLE_ENFORCE(IsSupportedVarType(iter->second->Var()->GetType())); - - // Get Dtype - auto ele_dtype = iter->second->Var()->GetDataType(); - if (dtype == kDefaultDtype) { - dtype = ele_dtype; - PADDLE_ENFORCE_NE(ele_dtype, kDefaultDtype, - "The data type should not be bool."); - } - PADDLE_ENFORCE_EQ(ele_dtype, dtype, - "The data type of input is not consistent."); - } - - // Create a FusedVarsSet to avoid duplicating names for fused_var in other - // pass. - if (!result.Has(kFusedVars)) { - result.Set(kFusedVars, new FusedVars); - } - // the kFusedGrads is used be fuse_optimizer_op_pass. - result.Set(kFusedGrads, new FusedGrads); - - // the fused_var_name should be unique, so it appends - // params_grads.begin()->second. - auto fused_var_name = std::string(kFusedVarNamePrefix) + "@GRAD@" + - params_grads.begin()->second; - result.Get(kFusedGrads) = fused_var_name; - auto &fused_var_set = result.Get(kFusedVars); - PADDLE_ENFORCE_EQ(fused_var_set.count(fused_var_name), 0, - "%s is duplicate in FusedVars.", fused_var_name); - fused_var_set.insert(fused_var_name); - - InitFusedVarsAndAllocSpaceForVars(places, local_scopes, vars, fused_var_name, - params_grads); -} - -template -void AllocContinuousSpaceForGradPass::ResetAttribute( - const std::string &attr_name, ir::Graph *graph) const { - if (graph->Has(attr_name)) { - VLOG(10) << attr_name << " is reset."; - graph->Erase(attr_name); - } - graph->Set(attr_name, new AttrType); -} - -void AllocContinuousSpaceForGradPass::SetGroupGradsAndParams( - const std::unordered_map &var_nodes, - const ParamsAndGrads ¶ms_grads, - GroupGradsAndParams *group_grads_params) const { - SetGroupAccordingToLayers(var_nodes, params_grads, group_grads_params); - SetGroupAccordingToMemorySize(var_nodes, group_grads_params); - SetGroupAccordingToGroupSize(var_nodes, group_grads_params); -} - -void AllocContinuousSpaceForGradPass::SetGroupAccordingToLayers( - const std::unordered_map &var_nodes, - const ParamsAndGrads ¶ms_grads, - GroupGradsAndParams *group_grads_params) const { - std::unordered_map> layer_params; - - for (size_t i = 0; i < params_grads.size(); ++i) { - auto pos = params_grads[i].first.find_first_of("."); - if (pos == std::string::npos) { - layer_params[std::string(kUnKnow)].emplace_back(i); - } else { - layer_params[params_grads[i].first.substr(0, pos)].emplace_back(i); - } - } - - group_grads_params->reserve(layer_params.size()); - for (size_t i = 0; i < params_grads.size(); ++i) { - auto pos = params_grads[i].first.find_first_of("."); - std::string key = kUnKnow; - if (pos != std::string::npos) { - key = params_grads[i].first.substr(0, pos); - } - auto iter = layer_params.find(key); - if (iter == layer_params.end()) continue; - - group_grads_params->emplace_back(); - auto &local_group_grads_params = group_grads_params->back(); - for (auto &idx : iter->second) { - local_group_grads_params.emplace_back( - std::make_pair(params_grads[idx].second, params_grads[idx].first)); - } - layer_params.erase(iter); - } - - VLOG(10) << "SetGroupAccordingToLayers: "; - for (size_t i = 0; i < group_grads_params->size(); ++i) { - VLOG(10) << "group " << i; - std::stringstream out; - for (auto &p_g : group_grads_params->at(i)) { - out << "(" << p_g.second << ", " << p_g.first << "), "; - } - VLOG(10) << out.str(); - } -} - -void 
AllocContinuousSpaceForGradPass::SetGroupAccordingToMemorySize( - const std::unordered_map &var_nodes, - GroupGradsAndParams *group_grads_params) const { - const uint64_t group_memory_size = GetFuseParameterMemorySize(); - if (group_memory_size == 0) { - return; - } - GroupGradsAndParams local_group_grads_params; - size_t j = 0; - while (j < group_grads_params->size()) { - local_group_grads_params.emplace_back(); - auto &group_p_g = local_group_grads_params.back(); - size_t local_group_memory_size = 0; - while (j < group_grads_params->size()) { - std::for_each( - group_grads_params->at(j).begin(), group_grads_params->at(j).end(), - [&local_group_memory_size, - &var_nodes](const std::pair &g_p) { - auto iter = var_nodes.find(g_p.second); - PADDLE_ENFORCE(iter != var_nodes.end(), "%s is not found.", - g_p.second); - auto shape = iter->second->Var()->GetShape(); - size_t size = - framework::SizeOfType(iter->second->Var()->GetDataType()); - std::for_each(shape.begin(), shape.end(), - [&size](const int64_t &n) { size *= n; }); - local_group_memory_size += size; - }); - group_p_g.insert(group_p_g.end(), group_grads_params->at(j).begin(), - group_grads_params->at(j).end()); - ++j; - if (local_group_memory_size >= group_memory_size) { - break; - } - } - } - - std::swap(*group_grads_params, local_group_grads_params); - - VLOG(10) << string::Sprintf("SetGroupAccordingToMemorySize(memory_size: %d):", - group_memory_size); - for (size_t i = 0; i < group_grads_params->size(); ++i) { - VLOG(10) << "group " << i; - std::stringstream out; - for (auto &g_p : group_grads_params->at(i)) { - auto iter = var_nodes.find(g_p.second); - PADDLE_ENFORCE(iter != var_nodes.end(), "%s is not found.", g_p.second); - auto shape = iter->second->Var()->GetShape(); - size_t size = framework::SizeOfType(iter->second->Var()->GetDataType()); - std::for_each(shape.begin(), shape.end(), - [&size](const int64_t &n) { size *= n; }); - out << string::Sprintf("(%s(%d), %s)", g_p.second, size, g_p.first); - } - VLOG(10) << out.str(); - } -} - -void AllocContinuousSpaceForGradPass::SetGroupAccordingToGroupSize( - const std::unordered_map &var_nodes, - GroupGradsAndParams *group_grads_params) const { - if (GetFuseParameterGroupsSize() == 1) { - return; - } - const int group_size = GetFuseParameterGroupsSize() == -1 - ? static_cast(group_grads_params->size()) - : GetFuseParameterGroupsSize(); - PADDLE_ENFORCE_GT(group_size, 1); - size_t groups = (group_grads_params->size() + group_size - 1) / group_size; - GroupGradsAndParams local_group_grads_params; - local_group_grads_params.reserve(groups); - - size_t j = 0; - for (size_t i = 0; i < groups; ++i) { - local_group_grads_params.emplace_back(); - auto &group_p_g = local_group_grads_params.back(); - group_p_g.reserve(group_size); - while (j < group_grads_params->size()) { - group_p_g.insert(group_p_g.end(), group_grads_params->at(j).begin(), - group_grads_params->at(j).end()); - ++j; - if (j % group_size == 0) break; - } - } - std::swap(*group_grads_params, local_group_grads_params); - - VLOG(10) << string::Sprintf("SetGroupAccordingToGroupSize(group_size: %d):", - group_size); - for (size_t i = 0; i < group_grads_params->size(); ++i) { - VLOG(10) << "group " << i; - std::stringstream out; - for (auto &p_g : group_grads_params->at(i)) { - out << "(" << p_g.second << ", " << p_g.first << "), "; - } - VLOG(10) << out.str(); - } -} - -bool AllocContinuousSpaceForGradPass::IsSupportedVarType( - const proto::VarType::Type &type) const { - // Current only support LOD_TENSOR. 
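The two grouping routines above follow the same chunking pattern: SetGroupAccordingToGroupSize closes a chunk after FLAGS_fuse_parameter_groups_size per-layer groups, while SetGroupAccordingToMemorySize closes it once the accumulated gradient bytes reach FLAGS_fuse_parameter_memory_size. A minimal sketch of the count-based rule (illustrative helper, simplified types):

```cpp
#include <cstddef>
#include <vector>

// With group_size = 3, ten per-layer groups collapse into ceil(10/3) = 4
// fused groups; a group_size of -1 is handled by the caller as "one group".
template <typename T>
std::vector<std::vector<T>> ChunkGroups(
    const std::vector<std::vector<T>>& groups, std::size_t group_size) {
  std::vector<std::vector<T>> chunked;
  chunked.reserve((groups.size() + group_size - 1) / group_size);
  for (std::size_t j = 0; j < groups.size(); ++j) {
    if (j % group_size == 0) chunked.emplace_back();
    chunked.back().insert(chunked.back().end(), groups[j].begin(),
                          groups[j].end());
  }
  return chunked;
}
```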
- return type == proto::VarType::LOD_TENSOR; -} - -void AllocContinuousSpaceForGradPass::RecordParamsAndGrads( - ir::Node *node, ParamsAndGrads *params_grads) const { - try { - bool is_bk_op = - static_cast(boost::get(node->Op()->GetAttr( - OpProtoAndCheckerMaker::OpRoleAttrName())) & - static_cast(OpRole::kBackward)); - if (!is_bk_op) return; - - // Currently, we assume that once gradient is generated, it can be - // broadcast, and each gradient is only broadcast once. - auto backward_vars = - boost::get>(node->Op()->GetNullableAttr( - OpProtoAndCheckerMaker::OpRoleVarAttrName())); - PADDLE_ENFORCE_EQ(backward_vars.size() % 2, static_cast(0)); - - for (size_t i = 0; i < backward_vars.size(); i += 2) { - VLOG(10) << "Trainable parameter: " << backward_vars[i] - << ", gradient: " << backward_vars[i + 1]; - - params_grads->emplace_back(std::make_pair(backward_vars[i] /*param*/, - backward_vars[i + 1] /*grad*/)); - } - } catch (boost::bad_get e) { - } -} - -void AllocContinuousSpaceForGradPass::InitFusedVarsAndAllocSpaceForVars( - const std::vector &places, - const std::vector &local_scopes, - const std::unordered_map &vars, - const std::string &fused_var_name, - const ParamsAndGrads ¶ms_grads) const { - // Init Gradients and FusedVars - VLOG(10) << "Init FusedVars and Gradients."; - for (auto it = local_scopes.rbegin(); it != local_scopes.rend(); ++it) { - auto &scope = *it; - - PADDLE_ENFORCE(scope->FindVar(fused_var_name) == nullptr, - "%s has existed in scope.", fused_var_name); - scope->Var(fused_var_name)->GetMutable(); - - for (auto &p_g : params_grads) { - auto iter = vars.find(p_g.second); - PADDLE_ENFORCE(iter != vars.end()); - PADDLE_ENFORCE_NOT_NULL(iter->second->Var()); - PADDLE_ENFORCE_EQ(iter->second->Var()->GetType(), - proto::VarType::LOD_TENSOR); - scope->Var(p_g.second)->GetMutable(); - } - } - - // Alloc continuous space for vars. 
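RecordParamsAndGrads above consumes the OpRoleVar attribute, a flat even-length list that interleaves parameter and gradient names as [p0, g0, p1, g1, ...]. A standalone sketch of that unzipping, without the boost attribute plumbing:

```cpp
#include <cassert>
#include <cstddef>
#include <string>
#include <utility>
#include <vector>

std::vector<std::pair<std::string, std::string>> UnzipParamsAndGrads(
    const std::vector<std::string>& backward_vars) {
  assert(backward_vars.size() % 2 == 0);  // mirrors the PADDLE_ENFORCE_EQ
  std::vector<std::pair<std::string, std::string>> params_grads;
  params_grads.reserve(backward_vars.size() / 2);
  for (std::size_t j = 0; j < backward_vars.size(); j += 2) {
    params_grads.emplace_back(backward_vars[j] /*param*/,
                              backward_vars[j + 1] /*grad*/);
  }
  return params_grads;
}
```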
- std::vector grads_name; - std::vector params_name; - grads_name.reserve(params_grads.size()); - params_name.reserve(params_grads.size()); - for (auto &p_g : params_grads) { - params_name.emplace_back(p_g.first); - grads_name.emplace_back(p_g.second); - } - framework::ProgramDesc program_desc; - AppendAllocSpaceForVarsOp(params_name, grads_name, fused_var_name, - program_desc.MutableBlock(0)); - - for (size_t i = 0; i < local_scopes.size(); ++i) { - for (auto &op_desc : program_desc.Block(0).AllOps()) { - auto op = OpRegistry::CreateOp(*op_desc); - op->Run(*local_scopes[i], places[i]); - } - } -} - -void AllocContinuousSpaceForGradPass::AppendAllocSpaceForVarsOp( - const std::vector ¶ms_name, - const std::vector &grads_name, - const std::string &fused_var_name, BlockDesc *global_block) const { - auto op_desc = global_block->AppendOp(); - op_desc->SetType("alloc_continuous_space"); - op_desc->SetInput("Input", params_name); - op_desc->SetOutput("Output", grads_name); - op_desc->SetOutput("FusedOutput", {fused_var_name}); -} - -} // namespace details -} // namespace framework -} // namespace paddle - -REGISTER_PASS(alloc_continuous_space_for_grad_pass, - paddle::framework::details::AllocContinuousSpaceForGradPass) - .RequirePassAttr(paddle::framework::details::kPlaces) - .RequirePassAttr(paddle::framework::details::kLocalScopes); diff --git a/paddle/fluid/framework/details/alloc_continuous_space_for_grad_pass.h b/paddle/fluid/framework/details/alloc_continuous_space_for_grad_pass.h deleted file mode 100644 index e6d56f17cc4ef7e07500aae8067211a7b9ac04b0..0000000000000000000000000000000000000000 --- a/paddle/fluid/framework/details/alloc_continuous_space_for_grad_pass.h +++ /dev/null @@ -1,79 +0,0 @@ -// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
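The Set/GetFuseParameter* helpers defined in the pass above exist so unit tests can adjust the fusion flags programmatically rather than through gflags. A hedged usage sketch (illustrative values; the memory size is assumed to be in bytes, since the grouping code compares it against raw byte counts):

```cpp
#include <cstdint>

// Hypothetical test setup: cap each fused group at 64 MB, or at two
// per-layer groups when grouping by count.
void ConfigureGradientFusionForTest() {
  SetFuseParameterMemorySize(static_cast<uint64_t>(64) << 20);  // 64 MB
  SetFuseParameterGroupsSize(2);
}
```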
-#pragma once -#include -#include -#include -#include -#include -#include "paddle/fluid/framework/details/build_strategy.h" -#include "paddle/fluid/framework/details/multi_devices_helper.h" -#include "paddle/fluid/framework/ir/graph_helper.h" - -namespace paddle { -namespace framework { -namespace details { - -void SetFuseParameterGroupsSize(int group_size); -int GetFuseParameterGroupsSize(); - -void SetFuseParameterMemorySize(uint64_t memory_size); -uint64_t GetFuseParameterMemorySize(); - -class AllocContinuousSpaceForGradPass : public ir::Pass { - protected: - void ApplyImpl(ir::Graph *graph) const override; - - template - void ResetAttribute(const std::string &attr_name, ir::Graph *graph) const; - - void SetGroupGradsAndParams( - const std::unordered_map &var_nodes, - const ParamsAndGrads ¶ms_grads, - GroupGradsAndParams *group_grads_params) const; - - void SetGroupAccordingToLayers( - const std::unordered_map &var_nodes, - const ParamsAndGrads ¶ms_grads, - GroupGradsAndParams *group_grads_params) const; - - void SetGroupAccordingToMemorySize( - const std::unordered_map &var_nodes, - GroupGradsAndParams *group_grads_params) const; - - void SetGroupAccordingToGroupSize( - const std::unordered_map &var_nodes, - GroupGradsAndParams *group_grads_params) const; - - private: - bool IsSupportedVarType(const proto::VarType::Type &type) const; - - void RecordParamsAndGrads(ir::Node *node, ParamsAndGrads *params_grads) const; - - void InitFusedVarsAndAllocSpaceForVars( - const std::vector &places, - const std::vector &local_scopes, - const std::unordered_map &vars, - const std::string &fused_var_name, - const ParamsAndGrads ¶ms_grads) const; - - void AppendAllocSpaceForVarsOp(const std::vector ¶ms_name, - const std::vector &grads_name, - const std::string &fused_var_name, - BlockDesc *global_block) const; -}; - -} // namespace details -} // namespace framework -} // namespace paddle diff --git a/paddle/fluid/framework/details/build_strategy.cc b/paddle/fluid/framework/details/build_strategy.cc index 8aa4a9645dd9866c3769bbfac445c51283ec66d2..845fdf511e455509ff3e871084c17163c90c674a 100644 --- a/paddle/fluid/framework/details/build_strategy.cc +++ b/paddle/fluid/framework/details/build_strategy.cc @@ -17,15 +17,14 @@ limitations under the License. 
*/ #include #include #include -#include "paddle/fluid/framework/details/memory_optimize_helper.h" -#include "paddle/fluid/framework/details/multi_devices_graph_pass.h" -#include "paddle/fluid/framework/details/multi_devices_graph_print_pass.h" #include "paddle/fluid/framework/details/reduce_op_handle.h" -#include "paddle/fluid/framework/details/sequential_execution_pass.h" #include "paddle/fluid/framework/ir/graph.h" #include "paddle/fluid/framework/ir/graph_helper.h" #include "paddle/fluid/framework/ir/graph_to_program_pass.h" #include "paddle/fluid/framework/ir/graph_viz_pass.h" +#include "paddle/fluid/framework/ir/memory_optimize_pass/memory_optimize_helper.h" +#include "paddle/fluid/framework/ir/multi_devices_graph_pass/multi_devices_graph_pass.h" +#include "paddle/fluid/framework/ir/multi_devices_graph_pass/multi_devices_graph_print_pass.h" namespace paddle { namespace framework { @@ -173,10 +172,10 @@ class ParallelExecutorPassBuilder : public ir::PassBuilder { const std::string graph_path = string::Sprintf("%s%s", strategy_.debug_graphviz_path_.c_str(), "_multi_devices_graph"); - multi_devices_print_pass->Set(kGraphvizPath, + multi_devices_print_pass->Set(ir::kGraphvizPath, new std::string(graph_path)); - multi_devices_print_pass->Set( - "graph_printer", new details::GraphvizSSAGraphPrinter); + multi_devices_print_pass->Set( + "graph_printer", new ir::GraphvizSSAGraphPrinter); } // experimental shows that the program will be faster if append @@ -240,7 +239,7 @@ std::shared_ptr BuildStrategy::CreatePassesFromStrategy( } bool BuildStrategy::IsMultiDevPass(const std::string &pass_name) const { - return framework::details::MultiDevSSAGraphBuilder().count(pass_name) > 0; + return framework::ir::MultiDevSSAGraphBuilder().count(pass_name) > 0; } ir::Graph *BuildStrategy::Apply(ir::Graph *graph, @@ -263,13 +262,13 @@ ir::Graph *BuildStrategy::Apply(ir::Graph *graph, if (IsMultiDevPass(pass->Type())) { pass->Erase(kPlaces); pass->SetNotOwned>(kPlaces, &places); - pass->Erase(kLossVarName); - pass->SetNotOwned(kLossVarName, &loss_var_name); + pass->Erase(ir::kLossVarName); + pass->SetNotOwned(ir::kLossVarName, &loss_var_name); pass->Erase(kLocalScopes); pass->SetNotOwned>(kLocalScopes, &local_scopes); - pass->Erase(kNRanks); - pass->Set(kNRanks, new size_t(nranks)); + pass->Erase(ir::kNRanks); + pass->Set(ir::kNRanks, new size_t(nranks)); #if defined(PADDLE_WITH_CUDA) && !defined(_WIN32) platform::NCCLContextMap *nctx = use_cuda ? 
nccl_ctxs : nullptr; @@ -312,8 +311,8 @@ ir::Graph *BuildStrategy::Apply(ir::Graph *graph, continue; } } else if (pass->Type() == "inplace_pass") { - pass->Erase(kUseCuda); - pass->Set(kUseCuda, new bool(use_cuda)); + pass->Erase(ir::kUseCuda); + pass->Set(ir::kUseCuda, new bool(use_cuda)); } VLOG(3) << "Start Apply Pass " << pass->Type(); graph = pass->Apply(graph); diff --git a/paddle/fluid/framework/details/eager_deletion_op_handle.cc b/paddle/fluid/framework/details/eager_deletion_op_handle.cc index 52e6d599ebbdd2c9e1fe51a7d223b63801143609..c8e27c7275fe70598e41cbb2cc8482d610c2e113 100644 --- a/paddle/fluid/framework/details/eager_deletion_op_handle.cc +++ b/paddle/fluid/framework/details/eager_deletion_op_handle.cc @@ -31,7 +31,7 @@ namespace details { EagerDeletionOpHandle::EagerDeletionOpHandle( ir::Node *node, const Scope *scope, const platform::Place &place, const std::unordered_set &var_names, GarbageCollector *gc, - AtomicReferenceCountMap *ref_cnts) + ir::AtomicReferenceCountMap *ref_cnts) : OpHandleBase(node), scope_(scope), var_names_(var_names.begin(), var_names.end()), diff --git a/paddle/fluid/framework/details/eager_deletion_op_handle.h b/paddle/fluid/framework/details/eager_deletion_op_handle.h index 6300b9173b5ae7278dc22508b68d878a1589047c..fe723922ca711a6348fddcaabbdf635cb7d2983d 100644 --- a/paddle/fluid/framework/details/eager_deletion_op_handle.h +++ b/paddle/fluid/framework/details/eager_deletion_op_handle.h @@ -20,7 +20,7 @@ #include #include #include "paddle/fluid/framework/details/op_handle_base.h" -#include "paddle/fluid/framework/details/reference_count_pass_helper.h" +#include "paddle/fluid/framework/ir/memory_optimize_pass/reference_count_pass_helper.h" namespace paddle { namespace framework { @@ -34,7 +34,7 @@ class EagerDeletionOpHandle : public OpHandleBase { const platform::Place &place, const std::unordered_set &var_names, GarbageCollector *gc, - AtomicReferenceCountMap *ref_cnts); + ir::AtomicReferenceCountMap *ref_cnts); ~EagerDeletionOpHandle(); @@ -55,8 +55,8 @@ class EagerDeletionOpHandle : public OpHandleBase { const Scope *scope_; std::vector var_names_; - GarbageCollector *gc_; // not own - AtomicReferenceCountMap *ref_cnts_; // not own + GarbageCollector *gc_; // not own + ir::AtomicReferenceCountMap *ref_cnts_; // not own #ifdef PADDLE_WITH_CUDA platform::CUDADeviceContext *dev_ctx_{nullptr}; cudaEvent_t event_{nullptr}; diff --git a/paddle/fluid/framework/details/fetch_op_handle.cc b/paddle/fluid/framework/details/fetch_op_handle.cc index 6c8b8937ebe646042f71cb58cfbc2d32426a4e3c..fe14e3e91da34e5993a68d10a13b275bab576ce6 100644 --- a/paddle/fluid/framework/details/fetch_op_handle.cc +++ b/paddle/fluid/framework/details/fetch_op_handle.cc @@ -63,8 +63,7 @@ void FetchOpHandle::RunImpl() { auto &t = var->Get(); if (platform::is_gpu_place(t.place())) { #ifdef PADDLE_WITH_CUDA - TensorCopy(t, cpu, *dev_ctxes_.at(t.place()), &tensors_[i]); - dev_ctxes_.at(t.place())->Wait(); + TensorCopy(t, cpu, &tensors_[i]); #endif } else { tensors_[i].ShareDataWith(t); diff --git a/paddle/fluid/framework/details/op_handle_base.h b/paddle/fluid/framework/details/op_handle_base.h index 647b238634a51aed92f3bcf4171416838c0f3cc6..3412fa0bb76fafbef7d1abbee72bf46c361152f9 100644 --- a/paddle/fluid/framework/details/op_handle_base.h +++ b/paddle/fluid/framework/details/op_handle_base.h @@ -27,7 +27,7 @@ namespace paddle { namespace framework { namespace details { -constexpr char kLocalExecScopeName[] = "@LOCAL_SCOPE@"; +constexpr char kLocalExecScopeName[] = 
"@LOCAL_EXE_SCOPE@"; // Wraps ir::Node and provide helper utilities. // It's responsible for populating necessary fields of ir::Node. diff --git a/paddle/fluid/framework/details/record_skip_memory_opt_vars_pass.cc b/paddle/fluid/framework/details/record_skip_memory_opt_vars_pass.cc deleted file mode 100644 index 7cb2544ebbfbf42f5e3c014528c56bf17989292e..0000000000000000000000000000000000000000 --- a/paddle/fluid/framework/details/record_skip_memory_opt_vars_pass.cc +++ /dev/null @@ -1,64 +0,0 @@ -// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -#include -#include "paddle/fluid/framework/details/memory_optimize_helper.h" -#include "paddle/fluid/framework/ir/graph.h" -#include "paddle/fluid/framework/ir/graph_helper.h" -#include "paddle/fluid/framework/ir/pass.h" -#include "paddle/fluid/framework/op_proto_maker.h" - -namespace paddle { -namespace framework { -namespace details { - -class RecordSkipMemoryOptVarsPass : public ir::Pass { - protected: - void ApplyImpl(ir::Graph* graph) const override { - PADDLE_ENFORCE(!graph->Has(kMemOptSkipVars)); - graph->Set(kMemOptSkipVars, new MemOptSkipVars); - auto& skip_vars = graph->Get(kMemOptSkipVars); - - // NOTE(zcd): Insert OpRoleVars to SkipVarSet to prevent the vars are rename - // in memory optimize pass. 
- InsertOpRoleVarsToSkipVarSet(graph, &skip_vars); - } - - void InsertOpRoleVarsToSkipVarSet(const ir::Graph* graph, - MemOptSkipVars* skip_vars) const { - for (auto& node : graph->Nodes()) { - PADDLE_ENFORCE_NOT_NULL(node, "The node should not be nullptr."); - if (node->IsOp() && node->Op()) { - try { - auto op_role_vars = - boost::get>(node->Op()->GetNullableAttr( - OpProtoAndCheckerMaker::OpRoleVarAttrName())); - PADDLE_ENFORCE_EQ(op_role_vars.size() % 2, 0); - for (size_t i = 0; i < op_role_vars.size(); i += 2) { - auto& g_name = op_role_vars[i + 1]; - skip_vars->insert(g_name); - } - } catch (boost::bad_get e) { - } - } - } - } -}; - -} // namespace details -} // namespace framework -} // namespace paddle - -REGISTER_PASS(record_skip_memory_opt_vars_pass, - paddle::framework::details::RecordSkipMemoryOptVarsPass); diff --git a/paddle/fluid/framework/details/scope_buffered_ssa_graph_executor.cc b/paddle/fluid/framework/details/scope_buffered_ssa_graph_executor.cc index 4a7b7d1329a6a6c6da9b581eaa93f54038c9420d..247d78479348da998a46d7838b89c481c9e299e5 100644 --- a/paddle/fluid/framework/details/scope_buffered_ssa_graph_executor.cc +++ b/paddle/fluid/framework/details/scope_buffered_ssa_graph_executor.cc @@ -68,15 +68,7 @@ FeedFetchList ScopeBufferedSSAGraphExecutor::Run( ++drop_scope_counter_; if (drop_scope_counter_ == strategy_.num_iteration_per_drop_scope_) { - WaitComputationalStreams(); - - for (auto &scope : local_scopes_) { - auto &local_scope = - *scope->Var(details::kLocalExecScopeName)->GetMutable(); - scope->DeleteScope(local_scope); - } - - drop_scope_counter_ = 0; + DropLocalExeScopes(); } if (eptr) { std::rethrow_exception(eptr); @@ -84,6 +76,25 @@ FeedFetchList ScopeBufferedSSAGraphExecutor::Run( return fetch_data; } } + +void ScopeBufferedSSAGraphExecutor::DropLocalExeScopes() { + drop_scope_counter_ = 0; + for (auto p : places_) { + platform::DeviceContextPool::Instance().Get(p)->Wait(); + } + + for (auto &scope : local_scopes_) { + auto &local_scope = + *scope->Var(details::kLocalExecScopeName)->GetMutable(); + scope->DeleteScope(local_scope); + VLOG(3) << "Drop local execution scope: " << local_scope; + } +} + +bool ScopeBufferedSSAGraphExecutor::NeedCreateLocalExeScope() { + return drop_scope_counter_ == 0; +} + } // namespace details } // namespace framework } // namespace paddle diff --git a/paddle/fluid/framework/details/scope_buffered_ssa_graph_executor.h b/paddle/fluid/framework/details/scope_buffered_ssa_graph_executor.h index 0f6340213daee98a75401f9db0e628f7b4fd79fc..030777cad894fa24ccdc0afa1aae8e7e4caa90ee 100644 --- a/paddle/fluid/framework/details/scope_buffered_ssa_graph_executor.h +++ b/paddle/fluid/framework/details/scope_buffered_ssa_graph_executor.h @@ -47,17 +47,12 @@ class ScopeBufferedSSAGraphExecutor : public SSAGraphExecutor { FeedFetchList Run(const std::vector& fetch_tensors) override; - private: - inline void WaitComputationalStreams() { - // Wait All computational streams - for (auto p : places_) { - platform::DeviceContextPool::Instance().Get(p)->Wait(); - } - } + void DropLocalExeScopes(); + + bool NeedCreateLocalExeScope(); private: size_t drop_scope_counter_{0}; - ExecutionStrategy strategy_; std::unique_ptr underlying_executor_; std::vector local_scopes_; diff --git a/paddle/fluid/framework/details/sequential_execution_pass.cc b/paddle/fluid/framework/details/sequential_execution_pass.cc deleted file mode 100644 index 839f8dc43ed8c6f13380732b221520b3bb59b099..0000000000000000000000000000000000000000 --- 
a/paddle/fluid/framework/details/sequential_execution_pass.cc +++ /dev/null @@ -1,108 +0,0 @@ -// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -#include "paddle/fluid/framework/details/sequential_execution_pass.h" -#include -#include -#include -#include -#include "paddle/fluid/framework/details/memory_optimize_helper.h" -#include "paddle/fluid/framework/op_proto_maker.h" - -namespace paddle { -namespace framework { -namespace details { - -static bool IsSameOpDesc(OpDesc *op1, OpDesc *op2) { - return op1->Type() == op2->Type() && op1->Inputs() == op2->Inputs() && - op1->Outputs() == op2->Outputs(); -} - -void SequentialExecutionPass::ApplyImpl(ir::Graph *graph) const { - // FIXME(zjl): Insert dependencies between some distributed ops may cause - // the multi_devices_graph_pass fails. So we skip these ops here. - // Indeed, maybe we should not insert dependencies between these ops - // casually, which may cause deadlock easily. - // We should add more skipped distributed ops when found errors in - // multi_devices_graph_pass - static std::unordered_set skip_dist_ops{ - "send", "recv", "send_barrier", "fetch_barrier"}; - - auto &ops = graph->Get>(kStaleProgramOpDescs); - std::vector op_node_list; - op_node_list.reserve(ops.size()); - - std::unordered_map op_deps; - std::unordered_map> pending_ops; - std::unordered_set ready_ops; - - for (ir::Node *node : graph->Nodes()) { - if (!node->IsOp()) continue; - std::unordered_set preceding_ops; - for (auto *in : node->inputs) { - PADDLE_ENFORCE(in->IsVar(), - "Preceding Node of Op Nodes must be Var Node"); - if (in->inputs.empty()) continue; - PADDLE_ENFORCE(in->inputs.size() == 1 && in->inputs[0]->IsOp(), - "Preceding Op Node of Var Node must be unique"); - preceding_ops.insert(in->inputs[0]); - pending_ops[in->inputs[0]].insert(node); - } - op_deps[node] = preceding_ops.size(); - if (preceding_ops.empty()) { - ready_ops.insert(node); - } - } - - for (auto *op_desc : ops) { - ir::Node *found_node = nullptr; - for (auto *node : ready_ops) { - if (IsSameOpDesc(op_desc, node->Op())) { - PADDLE_ENFORCE(found_node == nullptr, - "Found multiple op_desc in graph: %s", op_desc->Type()); - found_node = node; - } - } - - PADDLE_ENFORCE_NOT_NULL(found_node, "Cannot find op_desc in graph: %s", - op_desc->Type()); - for (auto *pending_op : pending_ops[found_node]) { - if (--op_deps.at(pending_op) == 0) { - ready_ops.insert(pending_op); - } - } - ready_ops.erase(found_node); - if (skip_dist_ops.count(op_desc->Type()) == 0) { - op_node_list.push_back(found_node); - } - } - - for (size_t i = 1; i < op_node_list.size(); ++i) { - auto *dep_var = graph->CreateControlDepVar(); - op_node_list[i]->inputs.push_back(dep_var); - op_node_list[i - 1]->outputs.push_back(dep_var); - dep_var->outputs.push_back(op_node_list[i]); - dep_var->inputs.push_back(op_node_list[i - 1]); - VLOG(10) << "Add dependencies between " << op_node_list[i - 1]->Name() - << " and " << op_node_list[i]->Name(); - } -} - -} // 
namespace details -} // namespace framework -} // namespace paddle - -REGISTER_PASS(sequential_execution_pass, - paddle::framework::details::SequentialExecutionPass) - .RequireGraphAttr(paddle::framework::details::kStaleProgramOpDescs); diff --git a/paddle/fluid/framework/downpour_worker.cc b/paddle/fluid/framework/downpour_worker.cc index 386ffd84c57063e950cd8b0d57304c66190be4c4..8e184e5d3cbc6d73c45aef97981dda410bc0f962 100644 --- a/paddle/fluid/framework/downpour_worker.cc +++ b/paddle/fluid/framework/downpour_worker.cc @@ -425,6 +425,7 @@ void DownpourWorker::TrainFiles() { } VLOG(3) << "push dense gradient done."; + // the following code should be more precise and clean // TODO(guru4elephant) int32_t tmp_push_dense_wait_times = -1; diff --git a/paddle/fluid/framework/inplace_op_inference.h b/paddle/fluid/framework/inplace_op_inference.h index fddcbaf596d52a428d41298c499d798b465f98a2..b5eb61f23e56fafca33e85ee4a288af53b9ceb2e 100644 --- a/paddle/fluid/framework/inplace_op_inference.h +++ b/paddle/fluid/framework/inplace_op_inference.h @@ -19,7 +19,7 @@ #include #include #include "glog/logging.h" -#include "paddle/fluid/framework/details/memory_optimize_helper.h" +#include "paddle/fluid/framework/ir/memory_optimize_pass/memory_optimize_helper.h" #include "paddle/fluid/framework/op_desc.h" #include "paddle/fluid/framework/type_defs.h" diff --git a/paddle/fluid/framework/inplace_op_inference_test.cc b/paddle/fluid/framework/inplace_op_inference_test.cc index cebca9207a35c9d907e3041f18af70e576bd8ea9..727e579d0d2db7cb22c5f5178a53bf52f291f313 100644 --- a/paddle/fluid/framework/inplace_op_inference_test.cc +++ b/paddle/fluid/framework/inplace_op_inference_test.cc @@ -18,7 +18,7 @@ #include #include #include "gtest/gtest.h" -#include "paddle/fluid/framework/details/memory_optimize_helper.h" +#include "paddle/fluid/framework/ir/memory_optimize_pass/memory_optimize_helper.h" #include "paddle/fluid/framework/ir/pass_builder.h" #include "paddle/fluid/framework/op_info.h" #include "paddle/fluid/framework/op_registry.h" @@ -33,7 +33,7 @@ namespace framework { std::unique_ptr CreateInplacePass() { auto pass = ir::PassRegistry::Instance().Get("inplace_pass"); - pass->Set(details::kUseCuda, new bool(true)); + pass->Set(ir::kUseCuda, new bool(true)); return pass; } @@ -225,7 +225,7 @@ TEST(InferInplace, SingleOpInplaceInToOut) { FakeSuccData(&prog); std::unique_ptr g(new ir::Graph(prog)); - g->Set(details::kMemOptSkipVars, new std::unordered_set()); + g->Set(ir::kMemOptSkipVars, new std::unordered_set()); g = test_SingleOpInplaceInToOut(std::move(g)); auto op_node = GetNodeFromGraph(g.get(), "single_op"); @@ -241,7 +241,7 @@ TEST(InferInplace, SingleOpInplaceInToOutNoInplace) { FakeNoInplaceData(&prog); std::unique_ptr g(new ir::Graph(prog)); - g->Set(details::kMemOptSkipVars, new std::unordered_set()); + g->Set(ir::kMemOptSkipVars, new std::unordered_set()); g = test_SingleOpInplaceInToOut(std::move(g)); auto op_node = GetNodeFromGraph(g.get(), "single_op"); @@ -274,7 +274,7 @@ TEST(InferInplace, MultiOutInplaceInToOut) { prog.MutableBlock(0)->Var("z0")->SetShape({32, 16, 1024, 1024}); std::unique_ptr g(new ir::Graph(prog)); - g->Set(details::kMemOptSkipVars, new std::unordered_set()); + g->Set(ir::kMemOptSkipVars, new std::unordered_set()); auto pass = CreateInplacePass(); pass->Apply(g.get()); auto op_node = GetNodeFromGraph(g.get(), "multi_out_op"); @@ -310,7 +310,7 @@ TEST(InferInplace, MultiGradInplaceInToOut) { prog.MutableBlock(0)->Var("z0")->SetShape({32, 15, 1024, 1024}); std::unique_ptr g(new 
ir::Graph(prog)); - g->Set(details::kMemOptSkipVars, new std::unordered_set()); + g->Set(ir::kMemOptSkipVars, new std::unordered_set()); auto pass = CreateInplacePass(); pass->Apply(g.get()); auto op_node = GetNodeFromGraph(g.get(), "multi_out_grad"); diff --git a/paddle/fluid/framework/ir/CMakeLists.txt b/paddle/fluid/framework/ir/CMakeLists.txt index 16fc1721eb6f5d2517ad45289f2415ef41749df2..032fcbedf49cb96c93e85971d5c03915af044310 100644 --- a/paddle/fluid/framework/ir/CMakeLists.txt +++ b/paddle/fluid/framework/ir/CMakeLists.txt @@ -3,6 +3,9 @@ file(WRITE ${pass_file} "// Generated by the paddle/fluid/framework/ir/CMakeList file(APPEND ${pass_file} "\#pragma once\n") file(APPEND ${pass_file} "\#include \"paddle/fluid/framework/ir/pass.h\"\n") +add_subdirectory(fuse_optimizer_ops_pass) +add_subdirectory(memory_optimize_pass) +add_subdirectory(multi_devices_graph_pass) # Usage: pass_library(target inference) will append to paddle_inference_pass.h unset(INFER_IR_PASSES CACHE) # clear the global variable @@ -34,7 +37,6 @@ function(pass_library TARGET DEST) endif() endfunction() - cc_library(node SRCS node.cc DEPS proto_desc) cc_library(graph SRCS graph.cc DEPS node pretty_log) cc_library(graph_helper SRCS graph_helper.cc DEPS graph) @@ -43,6 +45,8 @@ cc_library(graph_traits SRCS graph_traits.cc DEPS graph) cc_library(graph_pattern_detector SRCS graph_pattern_detector.cc DEPS graph graph_helper graph_traits) cc_library(fuse_pass_base SRCS fuse_pass_base.cc DEPS pass) +cc_library(alloc_continuous_space_for_grad_pass SRCS alloc_continuous_space_for_grad_pass.cc DEPS graph graph_helper) + pass_library(graph_to_program_pass base) pass_library(graph_viz_pass base) pass_library(lock_free_optimize_pass base) @@ -71,6 +75,7 @@ pass_library(runtime_context_cache_pass base) pass_library(expected_kernel_cache_pass base) pass_library(quant_conv2d_dequant_fuse_pass inference) pass_library(fillconstant_elementwisemul_fuse inference) +pass_library(shuffle_channel_detect_pass inference) if(ANAKIN_FOUND) pass_library(simplify_anakin_priorbox_detection_out_pass inference) diff --git a/paddle/fluid/framework/ir/alloc_continuous_space_for_grad_pass.cc b/paddle/fluid/framework/ir/alloc_continuous_space_for_grad_pass.cc new file mode 100644 index 0000000000000000000000000000000000000000..12d5ad7ed8ccbe3db925ce59dacf935dad158e5c --- /dev/null +++ b/paddle/fluid/framework/ir/alloc_continuous_space_for_grad_pass.cc @@ -0,0 +1,414 @@ +// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
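The new pass introduced below lays every gradient out back-to-back in one fused buffer, so that a single communication call (e.g. NCCLAllReduce) can cover all gradients at once. The following is a minimal sketch of that layout idea; the names (GradSlice, LayoutFusedBuffer) are hypothetical and not taken from this patch.

#include <cstddef>
#include <string>
#include <utility>
#include <vector>

// Each gradient receives a contiguous slice [offset, offset + numel) of a
// single fused buffer; the buffer's total size is the sum of all numels.
struct GradSlice {
  std::string name;
  std::size_t offset;
  std::size_t numel;
};

std::vector<GradSlice> LayoutFusedBuffer(
    const std::vector<std::pair<std::string, std::size_t>> &grads) {
  std::vector<GradSlice> slices;
  std::size_t offset = 0;
  for (const auto &g : grads) {
    slices.push_back({g.first, offset, g.second});
    offset += g.second;  // the next gradient starts where this one ends
  }
  return slices;
}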
+ +#include "paddle/fluid/framework/ir/alloc_continuous_space_for_grad_pass.h" +#include +#include +#include +#include +#include +#include "paddle/fluid/framework/details/build_strategy.h" +#include "paddle/fluid/framework/details/multi_devices_helper.h" +#include "paddle/fluid/framework/ir/graph_helper.h" +#include "paddle/fluid/framework/op_registry.h" + +DEFINE_uint64(fuse_parameter_memory_size, 0, // 0 KB + "fuse_parameter_memory_size is up limited memory size " + "of one group parameters' gradient which is the input " + "of communication calling(e.g NCCLAllReduce). " + "The default value is 0, it means that " + "not set group according to memory_size."); +DEFINE_int32( + fuse_parameter_groups_size, 3, + "fuse_parameter_groups_size is the size of one group parameters' gradient. " + "The default value is a experimental result. If the " + "fuse_parameter_groups_size is 1, it means that the groups size is " + "the number of parameters' gradient. If the fuse_parameter_groups_size is " + "-1, it means that there are only one group. The default value is 3, it is " + "an experimental value."); + +namespace paddle { +namespace framework { +namespace ir { +// SetFuseParameterGroupsSize and SetFuseParameterMemorySize are used in unit +// test, because it is invalid that seting 'FLAGS_fuse_parameter_memory_size' +// and 'FLAGS_fuse_parameter_groups_size' in unit test. +void SetFuseParameterGroupsSize(int group_size) { + FLAGS_fuse_parameter_groups_size = group_size; +} + +int GetFuseParameterGroupsSize() { return FLAGS_fuse_parameter_groups_size; } + +void SetFuseParameterMemorySize(uint64_t memory_size) { + FLAGS_fuse_parameter_memory_size = memory_size; +} + +uint64_t GetFuseParameterMemorySize() { + return FLAGS_fuse_parameter_memory_size; +} + +static const char kUnKnow[] = "@UNKNOW@"; +static framework::proto::VarType::Type kDefaultDtype = + framework::proto::VarType::Type::VarType_Type_BOOL; + +class AllocContinuousSpaceForGradPass : public ir::Pass { + protected: + void ApplyImpl(ir::Graph *graph) const { + ir::Graph &result = *graph; + + auto &places = Get>(details::kPlaces); + auto &local_scopes = Get>(details::kLocalScopes); + + ResetAttribute(details::kParamsAndGrads, &result); + ResetAttribute(details::kGroupGradsAndParams, + &result); + + // NOTE: The operator nodes should be in topology order. + std::vector topo_nodes = ir::TopologySortOperations(result); + auto ¶ms_grads = + result.Get(details::kParamsAndGrads); + for (auto &node : topo_nodes) { + RecordParamsAndGrads(node, ¶ms_grads); + } + + if (params_grads.size() == 0) { + VLOG(10) << "Doesn't find gradients"; + return; + } + + std::unordered_map vars; + for (ir::Node *node : result.Nodes()) { + if (node->IsVar() && node->Var()) { + // Note: The graph may have the same name node. For example, parameter + // is the input of operator and it also is the output of optimizer; + vars.emplace(node->Var()->Name(), node); + } + } + + auto &group_grads_params = + result.Get(details::kGroupGradsAndParams); + + // Note: the order of params_grads may be changed by SetGroupGradsAndParams. + SetGroupGradsAndParams(vars, params_grads, &group_grads_params); + + params_grads.clear(); + for (auto &group_p_g : group_grads_params) { + params_grads.insert(params_grads.begin(), group_p_g.begin(), + group_p_g.end()); + } + for (auto &p_g : params_grads) { + std::swap(p_g.first, p_g.second); + } + + // Set Gradients as Persistable to prevent this var becoming reusable. 
+ auto dtype = kDefaultDtype; + for (auto &p_g : params_grads) { + // Get gradient var + auto iter = vars.find(p_g.second); + PADDLE_ENFORCE(iter != vars.end(), "%s is not found.", p_g.second); + iter->second->Var()->SetPersistable(true); + + PADDLE_ENFORCE(IsSupportedVarType(iter->second->Var()->GetType())); + + // Get Dtype + auto ele_dtype = iter->second->Var()->GetDataType(); + if (dtype == kDefaultDtype) { + dtype = ele_dtype; + PADDLE_ENFORCE_NE(ele_dtype, kDefaultDtype, + "The data type should not be bool."); + } + PADDLE_ENFORCE_EQ(ele_dtype, dtype, + "The data types of the inputs are not consistent."); + } + + // Create a FusedVarsSet to avoid duplicating names for fused_var in other + // passes. + if (!result.Has(details::kFusedVars)) { + result.Set(details::kFusedVars, new details::FusedVars); + } + // the kFusedGrads is used by fuse_optimizer_op_pass. + result.Set(details::kFusedGrads, new details::FusedGrads); + + // the fused_var_name should be unique, so params_grads.begin()->second is + // appended. + auto fused_var_name = std::string(details::kFusedVarNamePrefix) + "@GRAD@" + + params_grads.begin()->second; + result.Get(details::kFusedGrads) = fused_var_name; + auto &fused_var_set = result.Get(details::kFusedVars); + PADDLE_ENFORCE_EQ(fused_var_set.count(fused_var_name), 0, + "%s is duplicated in FusedVars.", fused_var_name); + fused_var_set.insert(fused_var_name); + + InitFusedVarsAndAllocSpaceForVars(places, local_scopes, vars, + fused_var_name, params_grads); + } + + template + void ResetAttribute(const std::string &attr_name, ir::Graph *graph) const { + if (graph->Has(attr_name)) { + VLOG(10) << attr_name << " is reset."; + graph->Erase(attr_name); + } + graph->Set(attr_name, new AttrType); + } + + void SetGroupGradsAndParams( + const std::unordered_map &var_nodes, + const details::ParamsAndGrads &params_grads, + details::GroupGradsAndParams *group_grads_params) const { + SetGroupAccordingToLayers(var_nodes, params_grads, group_grads_params); + SetGroupAccordingToMemorySize(var_nodes, group_grads_params); + SetGroupAccordingToGroupSize(var_nodes, group_grads_params); + } + + void SetGroupAccordingToLayers( + const std::unordered_map &var_nodes, + const details::ParamsAndGrads &params_grads, + details::GroupGradsAndParams *group_grads_params) const { + std::unordered_map> layer_params; + + for (size_t i = 0; i < params_grads.size(); ++i) { + auto pos = params_grads[i].first.find_first_of("."); + if (pos == std::string::npos) { + layer_params[std::string(kUnKnow)].emplace_back(i); + } else { + layer_params[params_grads[i].first.substr(0, pos)].emplace_back(i); + } + } + + group_grads_params->reserve(layer_params.size()); + for (size_t i = 0; i < params_grads.size(); ++i) { + auto pos = params_grads[i].first.find_first_of("."); + std::string key = kUnKnow; + if (pos != std::string::npos) { + key = params_grads[i].first.substr(0, pos); + } + auto iter = layer_params.find(key); + if (iter == layer_params.end()) continue; + + group_grads_params->emplace_back(); + auto &local_group_grads_params = group_grads_params->back(); + for (auto &idx : iter->second) { + local_group_grads_params.emplace_back( + std::make_pair(params_grads[idx].second, params_grads[idx].first)); + } + layer_params.erase(iter); + } + + VLOG(10) << "SetGroupAccordingToLayers: "; + for (size_t i = 0; i < group_grads_params->size(); ++i) { + VLOG(10) << "group " << i; + std::stringstream out; + for (auto &p_g : group_grads_params->at(i)) { + out << "(" << p_g.second << ", " << p_g.first << "), "; + } + VLOG(10) << out.str(); +
} + } + + void SetGroupAccordingToMemorySize( + const std::unordered_map &var_nodes, + details::GroupGradsAndParams *group_grads_params) const { + const uint64_t group_memory_size = GetFuseParameterMemorySize(); + if (group_memory_size == 0) { + return; + } + details::GroupGradsAndParams local_group_grads_params; + size_t j = 0; + while (j < group_grads_params->size()) { + local_group_grads_params.emplace_back(); + auto &group_p_g = local_group_grads_params.back(); + size_t local_group_memory_size = 0; + while (j < group_grads_params->size()) { + std::for_each( + group_grads_params->at(j).begin(), group_grads_params->at(j).end(), + [&local_group_memory_size, + &var_nodes](const std::pair &g_p) { + auto iter = var_nodes.find(g_p.second); + PADDLE_ENFORCE(iter != var_nodes.end(), "%s is not found.", + g_p.second); + auto shape = iter->second->Var()->GetShape(); + size_t size = + framework::SizeOfType(iter->second->Var()->GetDataType()); + std::for_each(shape.begin(), shape.end(), + [&size](const int64_t &n) { size *= n; }); + local_group_memory_size += size; + }); + group_p_g.insert(group_p_g.end(), group_grads_params->at(j).begin(), + group_grads_params->at(j).end()); + ++j; + if (local_group_memory_size >= group_memory_size) { + break; + } + } + } + + std::swap(*group_grads_params, local_group_grads_params); + + VLOG(10) << string::Sprintf( + "SetGroupAccordingToMemorySize(memory_size: %d):", group_memory_size); + for (size_t i = 0; i < group_grads_params->size(); ++i) { + VLOG(10) << "group " << i; + std::stringstream out; + for (auto &g_p : group_grads_params->at(i)) { + auto iter = var_nodes.find(g_p.second); + PADDLE_ENFORCE(iter != var_nodes.end(), "%s is not found.", g_p.second); + auto shape = iter->second->Var()->GetShape(); + size_t size = framework::SizeOfType(iter->second->Var()->GetDataType()); + std::for_each(shape.begin(), shape.end(), + [&size](const int64_t &n) { size *= n; }); + out << string::Sprintf("(%s(%d), %s)", g_p.second, size, g_p.first); + } + VLOG(10) << out.str(); + } + } + + void SetGroupAccordingToGroupSize( + const std::unordered_map &var_nodes, + details::GroupGradsAndParams *group_grads_params) const { + if (GetFuseParameterGroupsSize() == 1) { + return; + } + const int group_size = GetFuseParameterGroupsSize() == -1 + ? static_cast(group_grads_params->size()) + : GetFuseParameterGroupsSize(); + PADDLE_ENFORCE_GT(group_size, 1); + size_t groups = (group_grads_params->size() + group_size - 1) / group_size; + details::GroupGradsAndParams local_group_grads_params; + local_group_grads_params.reserve(groups); + + size_t j = 0; + for (size_t i = 0; i < groups; ++i) { + local_group_grads_params.emplace_back(); + auto &group_p_g = local_group_grads_params.back(); + group_p_g.reserve(group_size); + while (j < group_grads_params->size()) { + group_p_g.insert(group_p_g.end(), group_grads_params->at(j).begin(), + group_grads_params->at(j).end()); + ++j; + if (j % group_size == 0) break; + } + } + std::swap(*group_grads_params, local_group_grads_params); + + VLOG(10) << string::Sprintf("SetGroupAccordingToGroupSize(group_size: %d):", + group_size); + for (size_t i = 0; i < group_grads_params->size(); ++i) { + VLOG(10) << "group " << i; + std::stringstream out; + for (auto &p_g : group_grads_params->at(i)) { + out << "(" << p_g.second << ", " << p_g.first << "), "; + } + VLOG(10) << out.str(); + } + }
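To summarize the two grouping strategies above: SetGroupAccordingToMemorySize greedily merges consecutive layer groups until the accumulated byte size reaches fuse_parameter_memory_size, and SetGroupAccordingToGroupSize then packs the groups into ceil(n / group_size) fused groups. A small sketch of the latter arithmetic, using a hypothetical stand-alone helper:

#include <vector>

// Partition n items into groups of at most group_size elements, mirroring
// SetGroupAccordingToGroupSize: groups = ceil(n / group_size).
std::vector<std::vector<int>> PartitionByGroupSize(int n, int group_size) {
  int groups = (n + group_size - 1) / group_size;
  std::vector<std::vector<int>> result(groups);
  for (int i = 0; i < n; ++i) {
    result[i / group_size].push_back(i);
  }
  return result;  // e.g. n = 8, group_size = 3 gives sizes {3, 3, 2}
}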
+ + private: + bool IsSupportedVarType(const proto::VarType::Type &type) const { + // Currently, only LOD_TENSOR is supported. + return type == proto::VarType::LOD_TENSOR; + } + + void RecordParamsAndGrads(ir::Node *node, + details::ParamsAndGrads *params_grads) const { + try { + bool is_bk_op = + static_cast(boost::get(node->Op()->GetAttr( + OpProtoAndCheckerMaker::OpRoleAttrName())) & + static_cast(OpRole::kBackward)); + if (!is_bk_op) return; + + // Currently, we assume that once a gradient is generated, it can be + // broadcast, and each gradient is only broadcast once. + auto backward_vars = + boost::get>(node->Op()->GetNullableAttr( + OpProtoAndCheckerMaker::OpRoleVarAttrName())); + PADDLE_ENFORCE_EQ(backward_vars.size() % 2, static_cast(0)); + + for (size_t i = 0; i < backward_vars.size(); i += 2) { + VLOG(10) << "Trainable parameter: " << backward_vars[i] + << ", gradient: " << backward_vars[i + 1]; + + params_grads->emplace_back(std::make_pair( + backward_vars[i] /*param*/, backward_vars[i + 1] /*grad*/)); + } + } catch (const boost::bad_get &e) { + } + } + + void InitFusedVarsAndAllocSpaceForVars( + const std::vector &places, + const std::vector &local_scopes, + const std::unordered_map &vars, + const std::string &fused_var_name, + const details::ParamsAndGrads &params_grads) const { + // Init Gradients and FusedVars + VLOG(10) << "Init FusedVars and Gradients."; + for (auto it = local_scopes.rbegin(); it != local_scopes.rend(); ++it) { + auto &scope = *it; + + PADDLE_ENFORCE(scope->FindVar(fused_var_name) == nullptr, + "%s already exists in scope.", fused_var_name); + scope->Var(fused_var_name)->GetMutable(); + + for (auto &p_g : params_grads) { + auto iter = vars.find(p_g.second); + PADDLE_ENFORCE(iter != vars.end()); + PADDLE_ENFORCE_NOT_NULL(iter->second->Var()); + PADDLE_ENFORCE_EQ(iter->second->Var()->GetType(), + proto::VarType::LOD_TENSOR); + scope->Var(p_g.second)->GetMutable(); + } + } + + // Alloc continuous space for vars.
+ std::vector grads_name; + std::vector params_name; + grads_name.reserve(params_grads.size()); + params_name.reserve(params_grads.size()); + for (auto &p_g : params_grads) { + params_name.emplace_back(p_g.first); + grads_name.emplace_back(p_g.second); + } + framework::ProgramDesc program_desc; + AppendAllocSpaceForVarsOp(params_name, grads_name, fused_var_name, + program_desc.MutableBlock(0)); + + for (size_t i = 0; i < local_scopes.size(); ++i) { + for (auto &op_desc : program_desc.Block(0).AllOps()) { + auto op = OpRegistry::CreateOp(*op_desc); + op->Run(*local_scopes[i], places[i]); + } + } + } + + void AppendAllocSpaceForVarsOp(const std::vector ¶ms_name, + const std::vector &grads_name, + const std::string &fused_var_name, + BlockDesc *global_block) const { + auto op_desc = global_block->AppendOp(); + op_desc->SetType("alloc_continuous_space"); + op_desc->SetInput("Input", params_name); + op_desc->SetOutput("Output", grads_name); + op_desc->SetOutput("FusedOutput", {fused_var_name}); + } +}; +} // namespace ir +} // namespace framework +} // namespace paddle + +REGISTER_PASS(alloc_continuous_space_for_grad_pass, + paddle::framework::ir::AllocContinuousSpaceForGradPass) + .RequirePassAttr(paddle::framework::details::kPlaces) + .RequirePassAttr(paddle::framework::details::kLocalScopes); diff --git a/paddle/fluid/framework/details/reference_count_pass.h b/paddle/fluid/framework/ir/alloc_continuous_space_for_grad_pass.h similarity index 68% rename from paddle/fluid/framework/details/reference_count_pass.h rename to paddle/fluid/framework/ir/alloc_continuous_space_for_grad_pass.h index 7bb01ee6161eda944006d8d3d0fe6e9f91befcee..b20eda96f0fb622ccd318d9418ddb15f2997f8e6 100644 --- a/paddle/fluid/framework/details/reference_count_pass.h +++ b/paddle/fluid/framework/ir/alloc_continuous_space_for_grad_pass.h @@ -1,4 +1,4 @@ -// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -11,21 +11,19 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. - #pragma once - -#include "paddle/fluid/framework/ir/graph.h" -#include "paddle/fluid/framework/ir/pass.h" +#include namespace paddle { namespace framework { -namespace details { +namespace ir { + +void SetFuseParameterGroupsSize(int group_size); +int GetFuseParameterGroupsSize(); -class ReferenceCountPass : public ir::Pass { - protected: - void ApplyImpl(ir::Graph* graph) const override; -}; +void SetFuseParameterMemorySize(uint64_t memory_size); +uint64_t GetFuseParameterMemorySize(); -} // namespace details +} // namespace ir } // namespace framework } // namespace paddle diff --git a/paddle/fluid/framework/ir/fc_fuse_pass.cc b/paddle/fluid/framework/ir/fc_fuse_pass.cc index ca008763bff8ff89d5dba02e483090f2bec77592..cd8030519ccfcfab3741424e8a60e9c29b698593 100644 --- a/paddle/fluid/framework/ir/fc_fuse_pass.cc +++ b/paddle/fluid/framework/ir/fc_fuse_pass.cc @@ -48,17 +48,37 @@ void FCFusePass::ApplyImpl(ir::Graph* graph) const { GET_IR_NODE_FROM_SUBGRAPH(elementwise_add, elementwise_add, fc_pattern); GET_IR_NODE_FROM_SUBGRAPH(mul_out, mul_out, fc_pattern); + auto base_op_desc = mul->Op(); // Create an FC Node. 
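The FC node created below collapses the matched mul + elementwise_add pair into one fc op computing Out = Input * W + Bias. A rough reference implementation of that fused computation, flattened to 2-D and using hypothetical names, for illustration only:

#include <cstddef>
#include <vector>

using Matrix = std::vector<std::vector<float>>;

// Out[i][j] = sum_k Input[i][k] * W[k][j] + Bias[j], i.e. the computation the
// fused "fc" op performs in place of the separate mul and elementwise_add ops.
Matrix FcReference(const Matrix &input, const Matrix &w,
                   const std::vector<float> &bias) {
  std::size_t m = input.size(), k = w.size(), n = bias.size();
  Matrix out(m, std::vector<float>(n, 0.0f));
  for (std::size_t i = 0; i < m; ++i) {
    for (std::size_t j = 0; j < n; ++j) {
      for (std::size_t kk = 0; kk < k; ++kk) {
        out[i][j] += input[i][kk] * w[kk][j];
      }
      out[i][j] += bias[j];  // elementwise_add folded into the fc op
    }
  }
  return out;
}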
OpDesc desc; std::string fc_x_in = subgraph.at(x)->Name(); std::string fc_Y_in = w->Name(); std::string fc_bias_in = fc_bias->Name(); std::string fc_out_out = fc_out->Name(); + desc.SetInput("Input", std::vector({fc_x_in})); desc.SetInput("W", std::vector({fc_Y_in})); desc.SetInput("Bias", std::vector({fc_bias_in})); desc.SetOutput("Out", std::vector({fc_out_out})); desc.SetAttr("in_num_col_dims", mul->Op()->GetAttr("x_num_col_dims")); + + // For Anakin subgraph int8 mode: the pattern "fake_quant + mul + + // fake_dequant" can be detected by the quant_dequant_fuse_pass. That pass + // adds the "input_scale" and "weight_scale" attributes, extracted from the + // fake_quant and fake_dequant ops, to the mul op, and then deletes the + // fake_quant and fake_dequant ops from the graph. If the mul op carries + // this scale info, we should add it to the fused fc op as well. + if (base_op_desc->HasAttr("enable_int8")) { + desc.SetAttr("enable_int8", base_op_desc->GetAttr("enable_int8")); + desc.SetAttr("input_scale", base_op_desc->GetAttr("input_scale")); + desc.SetAttr("weight_scale", base_op_desc->GetAttr("weight_scale")); + } + desc.SetType("fc"); auto fc_node = g->CreateOpNode(&desc); // OpDesc will be copied. GraphSafeRemoveNodes(graph, {mul, elementwise_add, mul_out}); diff --git a/paddle/fluid/framework/ir/fuse_optimizer_ops_pass/CMakeLists.txt b/paddle/fluid/framework/ir/fuse_optimizer_ops_pass/CMakeLists.txt new file mode 100644 index 0000000000000000000000000000000000000000..22876e962a033f391e54e396701c06fe826f7821 --- /dev/null +++ b/paddle/fluid/framework/ir/fuse_optimizer_ops_pass/CMakeLists.txt @@ -0,0 +1,4 @@ +cc_library(fuse_optimizer_op_pass SRCS fuse_optimizer_op_pass.cc DEPS graph graph_helper) +cc_library(fuse_adam_op_pass SRCS fuse_adam_op_pass.cc DEPS fuse_optimizer_op_pass) +cc_library(fuse_sgd_op_pass SRCS fuse_sgd_op_pass.cc DEPS fuse_optimizer_op_pass) +cc_library(fuse_momentum_op_pass SRCS fuse_momentum_op_pass.cc DEPS fuse_optimizer_op_pass) diff --git a/paddle/fluid/framework/details/fuse_adam_op_pass.cc b/paddle/fluid/framework/ir/fuse_optimizer_ops_pass/fuse_adam_op_pass.cc similarity index 95% rename from paddle/fluid/framework/details/fuse_adam_op_pass.cc rename to paddle/fluid/framework/ir/fuse_optimizer_ops_pass/fuse_adam_op_pass.cc index 26315009f8b6b9835fd747af1a62dece91ca1e20..504ff04cfed267ad4fba795672b2809042fe52a3 100644 --- a/paddle/fluid/framework/details/fuse_adam_op_pass.cc +++ b/paddle/fluid/framework/ir/fuse_optimizer_ops_pass/fuse_adam_op_pass.cc @@ -16,16 +16,13 @@ #include #include #include -#include "paddle/fluid/framework/details/build_strategy.h" -#include "paddle/fluid/framework/details/fuse_optimizer_op_pass.h" -#include "paddle/fluid/framework/details/multi_devices_helper.h" -#include "paddle/fluid/framework/ir/graph.h" -#include "paddle/fluid/framework/ir/graph_helper.h" + +#include "paddle/fluid/framework/ir/fuse_optimizer_ops_pass/fuse_optimizer_op_pass.h" #include "paddle/fluid/framework/op_registry.h" namespace paddle { namespace framework { -namespace details { +namespace ir { class FuseAdamOpPass : public FuseOptimizerOpPass { private: @@ -203,10 +200,10 @@ class FuseAdamOpPass : public FuseOptimizerOpPass { } } }; -} // namespace details +} // namespace ir } // namespace framework } // namespace paddle -REGISTER_PASS(fuse_adam_op_pass, paddle::framework::details::FuseAdamOpPass) +REGISTER_PASS(fuse_adam_op_pass, paddle::framework::ir::FuseAdamOpPass)
.RequirePassAttr(paddle::framework::details::kPlaces) .RequirePassAttr(paddle::framework::details::kLocalScopes); diff --git a/paddle/fluid/framework/details/fuse_momentum_op_pass.cc b/paddle/fluid/framework/ir/fuse_optimizer_ops_pass/fuse_momentum_op_pass.cc similarity index 91% rename from paddle/fluid/framework/details/fuse_momentum_op_pass.cc rename to paddle/fluid/framework/ir/fuse_optimizer_ops_pass/fuse_momentum_op_pass.cc index c287cdfd090e4adfa29b6237e3e030249812fee1..3ac92d176274461fd548b0f6b7b3e1c632cdaa76 100644 --- a/paddle/fluid/framework/details/fuse_momentum_op_pass.cc +++ b/paddle/fluid/framework/ir/fuse_optimizer_ops_pass/fuse_momentum_op_pass.cc @@ -16,14 +16,13 @@ #include #include #include -#include "paddle/fluid/framework/details/build_strategy.h" -#include "paddle/fluid/framework/details/fuse_optimizer_op_pass.h" -#include "paddle/fluid/framework/ir/graph_helper.h" + +#include "paddle/fluid/framework/ir/fuse_optimizer_ops_pass/fuse_optimizer_op_pass.h" #include "paddle/fluid/framework/op_registry.h" namespace paddle { namespace framework { -namespace details { +namespace ir { class FuseMomentumOpPass : public FuseOptimizerOpPass { private: @@ -84,11 +83,10 @@ class FuseMomentumOpPass : public FuseOptimizerOpPass { } }; -} // namespace details +} // namespace ir } // namespace framework } // namespace paddle -REGISTER_PASS(fuse_momentum_op_pass, - paddle::framework::details::FuseMomentumOpPass) +REGISTER_PASS(fuse_momentum_op_pass, paddle::framework::ir::FuseMomentumOpPass) .RequirePassAttr(paddle::framework::details::kPlaces) .RequirePassAttr(paddle::framework::details::kLocalScopes); diff --git a/paddle/fluid/framework/details/fuse_optimizer_op_pass.cc b/paddle/fluid/framework/ir/fuse_optimizer_ops_pass/fuse_optimizer_op_pass.cc similarity index 91% rename from paddle/fluid/framework/details/fuse_optimizer_op_pass.cc rename to paddle/fluid/framework/ir/fuse_optimizer_ops_pass/fuse_optimizer_op_pass.cc index 312fc89470f0ee212f07536c6d9eb55fb70e64ec..f636bcd0eae9ea08699ef5706747c2c2f4766ed3 100644 --- a/paddle/fluid/framework/details/fuse_optimizer_op_pass.cc +++ b/paddle/fluid/framework/ir/fuse_optimizer_ops_pass/fuse_optimizer_op_pass.cc @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -#include "paddle/fluid/framework/details/fuse_optimizer_op_pass.h" +#include "paddle/fluid/framework/ir/fuse_optimizer_ops_pass/fuse_optimizer_op_pass.h" #include #include #include "paddle/fluid/framework/ir/graph_helper.h" @@ -20,13 +20,13 @@ namespace paddle { namespace framework { -namespace details { +namespace ir { void FuseOptimizerOpPass::ApplyImpl(ir::Graph *graph) const { ir::Graph &result = *graph; - auto &places = Get>(kPlaces); - auto &local_scopes = Get>(kLocalScopes); + auto &places = Get>(details::kPlaces); + auto &local_scopes = Get>(details::kLocalScopes); const std::string fuse_op_type = GetOpType(); std::vector aux_var_names = GetAuxiliaryVarNames(); @@ -47,24 +47,24 @@ void FuseOptimizerOpPass::ApplyImpl(ir::Graph *graph) const { return; } - if (result.Has(kFusedOptType)) { + if (result.Has(details::kFusedOptType)) { VLOG(6) << "Currently only support fusing one type optimizer op. 
Has fused " - << result.Get(kFusedOptType); + << result.Get(details::kFusedOptType); return; } else { - result.Set(kFusedOptType, new FusedOptType); + result.Set(details::kFusedOptType, new details::FusedOptType); } - result.Get(kFusedOptType) = fuse_op_type; + result.Get(details::kFusedOptType) = fuse_op_type; // Step 2: Insert fused_var_name to FusedVars, and the FusedVars need be // initialized in scopes before execution. - if (!result.Has(kFusedVars)) { - result.Set(kFusedVars, new FusedVars); + if (!result.Has(details::kFusedVars)) { + result.Set(details::kFusedVars, new details::FusedVars); } std::unordered_map fused_vars_name; fused_vars_name.reserve(aux_var_names.size()); - auto &fused_var_set = result.Get(kFusedVars); - const std::string prefix(kFusedVarNamePrefix); + auto &fused_var_set = result.Get(details::kFusedVars); + const std::string prefix(details::kFusedVarNamePrefix); // NOTE: the fused_var_name should be unique. for (auto &var_name : aux_var_names) { auto fused_var_name = prefix + "_" + fuse_op_type + "_" + var_name + "_" + @@ -77,8 +77,9 @@ void FuseOptimizerOpPass::ApplyImpl(ir::Graph *graph) const { // Step 3: Get the fused Gradient's name bool grad_fused = false; - if (result.Has(kParamsAndGrads)) { - auto ¶ms_grads = result.Get(kParamsAndGrads); + if (result.Has(details::kParamsAndGrads)) { + auto ¶ms_grads = + result.Get(details::kParamsAndGrads); PADDLE_ENFORCE_EQ( params_grads.size(), aux_var_set.at(kGrad).size(), "The number of gradients and optimizer ops is not equal."); @@ -94,13 +95,13 @@ void FuseOptimizerOpPass::ApplyImpl(ir::Graph *graph) const { // NOTE(zcd): the gradient of kParamsAndGrads may be different with the // kGrad. if (same_grad_num == aux_var_set.at(kGrad).size()) { - if (!result.Has(kFusedGrads)) { + if (!result.Has(details::kFusedGrads)) { PADDLE_THROW( "The alloc_continuous_space_for_grad_pass should be called before " "this pass."); } - auto &fused_grad = result.Get(kFusedGrads); - auto &fused_vars = result.Get(kFusedVars); + auto &fused_grad = result.Get(details::kFusedGrads); + auto &fused_vars = result.Get(details::kFusedVars); auto iter = std::find(fused_vars.begin(), fused_vars.end(), fused_grad); PADDLE_ENFORCE(iter != fused_vars.end(), "Not find the fused_grad."); fused_vars_name[kGrad] = fused_grad; @@ -323,6 +324,6 @@ void FuseOptimizerOpPass::InserInputAndOutputForOptOps( opt_node->outputs.insert(opt_node->outputs.begin(), outputs.begin(), outputs.end()); } -} // namespace details +} // namespace ir } // namespace framework } // namespace paddle diff --git a/paddle/fluid/framework/details/fuse_optimizer_op_pass.h b/paddle/fluid/framework/ir/fuse_optimizer_ops_pass/fuse_optimizer_op_pass.h similarity index 98% rename from paddle/fluid/framework/details/fuse_optimizer_op_pass.h rename to paddle/fluid/framework/ir/fuse_optimizer_ops_pass/fuse_optimizer_op_pass.h index 47efc1693dd31ca88787da3a9d6d06aa7ef65786..6271ca9acb62a2607c2bbf9b50191f5340ae847e 100644 --- a/paddle/fluid/framework/details/fuse_optimizer_op_pass.h +++ b/paddle/fluid/framework/ir/fuse_optimizer_ops_pass/fuse_optimizer_op_pass.h @@ -25,7 +25,7 @@ namespace paddle { namespace framework { -namespace details { +namespace ir { constexpr char kGrad[] = "Grad"; constexpr char kParam[] = "Param"; @@ -90,6 +90,6 @@ class FuseOptimizerOpPass : public ir::Pass { const std::string &fused_var_name) const; }; -} // namespace details +} // namespace ir } // namespace framework } // namespace paddle diff --git a/paddle/fluid/framework/details/fuse_sgd_op_pass.cc 
b/paddle/fluid/framework/ir/fuse_optimizer_ops_pass/fuse_sgd_op_pass.cc similarity index 85% rename from paddle/fluid/framework/details/fuse_sgd_op_pass.cc rename to paddle/fluid/framework/ir/fuse_optimizer_ops_pass/fuse_sgd_op_pass.cc index 4dd1860e25a44e168aa2e020060bc8ffc332f39e..077e393c105dadf0e87d64f520fe3a65b88c6972 100644 --- a/paddle/fluid/framework/details/fuse_sgd_op_pass.cc +++ b/paddle/fluid/framework/ir/fuse_optimizer_ops_pass/fuse_sgd_op_pass.cc @@ -14,18 +14,13 @@ #include #include #include -#include #include -#include "paddle/fluid/framework/details/build_strategy.h" -#include "paddle/fluid/framework/details/fuse_optimizer_op_pass.h" -#include "paddle/fluid/framework/details/multi_devices_helper.h" -#include "paddle/fluid/framework/ir/graph.h" -#include "paddle/fluid/framework/ir/graph_helper.h" +#include "paddle/fluid/framework/ir/fuse_optimizer_ops_pass/fuse_optimizer_op_pass.h" #include "paddle/fluid/framework/op_registry.h" namespace paddle { namespace framework { -namespace details { +namespace ir { class FuseSgdOpPass : public FuseOptimizerOpPass { private: @@ -66,10 +61,10 @@ class FuseSgdOpPass : public FuseOptimizerOpPass { InserInputAndOutputForOptOps(sgd_ops, sgd_node); } }; -} // namespace details +} // namespace ir } // namespace framework } // namespace paddle -REGISTER_PASS(fuse_sgd_op_pass, paddle::framework::details::FuseSgdOpPass) +REGISTER_PASS(fuse_sgd_op_pass, paddle::framework::ir::FuseSgdOpPass) .RequirePassAttr(paddle::framework::details::kPlaces) .RequirePassAttr(paddle::framework::details::kLocalScopes); diff --git a/paddle/fluid/framework/ir/graph.cc b/paddle/fluid/framework/ir/graph.cc index 6a9340b870df324f7dea03181bdb2b097e13e705..5eba32c4f3a846183d9bbad51b77a29cfca677f0 100644 --- a/paddle/fluid/framework/ir/graph.cc +++ b/paddle/fluid/framework/ir/graph.cc @@ -13,10 +13,15 @@ See the License for the specific language governing permissions and limitations under the License. */ #include +#include +#include #include +#include +#include #include "paddle/fluid/framework/ir/graph.h" #include "paddle/fluid/framework/op_proto_maker.h" +#include "paddle/fluid/framework/operator.h" #include "paddle/fluid/framework/program_desc.h" #include "paddle/fluid/framework/var_desc.h" @@ -61,7 +66,16 @@ std::map> Graph::InitFromProgram( var->outputs.push_back(node); } // For output args, always create a new var. + std::unordered_set out_arg_set; for (auto &each_var_name : op->OutputArgumentNames()) { + if (each_var_name != kEmptyVarName) { + PADDLE_ENFORCE(out_arg_set.count(each_var_name) == 0, + "Program is wrong. 
%s occurs in output of %s several " + "times.", + each_var_name, op->Type()); + out_arg_set.insert(each_var_name); + } + ir::Node *var = nullptr; if (all_vars.count(each_var_name) != 0) { var = CreateVarNode(all_vars.at(each_var_name)); diff --git a/paddle/fluid/framework/ir/graph_pattern_detector.cc b/paddle/fluid/framework/ir/graph_pattern_detector.cc index 8468f9ccc12a017ebe4fe73581e7bbce00dd626d..0dcf064902d1c1c6cb034421cedea0387b6e0505 100644 --- a/paddle/fluid/framework/ir/graph_pattern_detector.cc +++ b/paddle/fluid/framework/ir/graph_pattern_detector.cc @@ -1640,7 +1640,8 @@ PDNode *patterns::FillConstantElementWiseMulFuse::operator()( void patterns::QuantDequantOpFuse::operator()(PDNode *quant_op_input, const std::string &op_type, const std::string &weight_name, - int times) { + int times, + const std::string &quant_type) { const int kNumFields = 5; const int kQuantizedWeightOffset = 0; const int kQuantizedOpOffset = 1; @@ -1648,24 +1649,22 @@ void patterns::QuantDequantOpFuse::operator()(PDNode *quant_op_input, const int kDequantOpOffset = 3; const int kDequantOpOutOffset = 4; // the quant op always be one. - auto quant_op_in_scale = - pattern->NewNode(GetNodeName("quant_op_in_scale")) - ->assert_is_op_input("fake_quantize_range_abs_max", "InScale") - ->AsInput(); - auto quant_op = pattern->NewNode(GetNodeName("quant_op")) - ->assert_is_op("fake_quantize_range_abs_max"); + auto quant_op_in_scale = pattern->NewNode(GetNodeName("quant_op_in_scale")) + ->assert_is_op_input(quant_type, "InScale") + ->AsInput(); + auto quant_op = + pattern->NewNode(GetNodeName("quant_op"))->assert_is_op(quant_type); auto quant_op_out_scale = pattern->NewNode(GetNodeName("quant_op_out_scale")) - ->assert_is_op_output("fake_quantize_range_abs_max", "OutScale") + ->assert_is_op_output(quant_type, "OutScale") ->assert_is_op_input("fake_dequantize_max_abs", "Scale") ->AsIntermediate(); - auto quant_op_out = - pattern->NewNode(GetNodeName("quant_op_out")) - ->assert_is_op_output("fake_quantize_range_abs_max", "Out") - ->assert_is_op_input(op_type) - ->AsIntermediate(); + auto quant_op_out = pattern->NewNode(GetNodeName("quant_op_out")) + ->assert_is_op_output(quant_type, "Out") + ->assert_is_op_input(op_type) + ->AsIntermediate(); // there are 'times' quantized and dequant op std::vector nodes; @@ -1707,6 +1706,37 @@ void patterns::QuantDequantOpFuse::operator()(PDNode *quant_op_input, } } +void patterns::ShuffleChannelPattern::operator()(PDNode *reshape1_in) { + auto reshape1_op = + pattern->NewNode(reshape1_op_repr())->assert_is_op("reshape2"); + + auto reshape1_out = pattern->NewNode(reshape1_out_repr()) + ->assert_is_op_output("reshape2", "Out") + ->assert_is_op_input("transpose2") + ->AsIntermediate(); + + auto transpose_op = + pattern->NewNode(transpose_op_repr())->assert_is_op("transpose2"); + + auto transpose_out = pattern->NewNode(transpose_out_repr()) + ->assert_is_op_output("transpose2", "Out") + ->assert_is_op_input("reshape2") + ->AsIntermediate(); + + auto reshape2_op = + pattern->NewNode(reshape2_op_repr())->assert_is_op("reshape2"); + auto reshape2_out = pattern->NewNode(reshape2_out_repr()) + ->assert_is_op_output("reshape2", "Out") + ->AsOutput(); + + reshape1_op->LinksFrom({reshape1_in}); + reshape1_out->LinksFrom({reshape1_op}); + transpose_op->LinksFrom({reshape1_out}); + transpose_out->LinksFrom({transpose_op}); + reshape2_op->LinksFrom({transpose_out}); + reshape2_out->LinksFrom({reshape2_op}); +} + } // namespace ir } // namespace framework } // namespace paddle diff --git 
a/paddle/fluid/framework/ir/graph_pattern_detector.h b/paddle/fluid/framework/ir/graph_pattern_detector.h index a5ac3a0c3733cf610159c6367d04f3323b797c50..907371b56b06dcd66297adedea6c17b61d9b5e38 100644 --- a/paddle/fluid/framework/ir/graph_pattern_detector.h +++ b/paddle/fluid/framework/ir/graph_pattern_detector.h @@ -880,7 +880,8 @@ struct QuantDequantOpFuse : public PatternBase { : PatternBase(pattern, name_scope, "quant_dequant_fuse") {} void operator()(PDNode* quant_op_input, const std::string& op_name, - const std::string& weight_name, int times = 1); + const std::string& weight_name, int times, + const std::string& quant_type); std::string GetNodeName(const std::string& op_type) { return PDNodeName(name_scope_, repr_, id_, op_type); @@ -891,6 +892,21 @@ struct QuantDequantOpFuse : public PatternBase { } }; +struct ShuffleChannelPattern : public PatternBase { + ShuffleChannelPattern(PDPattern* pattern, const std::string& name_scope) + : PatternBase(pattern, name_scope, "shufflechannel_pattern") {} + + void operator()(PDNode* reshape1_in); + + PATTERN_DECL_NODE(reshape1_op); + PATTERN_DECL_NODE(reshape1_out); + + PATTERN_DECL_NODE(transpose_op); + PATTERN_DECL_NODE(transpose_out); + PATTERN_DECL_NODE(reshape2_op); + PATTERN_DECL_NODE(reshape2_out); +}; + } // namespace patterns // Link two ir::Nodes from each other. diff --git a/paddle/fluid/framework/ir/memory_optimize_pass/CMakeLists.txt b/paddle/fluid/framework/ir/memory_optimize_pass/CMakeLists.txt new file mode 100644 index 0000000000000000000000000000000000000000..125cd462fa4b043464c314c97e090bbc0cb6d422 --- /dev/null +++ b/paddle/fluid/framework/ir/memory_optimize_pass/CMakeLists.txt @@ -0,0 +1,18 @@ +cc_library(op_graph_view SRCS op_graph_view.cc DEPS op_handle_base) +cc_library(while_op_eager_deletion_pass SRCS while_op_eager_deletion_pass.cc DEPS while_op_helper graph_helper pass computation_op_handle) +cc_library(reference_count_pass_helper SRCS reference_count_pass_helper.cc DEPS garbage_collector computation_op_handle var_handle) +cc_library(reference_count_pass SRCS reference_count_pass.cc DEPS computation_op_handle graph graph_helper pass op_graph_view reference_count_pass_helper) + +if(WITH_GPU) + cc_library(memory_optimize_helper SRCS memory_optimize_helper.cc DEPS graph graph_helper gpu_info) +else() + cc_library(memory_optimize_helper SRCS memory_optimize_helper.cc DEPS graph graph_helper cpu_info) +endif() + +cc_library(memory_optimize_pass SRCS memory_optimize_pass.cc DEPS memory_optimize_helper pass) +cc_library(inplace_op_pass SRCS inplace_op_pass.cc DEPS memory_optimize_pass op_info) + +cc_test(memory_optimize_helper_test SRCS memory_optimize_helper_test.cc memory_optimize_helper.cc DEPS framework_proto graph graph_helper op_registry) + +cc_library(eager_deletion_pass SRCS eager_deletion_pass.cc DEPS computation_op_handle eager_deletion_op_handle graph graph_helper pass while_op_eager_deletion_pass reference_count_pass_helper) +cc_library(record_skip_memory_opt_vars_pass SRCS record_skip_memory_opt_vars_pass.cc DEPS graph graph_helper) diff --git a/paddle/fluid/framework/details/eager_deletion_pass.cc b/paddle/fluid/framework/ir/memory_optimize_pass/eager_deletion_pass.cc similarity index 82% rename from paddle/fluid/framework/details/eager_deletion_pass.cc rename to paddle/fluid/framework/ir/memory_optimize_pass/eager_deletion_pass.cc index 5ea18efe5c3f8f95fcbd724a522fda9638e65a52..1cdc97338ae8d0745e877071b7939c5c3d9c955c 100644 --- a/paddle/fluid/framework/details/eager_deletion_pass.cc +++ 
b/paddle/fluid/framework/ir/memory_optimize_pass/eager_deletion_pass.cc @@ -27,11 +27,11 @@ namespace paddle { namespace framework { -namespace details { +namespace ir { // op -> variables which can be deleted after op runs -using OpToVarNameSetMap = - std::unordered_map>; +using OpToVarNameSetMap = std::unordered_map>; static std::map> VarsGroupByScopeIdx( const OpToVarNameSetMap &map) { @@ -53,7 +53,8 @@ static bool IsLoDTensor(VarDesc *var) { // Get memory size of LoDTensor static int64_t GetMemorySize( - const std::unordered_map> &vars, + const std::unordered_map> + &vars, const std::string &var_name) { auto *var_desc = TryGetLatestVarDesc(vars.at(var_name)); PADDLE_ENFORCE_NOT_NULL(var_desc); @@ -69,13 +70,13 @@ static int64_t GetMemorySize( // Since partial GC is based on static analysis of memory size of each variable // So we should skip SelectedRows and LoDTensorArray here static void SplitIntoLoDTensorAndNonLoDTensorVars( - const OpToVarNameSetMap &m, const GraphVars &vars, + const OpToVarNameSetMap &m, const details::GraphVars &vars, OpToVarNameSetMap *lod_tensors, OpToVarNameSetMap *other_vars) { lod_tensors->clear(); other_vars->clear(); for (auto &op_vars_pair : m) { - for (auto &var_name : op_vars_pair.second) { + for (auto var_name : op_vars_pair.second) { auto *var_desc = TryGetLatestVarDesc( vars[op_vars_pair.first->GetScopeIdx()].at(var_name)); if (IsLoDTensor(var_desc)) { @@ -89,23 +90,24 @@ static void SplitIntoLoDTensorAndNonLoDTensorVars( struct GCVarInfo { GCVarInfo(const std::string &name, int64_t memory_size, - ComputationOpHandle *op, size_t scope_idx) + details::ComputationOpHandle *op, size_t scope_idx) : name_(name), memory_size_(memory_size), op_(op), scope_idx_(scope_idx) {} - std::string name_; // variable name - int64_t memory_size_; // memory size - ComputationOpHandle *op_; // op after which the variable could be deleted - size_t scope_idx_; // scope index where the variable locates + std::string name_; // variable name + int64_t memory_size_; // memory size + details::ComputationOpHandle + *op_; // op after which the variable could be deleted + size_t scope_idx_; // scope index where the variable locates int64_t AbsMemorySize() const { return std::abs(memory_size_); } }; // Delete delete_lod_tensor_only is not used currently static OpToVarNameSetMap ShrinkGCVars( - const OpToVarNameSetMap &m, const GraphVars &vars, + const OpToVarNameSetMap &m, const details::GraphVars &vars, const std::vector &places, double fraction_of_memory_size, bool delete_lod_tensor_only = false) { // Do not perform gc when fraction_of_memory_size = 0 @@ -192,7 +194,7 @@ void EagerDeletionPass::ApplyImpl(ir::Graph *graph) const { PADDLE_ENFORCE(ref_cnts.empty(), "kRuntimeReferenceCount should be initialized here!"); - const auto &vars = graph->Get(kGraphVars); + const auto &vars = graph->Get(details::kGraphVars); ref_cnts.resize(vars.size()); const auto &last_live_ops = @@ -222,27 +224,31 @@ void EagerDeletionPass::ApplyImpl(ir::Graph *graph) const { auto *eager_deletion_node = graph->CreateEmptyNode("eager_deletion", ir::Node::Type::kOperation); - auto *eager_deletion_op = new EagerDeletionOpHandle( + auto *eager_deletion_op = new details::EagerDeletionOpHandle( eager_deletion_node, op->GetScope(), op->GetPlace(), var_names, gcs.at(places[op->GetScopeIdx()]).get(), &(ref_cnts[op->GetScopeIdx()])); auto it = std::find_if( - op->Outputs().begin(), op->Outputs().end(), [](VarHandleBase *var) { - return dynamic_cast(var) != nullptr; + op->Outputs().begin(), op->Outputs().end(), + 
[](details::VarHandleBase *var) { + return dynamic_cast(var) != nullptr; }); if (it != op->Outputs().end()) { eager_deletion_op->AddInput(*it); } else { - auto *dep_var = new DummyVarHandle(graph->CreateControlDepVar()); - graph->Get(kGraphDepVars).emplace(dep_var); + auto *dep_var = new details::DummyVarHandle(graph->CreateControlDepVar()); + graph->Get(details::kGraphDepVars) + .emplace(dep_var); op->AddOutput(dep_var); eager_deletion_op->AddInput(dep_var); } - auto *dummy_leaf = new DummyVarHandle(graph->CreateControlDepVar()); - graph->Get(kGraphDepVars).emplace(dummy_leaf); + auto *dummy_leaf = + new details::DummyVarHandle(graph->CreateControlDepVar()); + graph->Get(details::kGraphDepVars) + .emplace(dummy_leaf); eager_deletion_op->AddOutput(dummy_leaf); } @@ -262,15 +268,14 @@ void EagerDeletionPass::ApplyImpl(ir::Graph *graph) const { while_op_eager_deletion_pass->Apply(graph); } -} // namespace details +} // namespace ir } // namespace framework } // namespace paddle -REGISTER_PASS(eager_deletion_pass, - paddle::framework::details::EagerDeletionPass) - .RequirePassAttr(paddle::framework::details::kRuntimeReferenceCount) - .RequirePassAttr(paddle::framework::details::kLastLiveOpsOfVars) - .RequirePassAttr(paddle::framework::details::kAllPlaces) - .RequirePassAttr(paddle::framework::details::kGarbageCollector); +REGISTER_PASS(eager_deletion_pass, paddle::framework::ir::EagerDeletionPass) + .RequirePassAttr(paddle::framework::ir::kRuntimeReferenceCount) + .RequirePassAttr(paddle::framework::ir::kLastLiveOpsOfVars) + .RequirePassAttr(paddle::framework::ir::kAllPlaces) + .RequirePassAttr(paddle::framework::ir::kGarbageCollector); USE_PASS(while_op_eager_deletion_pass); diff --git a/paddle/fluid/framework/details/inplace_op_pass.cc b/paddle/fluid/framework/ir/memory_optimize_pass/inplace_op_pass.cc similarity index 91% rename from paddle/fluid/framework/details/inplace_op_pass.cc rename to paddle/fluid/framework/ir/memory_optimize_pass/inplace_op_pass.cc index 9313d9958ddb42cf2f72ac744006e56497ade676..ed746ea988e82dff23257996f688e55c56f09168 100644 --- a/paddle/fluid/framework/details/inplace_op_pass.cc +++ b/paddle/fluid/framework/ir/memory_optimize_pass/inplace_op_pass.cc @@ -16,9 +16,9 @@ #include #include #include -#include "paddle/fluid/framework/details/memory_optimize_pass.h" #include "paddle/fluid/framework/ir/graph.h" #include "paddle/fluid/framework/ir/graph_helper.h" +#include "paddle/fluid/framework/ir/memory_optimize_pass/memory_optimize_pass.h" #include "paddle/fluid/framework/ir/pass.h" #include "paddle/fluid/framework/op_info.h" @@ -52,7 +52,7 @@ DECLARE_string(memory_optimize_debug); namespace paddle { namespace framework { -namespace details { +namespace ir { // clang-format off const std::string kInplacedOpWhiteList[] = { // NOLINT @@ -111,10 +111,14 @@ class InplacePass : public ir::Pass { // Check whether all `ops` is the preceding ops of `op` bool CheckOpDeps(ir::Node *op, const std::vector &ops) const; - // Find nodes whose name are equal to the given name + // Find nodes whose names are equal to the given name static std::unordered_set FindNodesByName( const std::string &name, const std::vector &nodes); + // Collect inputs and outputs of op_desc + static void CollectInputArgsOfOpDesc( + const OpDesc *op_desc, std::unordered_multiset *in_args); + // Get all versions vars named var_name std::vector *AllVersionVars(const std::string &var_name) const; @@ -195,43 +199,12 @@ bool InplacePass::CheckOpDeps(ir::Node *op, void InplacePass::CollectSkipVars(ir::Graph *graph, 
const std::vector &ops) const { // 1. Collect op role vars - PADDLE_ENFORCE(graph->Has(details::kMemOptSkipVars), - "Graph should have attr %s", details::kMemOptSkipVars); + PADDLE_ENFORCE(graph->Has(kMemOptSkipVars), "Graph should have attr %s", + kMemOptSkipVars); auto &mem_opt_whitelist = graph->Get(kMemOptSkipVars); for (const auto &var : mem_opt_whitelist) { skip_vars_.emplace(var); } - - // 2. track the nodes which used by parameter server. - // these node can not be inplaced, otherwise trainer - // pserver can not find each other's name. - // Also check the ops which has sub-block - auto update_skip_set = [&](ir::Node *node) { - for (auto &in : node->inputs) { - if (in->IsVar() && in->Var() != nullptr) { - skip_vars_.emplace(in->Name()); - } - } - for (auto &out : node->outputs) { - if (out->IsVar() && out->Var() != nullptr) { - skip_vars_.emplace(out->Name()); - } - } - }; - - for (auto *node : ops) { - if (!node->IsOp()) continue; - // avoid optimizing the variable used in sub-blocks - if (OpHasSubBlock(node->Op())) { - update_skip_set(node); - continue; - } - - auto node_name = node->Name(); - if (node_name == "send" || node_name == "recv" || node_name == "prefetch") { - update_skip_set(node); - } - } } void InplacePass::RenameInOut(ir::Node *op, ir::Node *in_var, @@ -301,6 +274,14 @@ std::unordered_set InplacePass::FindNodesByName( return ret; } +void InplacePass::CollectInputArgsOfOpDesc( + const OpDesc *op_desc, std::unordered_multiset *in_args) { + in_args->clear(); + for (auto &in_name : op_desc->InputArgumentNames()) { + in_args->insert(in_name); + } +} + void InplacePass::ApplyImpl(ir::Graph *graph) const { // Step 1: topo sort ops, collect skip vars auto ops = ir::TopologySortOperations(*graph); @@ -346,6 +327,11 @@ void InplacePass::ApplyImpl(ir::Graph *graph) const { } auto in_to_outs = infer_inplace(*op_desc, use_cuda); + if (in_to_outs.empty()) continue; + + std::unordered_multiset all_in_args; + CollectInputArgsOfOpDesc(op_desc, &all_in_args); + for (auto &pair : in_to_outs) { auto &in_param = pair.first; auto &out_param = pair.second; @@ -387,6 +373,14 @@ void InplacePass::ApplyImpl(ir::Graph *graph) const { continue; } + size_t in_arg_occur_times = all_in_args.count(in_arg); + if (in_arg_occur_times > 1) { + VLOG(4) << "Cannot inplace because Input(" << in_param << ")=" << in_arg + << " occurs " << in_arg_occur_times << " times in input of op " + << op_type; + continue; + } + auto in_nodes = FindNodesByName(in_arg, op_node->inputs); PADDLE_ENFORCE(!in_nodes.empty(), "Input(%s)=%s cannot be found in op %s", in_param, in_arg, op_type); @@ -458,8 +452,7 @@ void InplacePass::ApplyImpl(ir::Graph *graph) const { continue; } - if (details::NodeSize(*in_node->Var()) != - details::NodeSize(*out_node->Var()) && + if (NodeSize(*in_node->Var()) != NodeSize(*out_node->Var()) && kSameShapeOpWhiteSet.count(op_desc->Type()) == 0) { VLOG(4) << "Cannot inplace because Input(" << in_param << ")=" << in_arg << " is not the same size with " @@ -482,9 +475,9 @@ void InplacePass::ApplyImpl(ir::Graph *graph) const { } } -} // namespace details +} // namespace ir } // namespace framework } // namespace paddle -REGISTER_PASS(inplace_pass, paddle::framework::details::InplacePass) - .RequirePassAttr(paddle::framework::details::kUseCuda); +REGISTER_PASS(inplace_pass, paddle::framework::ir::InplacePass) + .RequirePassAttr(paddle::framework::ir::kUseCuda); diff --git a/paddle/fluid/framework/details/memory_optimize_helper.cc 
b/paddle/fluid/framework/ir/memory_optimize_pass/memory_optimize_helper.cc similarity index 98% rename from paddle/fluid/framework/details/memory_optimize_helper.cc rename to paddle/fluid/framework/ir/memory_optimize_pass/memory_optimize_helper.cc index 1af57dc4087d2fd734c43e9549a4bd4526af4d35..0437de68687d8dc9eee3249ee438f2d907f8fe40 100644 --- a/paddle/fluid/framework/details/memory_optimize_helper.cc +++ b/paddle/fluid/framework/ir/memory_optimize_pass/memory_optimize_helper.cc @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -#include "paddle/fluid/framework/details/memory_optimize_helper.h" +#include "paddle/fluid/framework/ir/memory_optimize_pass/memory_optimize_helper.h" #include #include #include @@ -32,14 +32,15 @@ namespace paddle { namespace framework { -namespace details { +namespace ir { using paddle::framework::VarDesc; std::vector SortOpLikeDescOrder(const ir::Graph& graph) { - PADDLE_ENFORCE(graph.Has(kStaleProgramOpDescs), + PADDLE_ENFORCE(graph.Has(details::kStaleProgramOpDescs), "Graph has no attribute of kStaleProgramOpDescs."); // 1. get op desc order - auto& op_descs = graph.Get>(kStaleProgramOpDescs); + auto& op_descs = + graph.Get>(details::kStaleProgramOpDescs); // 2. topology sort order auto nodes = graph.Nodes(); @@ -563,6 +564,6 @@ ir::Node* ControlFlowGraph::GetNodeByName(const std::string& name, return found_node; } -} // namespace details +} // namespace ir } // namespace framework } // namespace paddle diff --git a/paddle/fluid/framework/details/memory_optimize_helper.h b/paddle/fluid/framework/ir/memory_optimize_pass/memory_optimize_helper.h similarity index 99% rename from paddle/fluid/framework/details/memory_optimize_helper.h rename to paddle/fluid/framework/ir/memory_optimize_pass/memory_optimize_helper.h index 3ef407e4e9c3cf93b17f7d53c9730728053ef87b..cf9f4ef427ea63ab958395df776d7c9379a3d72c 100644 --- a/paddle/fluid/framework/details/memory_optimize_helper.h +++ b/paddle/fluid/framework/ir/memory_optimize_pass/memory_optimize_helper.h @@ -29,7 +29,7 @@ namespace paddle { namespace framework { -namespace details { +namespace ir { /// this attribute is used to avoid some core variables removed/reused /// in memory optimize related passes @@ -184,6 +184,6 @@ void FilterVariables(const Container& nodes, Callback callback) { FilterVariableImpl()(nodes, callback); } -} // namespace details +} // namespace ir } // namespace framework } // namespace paddle diff --git a/paddle/fluid/framework/details/memory_optimize_helper_test.cc b/paddle/fluid/framework/ir/memory_optimize_pass/memory_optimize_helper_test.cc similarity index 99% rename from paddle/fluid/framework/details/memory_optimize_helper_test.cc rename to paddle/fluid/framework/ir/memory_optimize_pass/memory_optimize_helper_test.cc index 3fb02f69b1bb65a74a2e5f69e9de7994b4d012db..d38facd01950936c5ee7fb337ddce89d1bfd7209 100644 --- a/paddle/fluid/framework/details/memory_optimize_helper_test.cc +++ b/paddle/fluid/framework/ir/memory_optimize_pass/memory_optimize_helper_test.cc @@ -11,8 +11,7 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
- -#include "paddle/fluid/framework/details/memory_optimize_helper.h" +#include "paddle/fluid/framework/ir/memory_optimize_pass/memory_optimize_helper.h" #include #include #include @@ -32,7 +31,7 @@ namespace paddle { namespace framework { -namespace details { +namespace ir { TEST(OrderedSet, Normal) { OrderedSet pool; @@ -153,7 +152,7 @@ TEST(OrderedSet, FindBestFitNode) { ASSERT_TRUE(cache == nullptr); } -} // namespace details +} // namespace ir } // namespace framework } // namespace paddle @@ -188,7 +187,7 @@ REGISTER_OPERATOR(dummy, paddle::framework::DummyOp, namespace paddle { namespace framework { -namespace details { +namespace ir { inline static ProgramDesc FillProgramDesc() { ProgramDesc prog; @@ -521,6 +520,6 @@ TEST(SortOpLikeDescOrder, AddAndReplaceOpDescInplace) { } } -} // namespace details +} // namespace ir } // namespace framework } // namespace paddle diff --git a/paddle/fluid/framework/details/memory_optimize_pass.cc b/paddle/fluid/framework/ir/memory_optimize_pass/memory_optimize_pass.cc similarity index 90% rename from paddle/fluid/framework/details/memory_optimize_pass.cc rename to paddle/fluid/framework/ir/memory_optimize_pass/memory_optimize_pass.cc index ef36f1038e27770498d66663a0051dbf8f559f93..8d5271b5081d0011dd653c40685d6a0bec0d5f48 100644 --- a/paddle/fluid/framework/details/memory_optimize_pass.cc +++ b/paddle/fluid/framework/ir/memory_optimize_pass/memory_optimize_pass.cc @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -#include "paddle/fluid/framework/details/memory_optimize_pass.h" +#include "paddle/fluid/framework/ir/memory_optimize_pass/memory_optimize_pass.h" #include #include #include @@ -42,12 +42,12 @@ DEFINE_string(memory_optimize_debug, "", namespace paddle { namespace framework { -namespace details { +namespace ir { void MemoryOptimizePass::ApplyImpl(ir::Graph* graph) const { CollectSkipVarsSet(graph); - cfg_.reset(new details::ControlFlowGraph(*graph)); + cfg_.reset(new ControlFlowGraph(*graph)); cfg_->LiveVariableAnalysis(); InitSSAGraphNodes(); @@ -205,30 +205,10 @@ void MemoryOptimizePass::SubGraphOptimize(OpDesc* op_desc) const { void MemoryOptimizePass::CollectSkipVarsSet(ir::Graph* graph) const { // fill skip_set_ - PADDLE_ENFORCE(graph->Has(details::kMemOptSkipVars)); + PADDLE_ENFORCE(graph->Has(kMemOptSkipVars)); auto& mem_opt_whitelist = graph->Get(kMemOptSkipVars); - for (const auto& var : mem_opt_whitelist) skip_set_.emplace(var); - - auto update_skip_set = [&](OpDesc* op_desc) { - auto inputs = op_desc->InputArgumentNames(); - auto outputs = op_desc->OutputArgumentNames(); - skip_set_.insert(inputs.begin(), inputs.end()); - skip_set_.insert(outputs.begin(), outputs.end()); - }; - - auto nodes = graph->Nodes(); - for (auto& op : nodes) { - if (!op->IsOp() || op->Op() == nullptr) continue; - auto* op_desc = op->Op(); - // NOTE(dzhwinter): - // current block can not reuse next level block vars. 
- if (OpHasSubBlock(op_desc)) update_skip_set(op_desc); - // NOTE(dzhwinter): - // distributed ops input/output name need to - // keep same bettwen trainer/pserver - if (op_desc->Type() == "send") update_skip_set(op_desc); - if (op_desc->Type() == "recv") update_skip_set(op_desc); - if (op_desc->Type() == "prefetch") update_skip_set(op_desc); + for (const auto& var : mem_opt_whitelist) { + skip_set_.emplace(var); } } @@ -336,10 +316,9 @@ void MemoryOptimizePass::RenameVarInGraphNode(const std::string& var, } } -} // namespace details +} // namespace ir } // namespace framework } // namespace paddle -REGISTER_PASS(memory_optimize_pass, - paddle::framework::details::MemoryOptimizePass) +REGISTER_PASS(memory_optimize_pass, paddle::framework::ir::MemoryOptimizePass) .RequireGraphAttr(paddle::framework::details::kStaleProgramOpDescs); diff --git a/paddle/fluid/framework/details/memory_optimize_pass.h b/paddle/fluid/framework/ir/memory_optimize_pass/memory_optimize_pass.h similarity index 94% rename from paddle/fluid/framework/details/memory_optimize_pass.h rename to paddle/fluid/framework/ir/memory_optimize_pass/memory_optimize_pass.h index fa5b9b322da8fce53a4205daab96aa649e526335..eef289eff138c454631ffbb34d0780b1c14d99dc 100644 --- a/paddle/fluid/framework/details/memory_optimize_pass.h +++ b/paddle/fluid/framework/ir/memory_optimize_pass/memory_optimize_pass.h @@ -26,13 +26,13 @@ #include #include "paddle/fluid/framework/data_type.h" -#include "paddle/fluid/framework/details/memory_optimize_helper.h" #include "paddle/fluid/framework/ir/graph.h" +#include "paddle/fluid/framework/ir/memory_optimize_pass/memory_optimize_helper.h" #include "paddle/fluid/framework/ir/pass.h" namespace paddle { namespace framework { -namespace details { +namespace ir { class MemoryOptimizePass : public ir::Pass { protected: @@ -67,6 +67,6 @@ class MemoryOptimizePass : public ir::Pass { mutable std::map> var_nodes_; }; -} // namespace details +} // namespace ir } // namespace framework } // namespace paddle diff --git a/paddle/fluid/framework/details/op_graph_view.cc b/paddle/fluid/framework/ir/memory_optimize_pass/op_graph_view.cc similarity index 71% rename from paddle/fluid/framework/details/op_graph_view.cc rename to paddle/fluid/framework/ir/memory_optimize_pass/op_graph_view.cc index d3865c2c2919c2d43521e4f51013e5fa1b10416d..6b7249b17082c9799020fb17c7e39f4461af83b8 100644 --- a/paddle/fluid/framework/details/op_graph_view.cc +++ b/paddle/fluid/framework/ir/memory_optimize_pass/op_graph_view.cc @@ -12,17 +12,19 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-#include "paddle/fluid/framework/details/op_graph_view.h" +#include "paddle/fluid/framework/ir/memory_optimize_pass/op_graph_view.h" #include #include namespace paddle { namespace framework { -namespace details { +namespace ir { -OpGraphView::OpGraphView(const std::vector &ops) { Build(ops); } +OpGraphView::OpGraphView(const std::vector &ops) { + Build(ops); +} -void OpGraphView::Build(const std::vector &ops) { +void OpGraphView::Build(const std::vector &ops) { preceding_ops_.clear(); pending_ops_.clear(); for (auto &op : ops) { @@ -40,8 +42,8 @@ void OpGraphView::Build(const std::vector &ops) { "There are duplicate ops in graph."); } -std::unordered_set OpGraphView::AllOps() const { - std::unordered_set ret; +std::unordered_set OpGraphView::AllOps() const { + std::unordered_set ret; ret.reserve(preceding_ops_.size()); for (auto &pair : preceding_ops_) { ret.insert(pair.first); @@ -49,21 +51,21 @@ std::unordered_set OpGraphView::AllOps() const { return ret; } -bool OpGraphView::HasOp(OpHandleBase *op) const { +bool OpGraphView::HasOp(details::OpHandleBase *op) const { return preceding_ops_.count(op) != 0; } -void OpGraphView::EnforceHasOp(OpHandleBase *op) const { +void OpGraphView::EnforceHasOp(details::OpHandleBase *op) const { PADDLE_ENFORCE(HasOp(op), "Cannot find op %s in OpGraphView", op == nullptr ? "nullptr" : op->DebugString()); } -const std::unordered_set &OpGraphView::PendingOps( - OpHandleBase *op) const { +const std::unordered_set &OpGraphView::PendingOps( + details::OpHandleBase *op) const { EnforceHasOp(op); return pending_ops_.at(op); } -} // namespace details +} // namespace ir } // namespace framework } // namespace paddle diff --git a/paddle/fluid/framework/details/op_graph_view.h b/paddle/fluid/framework/ir/memory_optimize_pass/op_graph_view.h similarity index 62% rename from paddle/fluid/framework/details/op_graph_view.h rename to paddle/fluid/framework/ir/memory_optimize_pass/op_graph_view.h index 1585c6f728531acde1d97aaac5c51b09e27c7d50..afd29091c7d61397f4721a648a470446db072288 100644 --- a/paddle/fluid/framework/details/op_graph_view.h +++ b/paddle/fluid/framework/ir/memory_optimize_pass/op_graph_view.h @@ -22,39 +22,42 @@ namespace paddle { namespace framework { -namespace details { +namespace ir { class OpGraphView { public: - explicit OpGraphView(const std::vector &ops); + explicit OpGraphView(const std::vector &ops); - std::unordered_set AllOps() const; + std::unordered_set AllOps() const; - const std::unordered_set &PendingOps(OpHandleBase *op) const; + const std::unordered_set &PendingOps( + details::OpHandleBase *op) const; - bool HasOp(OpHandleBase *op) const; + bool HasOp(details::OpHandleBase *op) const; // Use a visitor to visit all pending ops of op // Stop when callback returns false template - bool VisitAllPendingOps(OpHandleBase *op, Callback &&callback) const; + bool VisitAllPendingOps(details::OpHandleBase *op, Callback &&callback) const; private: - void Build(const std::vector &ops); - void EnforceHasOp(OpHandleBase *op) const; + void Build(const std::vector &ops); + void EnforceHasOp(details::OpHandleBase *op) const; - std::unordered_map> + std::unordered_map> preceding_ops_; - std::unordered_map> + std::unordered_map> pending_ops_; }; template -bool OpGraphView::VisitAllPendingOps(OpHandleBase *op, +bool OpGraphView::VisitAllPendingOps(details::OpHandleBase *op, Callback &&callback) const { EnforceHasOp(op); - std::unordered_set visited; - std::queue q; + std::unordered_set visited; + std::queue q; q.push(op); while (!q.empty()) { op = 
q.front(); @@ -72,6 +75,6 @@ bool OpGraphView::VisitAllPendingOps(OpHandleBase *op, return true; } -} // namespace details +} // namespace ir } // namespace framework } // namespace paddle diff --git a/paddle/fluid/framework/ir/memory_optimize_pass/record_skip_memory_opt_vars_pass.cc b/paddle/fluid/framework/ir/memory_optimize_pass/record_skip_memory_opt_vars_pass.cc new file mode 100644 index 0000000000000000000000000000000000000000..075a1955eb641832dd8cc3c11befd58e798b545b --- /dev/null +++ b/paddle/fluid/framework/ir/memory_optimize_pass/record_skip_memory_opt_vars_pass.cc @@ -0,0 +1,170 @@ +// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include +#include +#include +#include "paddle/fluid/framework/ir/graph.h" +#include "paddle/fluid/framework/ir/graph_helper.h" +#include "paddle/fluid/framework/ir/memory_optimize_pass/memory_optimize_helper.h" +#include "paddle/fluid/framework/ir/pass.h" +#include "paddle/fluid/framework/op_proto_maker.h" +#include "paddle/fluid/framework/operator.h" + +namespace paddle { +namespace framework { +namespace ir { + +class RecordSkipMemoryOptVarsPass : public ir::Pass { + protected: + void ApplyImpl(ir::Graph* graph) const override { + PADDLE_ENFORCE(!graph->Has(kMemOptSkipVars)); + graph->Set(kMemOptSkipVars, new MemOptSkipVars); + auto& skip_vars = graph->Get(kMemOptSkipVars); + + std::vector op_nodes; + for (auto& node : graph->Nodes()) { + PADDLE_ENFORCE_NOT_NULL(node, "The node should not be nullptr."); + if (node->IsOp() && node->Op()) { + op_nodes.emplace_back(node); + } + } + + // Insert kEmptyVarName to avoid optimizing the empty variable + skip_vars.insert(framework::kEmptyVarName); + + // NOTE(zcd): Insert OpRoleVars to SkipVarSet to prevent these vars from + // being renamed by the memory optimize pass.
+ InsertOpRoleVarsToSkipVarSet(op_nodes, &skip_vars); + + InsertSkipMemOptOpInOutToSkipVarSet(op_nodes, &skip_vars); + } + + private: + static void InsertOpRoleVarsToSkipVarSet(const std::vector& ops, + MemOptSkipVars* skip_vars) { + for (auto& node : ops) { + try { + auto op_role_vars = + boost::get>(node->Op()->GetNullableAttr( + OpProtoAndCheckerMaker::OpRoleVarAttrName())); + PADDLE_ENFORCE_EQ(op_role_vars.size() % 2, 0); + for (size_t i = 0; i < op_role_vars.size(); i += 2) { + auto& g_name = op_role_vars[i + 1]; + skip_vars->insert(g_name); + } + } catch (boost::bad_get& e) { + } + } + } + + static void UpdateSkipVarSet( + MemOptSkipVars* skip_vars, + const std::vector>& var_names) { + for (auto& var_name : var_names) { + skip_vars->insert(var_name.begin(), var_name.end()); + } + } + + static std::vector ToGradVarName( + const std::vector& names) { + std::vector ret; + ret.reserve(names.size()); + for (auto& name : names) { + if (name != framework::kEmptyVarName) { + ret.emplace_back(framework::GradVarName(name)); + } + } + return ret; + } + + static void InsertSkipMemOptOpInOutToSkipVarSet( + const std::vector& ops, MemOptSkipVars* skip_vars) { + static std::unordered_set kSkipMemOptOps{ + "send", "recv", "prefetch", "send_barrier", "fetch_barrier"}; + + for (auto& node : ops) { + auto* op_desc = node->Op(); + // Some ops (while, conditional_block, recurrent, etc.) have sub-blocks. + // These ops often use variables from their parent or forward blocks. + // Optimizing the in/outs of such ops would make these variables + // unavailable when running the sub-block ops. + if (OpHasSubBlock(op_desc)) { + UpdateSkipVarSet(skip_vars, {op_desc->InputArgumentNames(), + op_desc->OutputArgumentNames()}); + } + + // Skip ops that are related to the parameter server. + // In distributed mode, trainers and parameter servers use the same + // variable names to track the same variables. We cannot change the + // names of these variables, otherwise trainers or parameter + // servers would not find them. + if (kSkipMemOptOps.count(op_desc->Type()) > 0) { + UpdateSkipVarSet(skip_vars, {op_desc->InputArgumentNames(), + op_desc->OutputArgumentNames()}); + } + + // FIXME(zjl): some ops use variables that are not from their + // inputs or outputs. We do not have a nice method to solve this + // issue yet. Currently, we should skip these variables when + // memory optimization is enabled. + auto op_type = op_desc->Type(); + if (op_type == "while_grad") { + // In while_grad, framework::GradVarName(Input("X")) is visited + // without being any in/out of while_grad. While_grad uses + // these variables to accumulate the gradient of X across time steps. + UpdateSkipVarSet(skip_vars, {ToGradVarName(op_desc->Input("X"))}); + } else if (op_type == "conditional_block_grad") { + // In conditional_block_grad, framework::GradVarName(Input("Input", + // "Cond")) is visited without being any in/out of + // conditional_block_grad. Conditional_block_grad uses these + // variables to accumulate the gradients of Input/Cond across time steps. + UpdateSkipVarSet(skip_vars, {ToGradVarName(op_desc->Input("Input")), + ToGradVarName(op_desc->Input("Cond"))}); + } else if (op_type == "recurrent" || op_type == "recurrent_grad") { + // Recurrent and recurrent_grad ops are implemented in a very tricky + // way. Attr("states", "ex_states") is visited without being any + // in/out of the op. This is because these variables come from sub-blocks, + // not the main block.
Adding these variables to the inputs would make recurrent + // fail since "states" and "ex_states" cannot be found in the main block. + // When memory optimization is enabled, "states", "ex_states" and their + // gradients should be skipped. + auto& ex_states = + boost::get>(op_desc->GetAttr("ex_states")); + auto& states = + boost::get>(op_desc->GetAttr("states")); + if (op_type == "recurrent") { + UpdateSkipVarSet(skip_vars, {ex_states, states}); + } else { + // In recurrent_grad, framework::GradVarName(Input("parameters", + // "input")) is visited without being any in/out of recurrent_grad. + // Recurrent_grad uses these variables to accumulate the gradients of + // parameters/input across time steps. + UpdateSkipVarSet( + skip_vars, + {ToGradVarName(op_desc->Input("parameters")), + ToGradVarName(op_desc->Input("input")), ex_states, states, + ToGradVarName(ex_states), ToGradVarName(states)}); + } + } + } + } +}; + +} // namespace ir +} // namespace framework +} // namespace paddle + +REGISTER_PASS(record_skip_memory_opt_vars_pass, + paddle::framework::ir::RecordSkipMemoryOptVarsPass); diff --git a/paddle/fluid/framework/details/reference_count_pass.cc b/paddle/fluid/framework/ir/memory_optimize_pass/reference_count_pass.cc similarity index 84% rename from paddle/fluid/framework/details/reference_count_pass.cc rename to paddle/fluid/framework/ir/memory_optimize_pass/reference_count_pass.cc index 31c32cc2e7b0354b2f624f457326f33409d276e2..b927da2c3fb189dd5bb96371b033019432d5679a 100644 --- a/paddle/fluid/framework/details/reference_count_pass.cc +++ b/paddle/fluid/framework/ir/memory_optimize_pass/reference_count_pass.cc @@ -24,14 +24,20 @@ #include "paddle/fluid/framework/details/computation_op_handle.h" #include "paddle/fluid/framework/details/eager_deletion_op_handle.h" #include "paddle/fluid/framework/details/multi_devices_helper.h" -#include "paddle/fluid/framework/details/op_graph_view.h" -#include "paddle/fluid/framework/details/reference_count_pass.h" -#include "paddle/fluid/framework/details/reference_count_pass_helper.h" +#include "paddle/fluid/framework/ir/graph.h" #include "paddle/fluid/framework/ir/graph_helper.h" +#include "paddle/fluid/framework/ir/memory_optimize_pass/op_graph_view.h" +#include "paddle/fluid/framework/ir/memory_optimize_pass/reference_count_pass_helper.h" +#include "paddle/fluid/framework/ir/pass.h" namespace paddle { namespace framework { -namespace details { +namespace ir { + +class ReferenceCountPass : public ir::Pass { + protected: + void ApplyImpl(ir::Graph *graph) const override; +}; // A functor to shrink/remove operators that depend on other operators in a set class ShrinkDepsOpFunctor { @@ -39,19 +45,21 @@ class ShrinkDepsOpFunctor { enum RelationShip { kSame = 0, kNoDeps = 1, kBefore = 2, kAfter = 3 }; public: - explicit ShrinkDepsOpFunctor(const std::vector &all_ops) + explicit ShrinkDepsOpFunctor( + const std::vector &all_ops) : graph_(all_ops) {} template OpSet operator()(const OpSet &op_set) const { using KeyType = typename OpSet::key_type; static_assert( - std::is_base_of::type>::value, - "Key type of OpSet must be OpHandleBase, or derived of OpHandleBase"); + "Key type of OpSet must be details::OpHandleBase, or derived from " + "details::OpHandleBase"); if (op_set.size() <= 1) return op_set; - std::vector ops(op_set.begin(), op_set.end()); + std::vector ops(op_set.begin(), op_set.end()); OpSet ret; auto rels = GetRelations(ops); auto not_before = [](RelationShip r) { return r != kBefore; }; @@ -65,8 +73,8 @@ class ShrinkDepsOpFunctor { private: std::vector>
GetRelations( - const std::vector &ops) const { - std::unordered_map op_to_idx; + const std::vector &ops) const { + std::unordered_map op_to_idx; for (size_t i = 0; i < ops.size(); ++i) { PADDLE_ENFORCE(graph_.HasOp(ops[i]), "Op does not exist in graph"); op_to_idx[ops[i]] = i; @@ -81,7 +89,7 @@ class ShrinkDepsOpFunctor { size_t found_num = ops.size(); size_t total_num = ops.size() * ops.size(); - auto visitor = [&](OpHandleBase *op, size_t i) { + auto visitor = [&](details::OpHandleBase *op, size_t i) { auto it = op_to_idx.find(op); if (it != op_to_idx.end()) { size_t j = it->second; @@ -98,7 +106,9 @@ class ShrinkDepsOpFunctor { }; for (size_t i = 0; i < ops.size(); ++i) { - auto sub_visitor = [&, i](OpHandleBase *op) { return visitor(op, i); }; + auto sub_visitor = [&, i](details::OpHandleBase *op) { + return visitor(op, i); + }; if (!graph_.VisitAllPendingOps(ops[i], sub_visitor)) { break; } @@ -133,8 +143,8 @@ class ShrinkDepsOpFunctor { */ static bool ShrinkNoNeedBufferVarOpDependency( const std::string &var_name, - std::unordered_set *op_handles) { - std::vector skip_ops; + std::unordered_set *op_handles) { + std::vector skip_ops; for (auto *op_handle : *op_handles) { auto *op_base = op_handle->GetOp(); auto &inferer = op_base->Info().NoNeedBufferVarsInferer(); @@ -195,15 +205,15 @@ static bool ShrinkNoNeedBufferVarOpDependency( * Find the nearest downstream computation op handle. If the op is a * computation op, just return itself. */ -static ComputationOpHandle *FindNextComputationOpHandleOrReturnItself( - OpHandleBase *op, size_t scope_idx) { - std::queue q; - std::unordered_set visited; +static details::ComputationOpHandle *FindNextComputationOpHandleOrReturnItself( + details::OpHandleBase *op, size_t scope_idx) { + std::queue q; + std::unordered_set visited; q.push(op); while (!q.empty()) { auto *op = q.front(); q.pop(); - auto *compute_op = dynamic_cast(op); + auto *compute_op = dynamic_cast(op); if (compute_op != nullptr && compute_op->GetScopeIdx() == scope_idx) { return compute_op; } @@ -220,13 +230,13 @@ static ComputationOpHandle *FindNextComputationOpHandleOrReturnItself( enum LastLiveOpSearchStatus { kSuccess, kFailure, kShouldPrecede }; -static std::unordered_set -ExtractComputationOpFromLastLivedVar(VarHandle *var, size_t scope_idx, +static std::unordered_set +ExtractComputationOpFromLastLivedVar(details::VarHandle *var, size_t scope_idx, const std::string &var_name, const ShrinkDepsOpFunctor &shrink_func, LastLiveOpSearchStatus *status) { // Stage one. Get the last op for the variable. - std::unordered_set candidates; + std::unordered_set candidates; { if (var->PendingOps().empty() && var->GeneratedOp()) { // No operator depends on this variable. So the last operator is the op @@ -251,7 +261,7 @@ ExtractComputationOpFromLastLivedVar(VarHandle *var, size_t scope_idx, // Some op handles may operate on many DeviceContexts; however, our garbage // collector can only wait on one DeviceContext for now. So currently, we wait // for the nearest compute op.
- std::unordered_set computation_op; + std::unordered_set computation_op; { for (auto *op : candidates) { auto *compute_op = @@ -293,13 +303,13 @@ void ReferenceCountPass::ApplyImpl(ir::Graph *graph) const { "Last Live Ops and Reference Counts of vars should be " "initialized here."); - const auto &vars = graph->Get(kGraphVars); + const auto &vars = graph->Get(details::kGraphVars); last_live_ops_of_vars.resize(vars.size()); ref_cnts.resize(vars.size()); ShrinkDepsOpFunctor shrink_func( - ir::FilterByNodeWrapper(*graph)); + ir::FilterByNodeWrapper(*graph)); VLOG(1) << "Place number: " << vars.size(); for (size_t i = 0; i < vars.size(); ++i) { @@ -360,11 +370,10 @@ void ReferenceCountPass::ApplyImpl(ir::Graph *graph) const { } } -} // namespace details +} // namespace ir } // namespace framework } // namespace paddle -REGISTER_PASS(reference_count_pass, - paddle::framework::details::ReferenceCountPass) - .RequirePassAttr(paddle::framework::details::kGlobalReferenceCount) - .RequirePassAttr(paddle::framework::details::kLastLiveOpsOfVars); +REGISTER_PASS(reference_count_pass, paddle::framework::ir::ReferenceCountPass) + .RequirePassAttr(paddle::framework::ir::kGlobalReferenceCount) + .RequirePassAttr(paddle::framework::ir::kLastLiveOpsOfVars); diff --git a/paddle/fluid/framework/details/reference_count_pass_helper.cc b/paddle/fluid/framework/ir/memory_optimize_pass/reference_count_pass_helper.cc similarity index 67% rename from paddle/fluid/framework/details/reference_count_pass_helper.cc rename to paddle/fluid/framework/ir/memory_optimize_pass/reference_count_pass_helper.cc index 94de0e6ab0a91d90a7f2c4c4fc14eb78663c95fe..ed87f73adf137fdf545209f36f996417031fcda4 100644 --- a/paddle/fluid/framework/details/reference_count_pass_helper.cc +++ b/paddle/fluid/framework/ir/memory_optimize_pass/reference_count_pass_helper.cc @@ -12,23 +12,24 @@ // See the License for the specific language governing permissions and // limitations under the License.
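The shrink functor above drives its reachability queries through OpGraphView::VisitAllPendingOps, whose callback returns false to cut the traversal short. A small usage sketch under the new namespace split (OpGraphView now lives in ir while op handles stay in details; CountReachableOps is a hypothetical helper, not part of this patch):

    // Hypothetical helper: count the ops reachable downstream of `op`.
    size_t CountReachableOps(const paddle::framework::ir::OpGraphView &view,
                             paddle::framework::details::OpHandleBase *op) {
      size_t n = 0;
      view.VisitAllPendingOps(
          op, [&n](paddle::framework::details::OpHandleBase *) {
            ++n;          // count every pending op,
            return true;  // and never stop the traversal early
          });
      return n;
    }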
-#include "paddle/fluid/framework/details/reference_count_pass_helper.h" +#include "paddle/fluid/framework/ir/memory_optimize_pass/reference_count_pass_helper.h" #include "paddle/fluid/framework/details/var_handle.h" #include "paddle/fluid/framework/var_desc.h" namespace paddle { namespace framework { -namespace details { +namespace ir { -VarDesc *TryGetLatestVarDesc(const std::vector &vars) { +VarDesc *TryGetLatestVarDesc(const std::vector &vars) { VarDesc *var_desc = nullptr; - std::find_if(vars.rbegin(), vars.rend(), [&](VarHandle *var_handle) -> bool { - var_desc = var_handle->Node()->Var(); - return var_desc != nullptr; - }); + std::find_if(vars.rbegin(), vars.rend(), + [&](details::VarHandle *var_handle) -> bool { + var_desc = var_handle->Node()->Var(); + return var_desc != nullptr; + }); return var_desc; } -} // namespace details +} // namespace ir } // namespace framework } // namespace paddle diff --git a/paddle/fluid/framework/details/reference_count_pass_helper.h b/paddle/fluid/framework/ir/memory_optimize_pass/reference_count_pass_helper.h similarity index 81% rename from paddle/fluid/framework/details/reference_count_pass_helper.h rename to paddle/fluid/framework/ir/memory_optimize_pass/reference_count_pass_helper.h index ce700119c54ddd711315dfa45d61b9241cfda651..d5e6fa17fd4e85f3f7bcb2c171d7e20a6ffc583c 100644 --- a/paddle/fluid/framework/details/reference_count_pass_helper.h +++ b/paddle/fluid/framework/ir/memory_optimize_pass/reference_count_pass_helper.h @@ -22,17 +22,16 @@ #include #include +#include "paddle/fluid/framework/details/computation_op_handle.h" +#include "paddle/fluid/framework/details/var_handle.h" #include "paddle/fluid/framework/garbage_collector.h" namespace paddle { namespace framework { class VarDesc; -class VarHandle; -namespace details { - -class ComputationOpHandle; +namespace ir { using ReferenceCountMap = std::unordered_map; @@ -48,11 +47,12 @@ const char kGarbageCollector[] = "garbage_collector"; const char kAllPlaces[] = "all_places"; using LastLiveOpsOfVars = - std::unordered_map>; + std::unordered_map>; const char kLastLiveOpsOfVars[] = "last_live_ops_of_var"; -VarDesc *TryGetLatestVarDesc(const std::vector &vars); +VarDesc *TryGetLatestVarDesc(const std::vector &vars); -} // namespace details +} // namespace ir } // namespace framework } // namespace paddle diff --git a/paddle/fluid/framework/details/while_op_eager_deletion_pass.cc b/paddle/fluid/framework/ir/memory_optimize_pass/while_op_eager_deletion_pass.cc similarity index 88% rename from paddle/fluid/framework/details/while_op_eager_deletion_pass.cc rename to paddle/fluid/framework/ir/memory_optimize_pass/while_op_eager_deletion_pass.cc index 8f7c99f12a6338ad99d988d3eda3759e323f64bb..63f996ade5648c80ab3e505ca9cddd80f93a7ef4 100644 --- a/paddle/fluid/framework/details/while_op_eager_deletion_pass.cc +++ b/paddle/fluid/framework/ir/memory_optimize_pass/while_op_eager_deletion_pass.cc @@ -19,19 +19,19 @@ namespace paddle { namespace framework { -namespace details { +namespace ir { class WhileOpEagerDeletionPass : public ir::Pass { protected: void ApplyImpl(ir::Graph *graph) const override { - auto all_ops = ir::FilterByNodeWrapper(*graph); + auto all_ops = ir::FilterByNodeWrapper(*graph); // Find all while_op and while_grad_op std::unordered_map, std::vector>> target_ops; for (auto *op : all_ops) { - auto compute_op = dynamic_cast(op); + auto compute_op = dynamic_cast(op); if (compute_op == nullptr) continue; if (compute_op->Name() == "while") { @@ -52,9 +52,9 @@ class 
WhileOpEagerDeletionPass : public ir::Pass { } }; -} // namespace details +} // namespace ir } // namespace framework } // namespace paddle REGISTER_PASS(while_op_eager_deletion_pass, - paddle::framework::details::WhileOpEagerDeletionPass); + paddle::framework::ir::WhileOpEagerDeletionPass); diff --git a/paddle/fluid/framework/ir/multi_devices_graph_pass/CMakeLists.txt b/paddle/fluid/framework/ir/multi_devices_graph_pass/CMakeLists.txt new file mode 100644 index 0000000000000000000000000000000000000000..096428e58ab17deda14e70229ef033dbdd7bd04b --- /dev/null +++ b/paddle/fluid/framework/ir/multi_devices_graph_pass/CMakeLists.txt @@ -0,0 +1,16 @@ +cc_library(modify_op_lock_and_record_event_pass SRCS modify_op_lock_and_record_event_pass.cc DEPS computation_op_handle op_graph_view multi_devices_helper) + +cc_library(multi_devices_graph_print_pass SRCS multi_devices_graph_print_pass.cc DEPS multi_devices_helper) +cc_library(multi_devices_graph_check_pass SRCS multi_devices_graph_check_pass.cc DEPS multi_devices_helper) + +set(ALL_REDUCE_OP_HANDLES all_reduce_op_handle) +if(WITH_GPU AND WITH_DGC) + list(APPEND ALL_REDUCE_OP_HANDLES sparse_all_reduce_op_handle) +endif() + +cc_library(multi_devices_graph_pass SRCS multi_devices_graph_pass.cc DEPS multi_devices_helper computation_op_handle + scale_loss_grad_op_handle rpc_op_handle fetch_barrier_op_handle ${ALL_REDUCE_OP_HANDLES} reduce_op_handle broadcast_op_handle fused_broadcast_op_handle) +cc_library(sequential_execution_pass SRCS sequential_execution_pass.cc DEPS graph graph_helper pass) + +cc_library(fuse_all_reduce_op_pass SRCS fuse_all_reduce_op_pass.cc DEPS graph graph_helper fused_all_reduce_op_handle) +cc_library(all_reduce_deps_pass SRCS all_reduce_deps_pass.cc DEPS all_reduce_op_handle graph graph_helper pass) diff --git a/paddle/fluid/framework/details/all_reduce_deps_pass.cc b/paddle/fluid/framework/ir/multi_devices_graph_pass/all_reduce_deps_pass.cc similarity index 76% rename from paddle/fluid/framework/details/all_reduce_deps_pass.cc rename to paddle/fluid/framework/ir/multi_devices_graph_pass/all_reduce_deps_pass.cc index c44793cd11d22b29b4b3422a047d81fe26624982..314f8c0424d5fd9b9908f462ccf2227e1dd983db 100644 --- a/paddle/fluid/framework/details/all_reduce_deps_pass.cc +++ b/paddle/fluid/framework/ir/multi_devices_graph_pass/all_reduce_deps_pass.cc @@ -23,7 +23,6 @@ #include "paddle/fluid/framework/details/all_reduce_op_handle.h" #include "paddle/fluid/framework/details/container_cast.h" #include "paddle/fluid/framework/details/multi_devices_helper.h" -#include "paddle/fluid/framework/details/op_graph_view.h" #include "paddle/fluid/framework/ir/graph.h" #include "paddle/fluid/framework/ir/graph_helper.h" #include "paddle/fluid/framework/ir/pass.h" @@ -31,17 +30,18 @@ namespace paddle { namespace framework { -namespace details { +namespace ir { class AllReduceDepsPass : public ir::Pass { protected: void ApplyImpl(ir::Graph* graph) const override { - std::vector all_reduce_op_handles = + std::vector all_reduce_op_handles = GetSortedAllReduceOps(*graph); for (size_t i = 1; i < all_reduce_op_handles.size(); ++i) { - auto* dep_var = new DummyVarHandle(graph->CreateControlDepVar()); - graph->Get(kGraphDepVars).emplace(dep_var); + auto* dep_var = new details::DummyVarHandle(graph->CreateControlDepVar()); + graph->Get(details::kGraphDepVars) + .emplace(dep_var); all_reduce_op_handles[i - 1]->AddOutput(dep_var); all_reduce_op_handles[i]->AddInput(dep_var); } @@ -51,16 +51,16 @@ class AllReduceDepsPass : public ir::Pass { } } - std::vector 
GetSortedAllReduceOps( + std::vector GetSortedAllReduceOps( const ir::Graph& graph) const { - std::vector all_reduce_op_handles; - std::unordered_map pending_ops; - std::unordered_set ready_ops; - std::unordered_set next_ready_ops; + std::vector all_reduce_op_handles; + std::unordered_map pending_ops; + std::unordered_set ready_ops; + std::unordered_set next_ready_ops; - auto op_handles = ir::FilterByNodeWrapper(graph); + auto op_handles = ir::FilterByNodeWrapper(graph); size_t num_of_ops = op_handles.size(); - for (OpHandleBase* op : op_handles) { + for (details::OpHandleBase* op : op_handles) { size_t not_ready_vars = op->NotReadyInputSize(); if (not_ready_vars) { pending_ops.insert({op, not_ready_vars}); @@ -94,11 +94,12 @@ class AllReduceDepsPass : public ir::Pass { } void GetSortedAllReduceOps( - const std::unordered_set& ready_ops, - std::vector* all_reduce_op_handles) const { - std::vector current_all_reduce_op_handles; + const std::unordered_set& ready_ops, + std::vector* all_reduce_op_handles) const { + std::vector current_all_reduce_op_handles; for (auto& op_handle : ready_ops) { - auto all_reduce_op_handle = dynamic_cast(op_handle); + auto all_reduce_op_handle = + dynamic_cast(op_handle); if (all_reduce_op_handle) { current_all_reduce_op_handles.emplace_back(all_reduce_op_handle); } @@ -109,10 +110,12 @@ class AllReduceDepsPass : public ir::Pass { // Sort the current_all_reduce_op_handles according to the name of input. sort(current_all_reduce_op_handles.begin(), current_all_reduce_op_handles.end(), - [](const AllReduceOpHandle* left, - const AllReduceOpHandle* right) -> bool { - auto left_in_vars = DynamicCast(left->Inputs()); - auto right_in_vars = DynamicCast(right->Inputs()); + [](const details::AllReduceOpHandle* left, + const details::AllReduceOpHandle* right) -> bool { + auto left_in_vars = + details::DynamicCast(left->Inputs()); + auto right_in_vars = + details::DynamicCast(right->Inputs()); PADDLE_ENFORCE_GT(left_in_vars.size(), 0); PADDLE_ENFORCE_EQ(left_in_vars.size(), right_in_vars.size()); return left_in_vars[0]->Name() > right_in_vars[0]->Name(); @@ -123,15 +126,15 @@ class AllReduceDepsPass : public ir::Pass { current_all_reduce_op_handles.end()); } - void DebugString( - const ir::Graph& graph, - const std::vector& all_reduce_op_handles) const { + void DebugString(const ir::Graph& graph, + const std::vector& + all_reduce_op_handles) const { // get vars order std::map> vars = GetSoredGradientsFromStaleProgram(graph); std::stringstream out; size_t grads_of_stale_program = 0; - out << "Get Order From kStaleProgramOpDescs: "; + out << "Get Order From details::kStaleProgramOpDescs: "; for (auto& var : vars) { out << "Order " << var.first << " ["; for (auto& var_name : var.second) { @@ -147,7 +150,7 @@ class AllReduceDepsPass : public ir::Pass { for (auto& op : all_reduce_op_handles) { bool find_valid_input = false; for (auto& in_var : op->Inputs()) { - if (dynamic_cast(in_var)) { + if (dynamic_cast(in_var)) { out2 << in_var->Name() << ", "; find_valid_input = true; break; @@ -165,7 +168,8 @@ class AllReduceDepsPass : public ir::Pass { std::map> GetSoredGradientsFromStaleProgram( const ir::Graph& graph) const { std::map> vars; - auto ops = graph.Get>(kStaleProgramOpDescs); + auto ops = + graph.Get>(details::kStaleProgramOpDescs); int order = 0; for (auto* op_desc : ops) { try { @@ -193,10 +197,9 @@ class AllReduceDepsPass : public ir::Pass { return vars; } }; -} // namespace details +} // namespace ir } // namespace framework } // namespace paddle 
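Stripped of the renames, the core of the pass is unchanged: once the all-reduce ops are sorted, consecutive ops are serialized through dummy control-dependency variables, as the ApplyImpl hunk above shows. Reduced to its essentials (the template argument on Get is elided in the hunk above; details::GraphDepVars is the graph-attribute type assumed here):

    for (size_t i = 1; i < all_reduce_op_handles.size(); ++i) {
      // A control-dep var carries no data; it only orders the two ops.
      auto *dep_var = new details::DummyVarHandle(graph->CreateControlDepVar());
      graph->Get<details::GraphDepVars>(details::kGraphDepVars).emplace(dep_var);
      all_reduce_op_handles[i - 1]->AddOutput(dep_var);  // finishes first
      all_reduce_op_handles[i]->AddInput(dep_var);       // then this one runs
    }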
-REGISTER_PASS(all_reduce_deps_pass, - paddle::framework::details::AllReduceDepsPass) +REGISTER_PASS(all_reduce_deps_pass, paddle::framework::ir::AllReduceDepsPass) .RequireGraphAttr(paddle::framework::details::kStaleProgramOpDescs); diff --git a/paddle/fluid/framework/details/fuse_all_reduce_op_pass.cc b/paddle/fluid/framework/ir/multi_devices_graph_pass/fuse_all_reduce_op_pass.cc similarity index 75% rename from paddle/fluid/framework/details/fuse_all_reduce_op_pass.cc rename to paddle/fluid/framework/ir/multi_devices_graph_pass/fuse_all_reduce_op_pass.cc index 31efd78ad3dbed73d7993bac47694c9d6d742343..a2b4c37ab4ace84a48fb428131fc9f92b9d866c3 100644 --- a/paddle/fluid/framework/details/fuse_all_reduce_op_pass.cc +++ b/paddle/fluid/framework/ir/multi_devices_graph_pass/fuse_all_reduce_op_pass.cc @@ -24,21 +24,22 @@ namespace paddle { namespace framework { -namespace details { +namespace ir { class FuseAllReduceOpPass : public ir::Pass { protected: void ApplyImpl(ir::Graph *graph) const override { ir::Graph &result = *graph; - auto &places = Get>(kPlaces); - auto &local_scopes = Get>(kLocalScopes); + auto &places = Get>(details::kPlaces); + auto &local_scopes = Get>(details::kLocalScopes); #if defined(PADDLE_WITH_CUDA) && !defined(_WIN32) - auto *nccl_ctxs = &Get(kNCCLCtxs); + auto *nccl_ctxs = &Get(details::kNCCLCtxs); #endif std::unordered_set grads; - auto &params_grads = result.Get(kParamsAndGrads); + auto &params_grads = + result.Get(details::kParamsAndGrads); size_t num_of_all_reduce = params_grads.size(); grads.reserve(num_of_all_reduce); for (auto p_g : params_grads) { @@ -50,11 +51,12 @@ class FuseAllReduceOpPass : public ir::Pass { all_reduce_ops.reserve(grads.size()); for (auto &node : result.Nodes()) { if (node->IsOp()) { - PADDLE_ENFORCE(node->IsWrappedBy()); - auto *all_reduce_op_handle = - dynamic_cast(&node->Wrapper()); + PADDLE_ENFORCE(node->IsWrappedBy()); + auto *all_reduce_op_handle = dynamic_cast( + &node->Wrapper()); if (all_reduce_op_handle) { - auto inputs = DynamicCast(all_reduce_op_handle->Inputs()); + auto inputs = details::DynamicCast( + all_reduce_op_handle->Inputs()); PADDLE_ENFORCE_EQ(inputs.size(), num_place); // The inputs' names should be the same.
auto &grad_name = inputs[0]->name(); @@ -80,7 +82,7 @@ class FuseAllReduceOpPass : public ir::Pass { VLOG(10) << "Insert fused_all_reduce"; auto &group_grads_params = - graph->Get(kGroupGradsAndParams); + graph->Get(details::kGroupGradsAndParams); for (auto &group_g_p : group_grads_params) { size_t group_size = group_g_p.size(); @@ -108,24 +110,25 @@ class FuseAllReduceOpPass : public ir::Pass { const platform::NCCLContextMap *nccl_ctxs, #endif ir::Graph *result) const { - std::vector inputs; - std::vector outputs; + std::vector inputs; + std::vector outputs; for (auto &op : all_reduce_ops) { - auto &op_handle = op->Wrapper(); + auto &op_handle = op->Wrapper(); inputs.insert(inputs.end(), op_handle.Inputs().begin(), op_handle.Inputs().end()); // Remove output for_each(op_handle.Inputs().begin(), op_handle.Inputs().end(), - [&op_handle](VarHandleBase *var_handle) { + [&op_handle](details::VarHandleBase *var_handle) { var_handle->RemoveOutput(&op_handle, op_handle.Node()); }); outputs.insert(outputs.end(), op_handle.Outputs().begin(), op_handle.Outputs().end()); // Remove Input - for_each( - op_handle.Outputs().begin(), op_handle.Outputs().end(), - [](VarHandleBase *var_handle) { var_handle->ClearGeneratedOp(); }); + for_each(op_handle.Outputs().begin(), op_handle.Outputs().end(), + [](details::VarHandleBase *var_handle) { + var_handle->ClearGeneratedOp(); + }); result->RemoveNode(op_handle.Node()); } @@ -140,21 +143,22 @@ class FuseAllReduceOpPass : public ir::Pass { } private: - void CreateFusedAllReduceOp(const std::vector &inputs, - const std::vector &outputs, - const size_t num_of_all_reduce, - const std::vector &places, - const std::vector &local_scopes, + void CreateFusedAllReduceOp( + const std::vector &inputs, + const std::vector &outputs, + const size_t num_of_all_reduce, + const std::vector &places, + const std::vector &local_scopes, #if defined(PADDLE_WITH_CUDA) && !defined(_WIN32) - const platform::NCCLContextMap *nccl_ctxs, + const platform::NCCLContextMap *nccl_ctxs, #endif - ir::Graph *result) const { + ir::Graph *result) const { #if defined(PADDLE_WITH_CUDA) && !defined(_WIN32) - auto *op_handle = new FusedAllReduceOpHandle( + auto *op_handle = new details::FusedAllReduceOpHandle( result->CreateEmptyNode("fused_all_reduce", ir::Node::Type::kOperation), local_scopes, places, num_of_all_reduce, nccl_ctxs); #else - auto *op_handle = new FusedAllReduceOpHandle( + auto *op_handle = new details::FusedAllReduceOpHandle( result->CreateEmptyNode("fused_all_reduce", ir::Node::Type::kOperation), local_scopes, places, num_of_all_reduce); #endif @@ -176,8 +180,9 @@ class FuseAllReduceOpPass : public ir::Pass { #endif } - void SetCommunicationContext(const std::vector &places, - FusedAllReduceOpHandle *op_handle) const { + void SetCommunicationContext( + const std::vector &places, + details::FusedAllReduceOpHandle *op_handle) const { for (size_t i = 0; i < places.size(); ++i) { op_handle->SetDeviceContext( places[i], platform::DeviceContextPool::Instance().Get(places[i])); @@ -185,9 +190,9 @@ class FuseAllReduceOpPass : public ir::Pass { } }; -} // namespace details +} // namespace ir } // namespace framework } // namespace paddle REGISTER_PASS(fuse_all_reduce_op_pass, - paddle::framework::details::FuseAllReduceOpPass); + paddle::framework::ir::FuseAllReduceOpPass); diff --git a/paddle/fluid/framework/details/modify_op_lock_and_record_event_pass.cc b/paddle/fluid/framework/ir/multi_devices_graph_pass/modify_op_lock_and_record_event_pass.cc similarity index 53% rename from 
paddle/fluid/framework/details/modify_op_lock_and_record_event_pass.cc rename to paddle/fluid/framework/ir/multi_devices_graph_pass/modify_op_lock_and_record_event_pass.cc index ae363f96393bddac4c88c7caf0ef6087ea848fb9..e9b35aefc94e8544455e9559746990cdb4362ebb 100644 --- a/paddle/fluid/framework/details/modify_op_lock_and_record_event_pass.cc +++ b/paddle/fluid/framework/ir/multi_devices_graph_pass/modify_op_lock_and_record_event_pass.cc @@ -12,21 +12,20 @@ // See the License for the specific language governing permissions and // limitations under the License. -#include "paddle/fluid/framework/details/modify_op_lock_and_record_event_pass.h" #include "paddle/fluid/framework/details/computation_op_handle.h" #include "paddle/fluid/framework/details/multi_devices_helper.h" -#include "paddle/fluid/framework/details/op_graph_view.h" #include "paddle/fluid/framework/ir/graph_helper.h" +#include "paddle/fluid/framework/ir/memory_optimize_pass/op_graph_view.h" namespace paddle { namespace framework { -namespace details { +namespace ir { static bool IsLockAndRecordEventFreeComputationOpHandle( - ComputationOpHandle *op, const OpGraphView &graph_view) { + details::ComputationOpHandle *op, const OpGraphView &graph_view) { if (!platform::is_gpu_place(op->GetPlace())) return false; for (auto &pending_op : graph_view.PendingOps(op)) { - auto *tmp = dynamic_cast(pending_op); + auto *tmp = dynamic_cast(pending_op); if (tmp == nullptr || !(tmp->GetPlace() == op->GetPlace())) { return false; } @@ -34,25 +33,27 @@ static bool IsLockAndRecordEventFreeComputationOpHandle( return true; } -void ModifyOpLockAndRecordEventPass::ApplyImpl(ir::Graph *ir_graph) const { - auto all_ops = ir::FilterByNodeWrapper(*ir_graph); - OpGraphView graph_view(all_ops); - for (auto &op : all_ops) { - auto *compute_op = dynamic_cast(op); - if (compute_op == nullptr) continue; - bool is_lock_and_record_event_free = - IsLockAndRecordEventFreeComputationOpHandle(compute_op, graph_view); - compute_op->SetLockAndRecordEventFree(is_lock_and_record_event_free); - if (is_lock_and_record_event_free) { - VLOG(10) << "Set is_lock_and_record_event_free be true in op " - << compute_op->DebugString(); +class ModifyOpLockAndRecordEventPass : public ir::Pass { + protected: + void ApplyImpl(ir::Graph *graph) const override { + auto all_ops = ir::FilterByNodeWrapper(*graph); + OpGraphView graph_view(all_ops); + for (auto &op : all_ops) { + auto *compute_op = dynamic_cast(op); + if (compute_op == nullptr) continue; + bool is_lock_and_record_event_free = + IsLockAndRecordEventFreeComputationOpHandle(compute_op, graph_view); + compute_op->SetLockAndRecordEventFree(is_lock_and_record_event_free); + if (is_lock_and_record_event_free) { + VLOG(10) << "Set is_lock_and_record_event_free to true in op " + << compute_op->DebugString(); + } } } -} - -} // namespace details +}; +} // namespace ir } // namespace framework } // namespace paddle REGISTER_PASS(modify_op_lock_and_record_event_pass, - paddle::framework::details::ModifyOpLockAndRecordEventPass); + paddle::framework::ir::ModifyOpLockAndRecordEventPass); diff --git a/paddle/fluid/framework/details/multi_devices_graph_check_pass.cc b/paddle/fluid/framework/ir/multi_devices_graph_pass/multi_devices_graph_check_pass.cc similarity index 76% rename from paddle/fluid/framework/details/multi_devices_graph_check_pass.cc rename to paddle/fluid/framework/ir/multi_devices_graph_pass/multi_devices_graph_check_pass.cc index 9859b04dec4193812769cc63d4489a9150b973f2..8cc33a6ceb9f14d6360f03625a83bee23a577c9f 100644 ---
a/paddle/fluid/framework/details/multi_devices_graph_check_pass.cc +++ b/paddle/fluid/framework/ir/multi_devices_graph_pass/multi_devices_graph_check_pass.cc @@ -19,7 +19,7 @@ namespace paddle { namespace framework { -namespace details { +namespace ir { class SSAGraghBuilderWithChecker : public ir::Pass { protected: @@ -28,19 +28,19 @@ class SSAGraghBuilderWithChecker : public ir::Pass { } bool IsValidGraph(const ir::Graph *graph) const { - std::unordered_map pending_ops; - std::unordered_set pending_vars; - std::unordered_set ready_vars; - std::unordered_set ready_ops; + std::unordered_map pending_ops; + std::unordered_set pending_vars; + std::unordered_set ready_vars; + std::unordered_set ready_ops; - auto insert_pending_var = [&](VarHandleBase *var) { + auto insert_pending_var = [&](details::VarHandleBase *var) { pending_vars.insert(var); if (var->GeneratedOp() == nullptr) { ready_vars.emplace(var); } }; - for (auto &var_map : graph->Get(kGraphVars)) { + for (auto &var_map : graph->Get(details::kGraphVars)) { for (auto &name_pair : var_map) { for (auto &version_pair : name_pair.second) { insert_pending_var(version_pair); @@ -48,11 +48,12 @@ class SSAGraghBuilderWithChecker : public ir::Pass { } } - for (auto &var : graph->Get(kGraphDepVars)) { + for (auto &var : + graph->Get(details::kGraphDepVars)) { insert_pending_var(var); } - for (OpHandleBase *op : ir::FilterByNodeWrapper(*graph)) { + for (auto *op : ir::FilterByNodeWrapper(*graph)) { if (op->Inputs().empty()) { ready_ops.insert(op); } else { @@ -60,7 +61,7 @@ class SSAGraghBuilderWithChecker : public ir::Pass { } } - auto run_all_ops = [&](std::unordered_set &set) { + auto run_all_ops = [&](std::unordered_set &set) { for (auto *op : set) { for (auto out : op->Outputs()) { ready_vars.emplace(out); @@ -91,11 +92,11 @@ class SSAGraghBuilderWithChecker : public ir::Pass { } }; -} // namespace details +} // namespace ir } // namespace framework } // namespace paddle REGISTER_PASS(multi_devices_check_pass, - paddle::framework::details::SSAGraghBuilderWithChecker) + paddle::framework::ir::SSAGraghBuilderWithChecker) .RequireGraphAttr(paddle::framework::details::kGraphVars) .RequireGraphAttr(paddle::framework::details::kGraphDepVars); diff --git a/paddle/fluid/framework/details/multi_devices_graph_pass.cc b/paddle/fluid/framework/ir/multi_devices_graph_pass/multi_devices_graph_pass.cc similarity index 82% rename from paddle/fluid/framework/details/multi_devices_graph_pass.cc rename to paddle/fluid/framework/ir/multi_devices_graph_pass/multi_devices_graph_pass.cc index e9aab179d24945ee4a0067df7030192dedf56d58..a4cb0599ac4dd061836ff5d4e64a94ad56c72da5 100644 --- a/paddle/fluid/framework/details/multi_devices_graph_pass.cc +++ b/paddle/fluid/framework/ir/multi_devices_graph_pass/multi_devices_graph_pass.cc @@ -11,7 +11,7 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. -#include "paddle/fluid/framework/details/multi_devices_graph_pass.h" +#include "paddle/fluid/framework/ir/multi_devices_graph_pass/multi_devices_graph_pass.h" #include #include #include @@ -40,13 +40,13 @@ namespace paddle { namespace framework { -namespace details { +namespace ir { namespace { // TODO(panyx0718): Clean this up as well. // all operators. NOTE that even though we use a vector here, the operators are // unordered.
-typedef std::vector GraphOps; +typedef std::vector GraphOps; const char kGraphOps[] = "ops"; bool OpHaveRole(const ir::Node &node, const framework::OpRole &role) { @@ -56,7 +56,7 @@ bool OpHaveRole(const ir::Node &node, const framework::OpRole &role) { } void PolishGraphToSupportDataHazards(ir::Graph *graph) { - for (auto &var_map : graph->Get(kGraphVars)) { + for (auto &var_map : graph->Get(details::kGraphVars)) { for (auto &name_pair : var_map) { if (name_pair.second.size() <= 1) { continue; @@ -65,7 +65,7 @@ void PolishGraphToSupportDataHazards(ir::Graph *graph) { auto it_old = name_pair.second.rbegin(); ++it_old; for (; it_old != name_pair.second.rend(); it_new = it_old, ++it_old) { - OpHandleBase *write_op = (*it_new)->GeneratedOp(); + details::OpHandleBase *write_op = (*it_new)->GeneratedOp(); const auto &read_ops = (*it_old)->PendingOps(); for (auto *read_op : read_ops) { @@ -85,28 +85,31 @@ void PolishGraphToSupportDataHazards(ir::Graph *graph) { } if (has_dep) continue; - auto *dep_var = new DummyVarHandle(graph->CreateControlDepVar()); + auto *dep_var = + new details::DummyVarHandle(graph->CreateControlDepVar()); read_op->AddOutput(dep_var); write_op->AddInput(dep_var); - graph->Get(kGraphDepVars).emplace(dep_var); + graph->Get(details::kGraphDepVars) + .emplace(dep_var); } } } } } -VarHandle *CreateOrGetLatestVarHandle(ir::Graph *graph, ir::Node *node, - const platform::Place &place, - size_t place_offset) { - auto &var_holders = graph->Get(kGraphVars)[place_offset]; +details::VarHandle *CreateOrGetLatestVarHandle(ir::Graph *graph, ir::Node *node, + const platform::Place &place, + size_t place_offset) { + auto &var_holders = + graph->Get(details::kGraphVars)[place_offset]; auto &var_holder = var_holders[node->Name()]; - VarHandle *var = nullptr; + details::VarHandle *var = nullptr; if (var_holder.empty()) { if (node->Var()) { - var = new VarHandle(graph->CreateVarNode(node->Var()), 0, place_offset, - node->Name(), place); + var = new details::VarHandle(graph->CreateVarNode(node->Var()), 0, + place_offset, node->Name(), place); } else { - var = new VarHandle( + var = new details::VarHandle( graph->CreateEmptyNode(node->Name(), ir::Node::Type::kVariable), 0, place_offset, node->Name(), place); } @@ -117,14 +120,14 @@ VarHandle *CreateOrGetLatestVarHandle(ir::Graph *graph, ir::Node *node, return var; } -void CreateOpOutput(ir::Graph *graph, OpHandleBase *op_handle, +void CreateOpOutput(ir::Graph *graph, details::OpHandleBase *op_handle, ir::Node *new_node, const platform::Place &place, size_t place_offset) { - auto &vars = - graph->Get(kGraphVars)[place_offset][new_node->Name()]; + auto &vars = graph->Get( + details::kGraphVars)[place_offset][new_node->Name()]; size_t version = vars.size(); - auto var = - new VarHandle(new_node, version, place_offset, new_node->Name(), place); + auto var = new details::VarHandle(new_node, version, place_offset, + new_node->Name(), place); vars.emplace_back(var); op_handle->AddOutput(var); } @@ -134,8 +137,10 @@ void AddOutputToLeafOps(ir::Graph *graph) { if (!op->Outputs().empty()) { continue; } - auto *dummy_leaf = new DummyVarHandle(graph->CreateControlDepVar()); - graph->Get(kGraphDepVars).emplace(dummy_leaf); + auto *dummy_leaf = + new details::DummyVarHandle(graph->CreateControlDepVar()); + graph->Get(details::kGraphDepVars) + .emplace(dummy_leaf); op->AddOutput(dummy_leaf); } } @@ -148,11 +153,11 @@ void MultiDevSSAGraphBuilderBase::Init() const { loss_var_name_ = Get(kLossVarName); VLOG(10) << "Init MultiDevSSAGraphBuilder, loss name: " << 
loss_var_name_; - places_ = Get>(kPlaces); - local_scopes_ = Get>(kLocalScopes); - strategy_ = Get(kStrategy); + places_ = Get>(details::kPlaces); + local_scopes_ = Get>(details::kLocalScopes); + strategy_ = Get(kStrategy); #if defined(PADDLE_WITH_CUDA) && !defined(_WIN32) - nccl_ctxs_ = &Get(kNCCLCtxs); + nccl_ctxs_ = &Get(details::kNCCLCtxs); #endif PADDLE_ENFORCE_EQ(places_.size(), local_scopes_.size()); } @@ -172,8 +177,8 @@ void MultiDevSSAGraphBuilderBase::ApplyImpl(ir::Graph *graph) const { } // We cannot invoke resize. It is a bug of GCC 4.8 - result.Set(kGraphVars, new GraphVars(places_.size())); - result.Set(kGraphDepVars, new GraphDepVars); + result.Set(details::kGraphVars, new details::GraphVars(places_.size())); + result.Set(details::kGraphDepVars, new details::GraphDepVars); result.Set(kGraphOps, new GraphOps); bool is_forwarding = true; @@ -260,13 +265,13 @@ void MultiDevSSAGraphBuilderBase::InsertScaleLossGradOp( // user can customize loss@grad if not use_default_grad_scale_ size_t loss_scale = 0; switch (this->strategy_.gradient_scale_) { - case BuildStrategy::GradientScaleStrategy::kOne: + case details::BuildStrategy::GradientScaleStrategy::kOne: loss_scale = 1; break; - case BuildStrategy::GradientScaleStrategy::kCoeffNumDevice: + case details::BuildStrategy::GradientScaleStrategy::kCoeffNumDevice: loss_scale = Get(kNRanks); break; - case BuildStrategy::GradientScaleStrategy::kCustomized: + case details::BuildStrategy::GradientScaleStrategy::kCustomized: loss_scale = 0; break; default: @@ -328,7 +333,8 @@ void MultiDevSSAGraphBuilderBase::CreateOpHandleIOs(ir::Graph *result, platform::DeviceContextPool::Instance().Get(p)); for (ir::Node *input : node->inputs) { - VarHandle *var = CreateOrGetLatestVarHandle(result, input, p, place_id); + details::VarHandle *var = + CreateOrGetLatestVarHandle(result, input, p, place_id); op_handle->AddInput(var); } @@ -345,7 +351,7 @@ void MultiDevSSAGraphBuilderBase::CreateOpHandleIOs(ir::Graph *result, } void MultiDevSSAGraphBuilderBase::SetCommunicationContext( - OpHandleBase *op_handle, const platform::Place &p) const { + details::OpHandleBase *op_handle, const platform::Place &p) const { #if defined(PADDLE_WITH_CUDA) && !defined(_WIN32) if (nccl_ctxs_ == nullptr) { op_handle->SetDeviceContext(p, @@ -361,25 +367,28 @@ void MultiDevSSAGraphBuilderBase::CreateBroadcastOp(ir::Graph *result, const std::string &p_name, size_t src_dev_id) const { #if defined(PADDLE_WITH_CUDA) && !defined(_WIN32) - auto *op_handle = new BroadcastOpHandle( + auto *op_handle = new details::BroadcastOpHandle( result->CreateEmptyNode("broadcast", ir::Node::Type::kOperation), local_scopes_, places_, nccl_ctxs_); #else - auto *op_handle = new BroadcastOpHandle( + auto *op_handle = new details::BroadcastOpHandle( result->CreateEmptyNode("broadcast", ir::Node::Type::kOperation), local_scopes_, places_); #endif result->Get(kGraphOps).emplace_back(op_handle); - auto *in = - result->Get(kGraphVars).at(src_dev_id).at(p_name).back(); + auto *in = result->Get(details::kGraphVars) + .at(src_dev_id) + .at(p_name) + .back(); op_handle->AddInput(in); for (size_t i = 0; i < places_.size(); ++i) { auto &p = places_[i]; SetCommunicationContext(op_handle, p); - auto &vars = result->Get(kGraphVars).at(i).at(p_name); - auto *out_var = new VarHandle( + auto &vars = + result->Get(details::kGraphVars).at(i).at(p_name); + auto *out_var = new details::VarHandle( result->CreateEmptyNode(p_name, ir::Node::Type::kVariable), vars.size(), i, p_name, p); vars.emplace_back(out_var); @@ -391,11 
+400,11 @@ void MultiDevSSAGraphBuilderBase::CreateFusedBroadcastOp( ir::Graph *result, const std::vector> &bcast_varnames) const { #if defined(PADDLE_WITH_CUDA) && !defined(_WIN32) - auto *op_handle = new FusedBroadcastOpHandle( + auto *op_handle = new details::FusedBroadcastOpHandle( result->CreateEmptyNode("fused_broadcast", ir::Node::Type::kOperation), local_scopes_, places_, nccl_ctxs_); #else - auto *op_handle = new FusedBroadcastOpHandle( + auto *op_handle = new details::FusedBroadcastOpHandle( result->CreateEmptyNode("fused_broadcast", ir::Node::Type::kOperation), local_scopes_, places_); #endif @@ -408,14 +417,17 @@ void MultiDevSSAGraphBuilderBase::CreateFusedBroadcastOp( for (size_t dev_id = 0; dev_id < bcast_varnames.size(); ++dev_id) { for (auto &p_name : bcast_varnames[dev_id]) { - auto *in = - result->Get(kGraphVars).at(dev_id).at(p_name).back(); + auto *in = result->Get(details::kGraphVars) + .at(dev_id) + .at(p_name) + .back(); op_handle->AddInput(in); for (size_t out_dev_id = 0; out_dev_id < places_.size(); ++out_dev_id) { auto &p = places_[out_dev_id]; - auto &vars = - result->Get(kGraphVars).at(out_dev_id).at(p_name); - auto *out_var = new VarHandle( + auto &vars = result->Get(details::kGraphVars) + .at(out_dev_id) + .at(p_name); + auto *out_var = new details::VarHandle( result->CreateEmptyNode(p_name, ir::Node::Type::kVariable), vars.size(), out_dev_id, p_name, p); vars.emplace_back(out_var); @@ -429,39 +441,44 @@ void MultiDevSSAGraphBuilderBase::CreateComputationalOp(ir::Graph *result, ir::Node *node, size_t dev_id) const { result->Get(kGraphOps).emplace_back( - new ComputationOpHandle(result->CreateOpNode(node->Op()), - local_scopes_[dev_id], places_[dev_id], dev_id)); + new details::ComputationOpHandle(result->CreateOpNode(node->Op()), + local_scopes_[dev_id], places_[dev_id], + dev_id)); CreateOpHandleIOs(result, node, dev_id); } void MultiDevSSAGraphBuilderBase::CreateAllReduceOp(ir::Graph *result, const std::string &og, bool is_encoded) const { - OpHandleBase *op_handle = nullptr; + details::OpHandleBase *op_handle = nullptr; auto append_allreduce_op = [&]( const std::vector &scopes, - const std::vector &places) -> OpHandleBase * { + const std::vector &places) -> details::OpHandleBase * { #if defined(PADDLE_WITH_DGC) if (is_encoded) { - result->Get(kGraphOps).emplace_back(new SparseAllReduceOpHandle( - result->CreateEmptyNode("allreduce", ir::Node::Type::kOperation), - scopes, places, nccl_ctxs_, is_encoded, - static_cast(strategy_.trainers_endpoints_.size()) * - places_.size())); + result->Get(kGraphOps).emplace_back( + new details::SparseAllReduceOpHandle( + result->CreateEmptyNode("allreduce", ir::Node::Type::kOperation), + scopes, places, nccl_ctxs_, is_encoded, + static_cast(strategy_.trainers_endpoints_.size()) * + places_.size())); } else { - result->Get(kGraphOps).emplace_back(new AllReduceOpHandle( - result->CreateEmptyNode("allreduce", ir::Node::Type::kOperation), - scopes, places, nccl_ctxs_)); + result->Get(kGraphOps).emplace_back( + new details::AllReduceOpHandle( + result->CreateEmptyNode("allreduce", ir::Node::Type::kOperation), + scopes, places, nccl_ctxs_)); } #elif defined(PADDLE_WITH_CUDA) && !defined(_WIN32) - result->Get(kGraphOps).emplace_back(new AllReduceOpHandle( - result->CreateEmptyNode("allreduce", ir::Node::Type::kOperation), - scopes, places, nccl_ctxs_)); + result->Get(kGraphOps).emplace_back( + new details::AllReduceOpHandle( + result->CreateEmptyNode("allreduce", ir::Node::Type::kOperation), + scopes, places, nccl_ctxs_)); #else 
- result->Get(kGraphOps).emplace_back(new AllReduceOpHandle( - result->CreateEmptyNode("allreduce", ir::Node::Type::kOperation), - scopes, places)); + result->Get(kGraphOps).emplace_back( + new details::AllReduceOpHandle( + result->CreateEmptyNode("allreduce", ir::Node::Type::kOperation), + scopes, places)); #endif return result->Get(kGraphOps).back(); }; @@ -475,15 +492,15 @@ void MultiDevSSAGraphBuilderBase::CreateAllReduceOp(ir::Graph *result, } SetCommunicationContext(op_handle, places_[i]); - auto &vars = result->Get(kGraphVars)[i][og]; + auto &vars = result->Get(details::kGraphVars)[i][og]; PADDLE_ENFORCE(!vars.empty()); auto &prev_grad = vars.back(); op_handle->AddInput(prev_grad); VLOG(10) << "all_reduce_op_handle add input " << prev_grad->DebugString(); - auto var = - new VarHandle(result->CreateEmptyNode(og, ir::Node::Type::kVariable), - vars.size(), i, og, places_[i]); + auto var = new details::VarHandle( + result->CreateEmptyNode(og, ir::Node::Type::kVariable), vars.size(), i, + og, places_[i]); vars.emplace_back(var); op_handle->AddOutput(var); VLOG(10) << "all_reduce_op_handle add output " << og @@ -497,7 +514,7 @@ void MultiDevSSAGraphBuilderBase::CreateScaleLossGradOp( proto::VarType::Type dtype) const { for (size_t i = 0; i < places_.size(); ++i) { auto *dev_ctx = platform::DeviceContextPool::Instance().Get(places_[i]); - auto *op_handle = new ScaleLossGradOpHandle( + auto *op_handle = new details::ScaleLossGradOpHandle( result->CreateEmptyNode("scale_loss_grad", ir::Node::Type::kOperation), loss_scale, local_scopes_[i], places_[i], dev_ctx, dtype); result->Get(kGraphOps).emplace_back(op_handle); @@ -518,20 +535,21 @@ void MultiDevSSAGraphBuilderBase::CreateComputationalOps( for (size_t scope_idx = 0; scope_idx < num_places; ++scope_idx) { auto p = places_[scope_idx]; auto s = local_scopes_[scope_idx]; - result->Get(kGraphOps).emplace_back(new ComputationOpHandle( - result->CreateOpNode(node->Op()), s, p, scope_idx)); + result->Get(kGraphOps).emplace_back( + new details::ComputationOpHandle(result->CreateOpNode(node->Op()), s, p, + scope_idx)); CreateOpHandleIOs(result, node, scope_idx); } } -VarHandle *MultiDevSSAGraphBuilderBase::CreateReduceOp( +details::VarHandle *MultiDevSSAGraphBuilderBase::CreateReduceOp( ir::Graph *result, const std::string &og, size_t dst_dev_id) const { #if defined(PADDLE_WITH_CUDA) && !defined(_WIN32) - result->Get(kGraphOps).emplace_back(new ReduceOpHandle( + result->Get(kGraphOps).emplace_back(new details::ReduceOpHandle( result->CreateEmptyNode("reduce", ir::Node::Type::kOperation), local_scopes_, places_, nccl_ctxs_)); #else - result->Get(kGraphOps).emplace_back(new ReduceOpHandle( + result->Get(kGraphOps).emplace_back(new details::ReduceOpHandle( result->CreateEmptyNode("reduce", ir::Node::Type::kOperation), local_scopes_, places_)); #endif @@ -540,15 +558,16 @@ VarHandle *MultiDevSSAGraphBuilderBase::CreateReduceOp( for (size_t i = 0; i < places_.size(); ++i) { auto &p = places_[i]; SetCommunicationContext(op_handle, p); - auto &vars = result->Get(kGraphVars)[i][og]; + auto &vars = result->Get(details::kGraphVars)[i][og]; PADDLE_ENFORCE(!vars.empty()); auto &prev_grad = vars.back(); op_handle->AddInput(prev_grad); } - auto &vars = result->Get(kGraphVars)[dst_dev_id][og]; - auto var = - new VarHandle(result->CreateEmptyNode(og, ir::Node::Type::kVariable), - vars.size(), dst_dev_id, og, places_[dst_dev_id]); + auto &vars = + result->Get(details::kGraphVars)[dst_dev_id][og]; + auto var = new details::VarHandle( + result->CreateEmptyNode(og, 
ir::Node::Type::kVariable), vars.size(), + dst_dev_id, og, places_[dst_dev_id]); vars.emplace_back(var); op_handle->AddOutput(var); return var; @@ -596,7 +615,7 @@ int BalanceVarSSAGraphBuilder::GetVarDeviceID( } int BalanceVarSSAGraphBuilder::GetOpDeviceID(ir::Node *node) const { - if (strategy_.reduce_ != BuildStrategy::ReduceStrategy::kReduce) { + if (strategy_.reduce_ != details::BuildStrategy::ReduceStrategy::kReduce) { return -1; } if (!OpHaveRole(*node, framework::OpRole::kOptimize)) { @@ -830,9 +849,10 @@ bool DistSSAGraphBuilder::DealWithSpecialOp(ir::Graph *result, void SetOpInputsAllPlaces(ir::Graph *result, ir::Node *node, int num_places) { auto *op_handle = result->Get(kGraphOps).back(); for (ir::Node *input : node->inputs) { - VarHandle *var = nullptr; + details::VarHandle *var = nullptr; for (int place_offset = 0; place_offset < num_places; ++place_offset) { - auto &var_holders = result->Get(kGraphVars)[place_offset]; + auto &var_holders = + result->Get(details::kGraphVars)[place_offset]; auto &var_holder = var_holders[input->Name()]; if (!var_holder.empty()) { var = *var_holder.rbegin(); @@ -852,7 +872,8 @@ int DistSSAGraphBuilder::CreateRPCOp(ir::Graph *result, ir::Node *node) const { "This hack no longer holds, please fix."); // the variable name which contains .block means it was split by the // split_byref op - if (strategy_.reduce_ == BuildStrategy::ReduceStrategy::kAllReduce && + if (strategy_.reduce_ == + details::BuildStrategy::ReduceStrategy::kAllReduce && node->inputs[0]->Name().find(".block") == std::string::npos) { std::vector input_var_names; for (ir::Node *n : node->inputs) { @@ -898,10 +919,11 @@ int DistSSAGraphBuilder::CreateRPCOp(ir::Graph *result, ir::Node *node) const { // Create fetch_barrier op handle to enable output on all devices. // **NOTE** fetch_barrier should output the same variable list as the recv op does. if (node->Op()->Type() == "fetch_barrier") { - result->Get(kGraphOps).emplace_back(new FetchBarrierOpHandle( - result->CreateOpNode(node->Op()), local_scopes_, places_)); + result->Get(kGraphOps).emplace_back( + new details::FetchBarrierOpHandle(result->CreateOpNode(node->Op()), + local_scopes_, places_)); } else { - result->Get(kGraphOps).emplace_back(new RPCOpHandle( + result->Get(kGraphOps).emplace_back(new details::RPCOpHandle( result->CreateOpNode(node->Op()), *node->Op(), local_scopes_[op_dev_id], node->Op()->Type(), places_[op_dev_id])); } @@ -954,7 +976,8 @@ int DistSSAGraphBuilder::CreateDistTrainOp(ir::Graph *result, node->Op()->Type() == "split_ids") { // TODO(paddle-dev): getting the first var is not safe.
op_dev_id = GetVarDeviceID(input_var_names[0]); - if (strategy_.reduce_ == BuildStrategy::ReduceStrategy::kAllReduce) { + if (strategy_.reduce_ == + details::BuildStrategy::ReduceStrategy::kAllReduce) { op_dev_id = GetAppropriateDeviceID(input_var_names); for (auto &varname : input_var_names) { sharded_var_device_.emplace(varname, op_dev_id); @@ -985,7 +1008,7 @@ int DistSSAGraphBuilder::CreateDistTrainOp(ir::Graph *result, #if defined(PADDLE_WITH_DGC) bool AllReduceSSAGraphBuilder::IsEncoded(const std::string &p_name) const { - auto u_name = p_name + g_dgc_u; + auto u_name = p_name + details::g_dgc_u; auto it = all_vars_.find(u_name); if (it == all_vars_.end()) { VLOG(10) << "can't find u_name, so it's not encoded:" << u_name; @@ -1006,12 +1029,12 @@ void DistSSAGraphBuilder::InsertCollectiveOp(ir::Graph *result, // collective gradient to each device size_t cur_device_id = 0; switch (strategy_.reduce_) { - case BuildStrategy::ReduceStrategy::kReduce: + case details::BuildStrategy::ReduceStrategy::kReduce: cur_device_id = GetAppropriateDeviceID({g_name}); CreateReduceOp(result, g_name, cur_device_id); sharded_var_device_.emplace(g_name, cur_device_id); break; - case BuildStrategy::ReduceStrategy::kAllReduce: + case details::BuildStrategy::ReduceStrategy::kAllReduce: if (IsSparseGradient(g_name)) { CreateReduceOp(result, g_name, 0); CreateBroadcastOp(result, g_name, 0); @@ -1038,7 +1061,7 @@ void DistSSAGraphBuilder::InsertPostprocessOps(ir::Graph *result) const { // 4. CPU && Reduce: because all parameters share the same memory, we do not // broadcast received parameters. if (!UseGPU() && - strategy_.reduce_ == BuildStrategy::ReduceStrategy::kReduce) { + strategy_.reduce_ == details::BuildStrategy::ReduceStrategy::kReduce) { return; } if (strategy_.fuse_broadcast_ops_) { @@ -1064,29 +1087,28 @@ static int MultiDevSSAGraphBuilderRegister(const std::string &builder_mode) { return 0; } -} // namespace details +} // namespace ir } // namespace framework } // namespace paddle -#define REGISTER_MULTI_DEVICES_PASS(pass_name, pass_class) \ - STATIC_ASSERT_GLOBAL_NAMESPACE( \ - _reg_ssa_graph_builder_##pass_name, \ - "REGISTER_MULTI_DEVICES_PASS must be called in global namespace."); \ - int _reg_ssa_graph_builder_entry_##pass_name = \ - paddle::framework::details::MultiDevSSAGraphBuilderRegister(#pass_name); \ - REGISTER_PASS(pass_name, pass_class) \ - .RequirePassAttr(paddle::framework::details::kLossVarName) \ - .RequirePassAttr(paddle::framework::details::kPlaces) \ - .RequirePassAttr(paddle::framework::details::kLocalScopes) \ - .RequirePassAttr(paddle::framework::details::kStrategy) \ - .RequirePassAttr(paddle::framework::details::kNRanks) +#define REGISTER_MULTI_DEVICES_PASS(pass_name, pass_class) \ + STATIC_ASSERT_GLOBAL_NAMESPACE( \ + _reg_ssa_graph_builder_##pass_name, \ + "REGISTER_MULTI_DEVICES_PASS must be called in global namespace."); \ + int _reg_ssa_graph_builder_entry_##pass_name = \ + paddle::framework::ir::MultiDevSSAGraphBuilderRegister(#pass_name); \ + REGISTER_PASS(pass_name, pass_class) \ + .RequirePassAttr(paddle::framework::ir::kLossVarName) \ + .RequirePassAttr(paddle::framework::details::kPlaces) \ + .RequirePassAttr(paddle::framework::details::kLocalScopes) \ + .RequirePassAttr(paddle::framework::ir::kStrategy) \ + .RequirePassAttr(paddle::framework::ir::kNRanks) REGISTER_MULTI_DEVICES_PASS(reduce_mode_multi_devices_pass, - paddle::framework::details::ReduceSSAGraphBuilder); -REGISTER_MULTI_DEVICES_PASS( - all_reduce_mode_multi_devices_pass, -
paddle::framework::details::AllReduceSSAGraphBuilder); + paddle::framework::ir::ReduceSSAGraphBuilder); +REGISTER_MULTI_DEVICES_PASS(all_reduce_mode_multi_devices_pass, + paddle::framework::ir::AllReduceSSAGraphBuilder); REGISTER_MULTI_DEVICES_PASS(dist_multi_devices_pass, - paddle::framework::details::DistSSAGraphBuilder); + paddle::framework::ir::DistSSAGraphBuilder); REGISTER_MULTI_DEVICES_PASS(async_multi_devices_pass, - paddle::framework::details::AsyncSSAGraphBuilder); + paddle::framework::ir::AsyncSSAGraphBuilder); diff --git a/paddle/fluid/framework/details/multi_devices_graph_pass.h b/paddle/fluid/framework/ir/multi_devices_graph_pass/multi_devices_graph_pass.h similarity index 96% rename from paddle/fluid/framework/details/multi_devices_graph_pass.h rename to paddle/fluid/framework/ir/multi_devices_graph_pass/multi_devices_graph_pass.h index 0c4b3b0b8c963e99da5886f25e0df8146ce3695c..3434d45f1420f90eb14da73e8246621ca885fbb1 100644 --- a/paddle/fluid/framework/details/multi_devices_graph_pass.h +++ b/paddle/fluid/framework/ir/multi_devices_graph_pass/multi_devices_graph_pass.h @@ -31,7 +31,7 @@ class NCCLContextMap; namespace framework { class Scope; -namespace details { +namespace ir { constexpr char kLossVarName[] = "loss_var_name"; constexpr char kStrategy[] = "strategy"; @@ -69,8 +69,8 @@ class MultiDevSSAGraphBuilderBase : public ir::Pass { ir::Node *out_var_node, size_t loss_scale, proto::VarType::Type dtype) const; - VarHandle *CreateReduceOp(ir::Graph *result, const std::string &og, - size_t dst_dev_id) const; + details::VarHandle *CreateReduceOp(ir::Graph *result, const std::string &og, + size_t dst_dev_id) const; void CreateComputationalOp(ir::Graph *result, ir::Node *node, size_t dev_id) const; @@ -89,7 +89,7 @@ class MultiDevSSAGraphBuilderBase : public ir::Pass { ir::Graph *result, const std::vector> &bcast_varnames) const; - void SetCommunicationContext(OpHandleBase *op_handle, + void SetCommunicationContext(details::OpHandleBase *op_handle, const platform::Place &p) const; void CreateOpHandleIOs(ir::Graph *result, ir::Node *node, @@ -103,7 +103,7 @@ class MultiDevSSAGraphBuilderBase : public ir::Pass { mutable std::vector places_; mutable std::vector local_scopes_; - mutable BuildStrategy strategy_; + mutable details::BuildStrategy strategy_; mutable std::unordered_map all_vars_; }; @@ -209,6 +209,6 @@ class DistSSAGraphBuilder : public BalanceVarSSAGraphBuilder { std::unordered_set &MultiDevSSAGraphBuilder(); -} // namespace details +} // namespace ir } // namespace framework } // namespace paddle diff --git a/paddle/fluid/framework/details/multi_devices_graph_print_pass.cc b/paddle/fluid/framework/ir/multi_devices_graph_pass/multi_devices_graph_print_pass.cc similarity index 68% rename from paddle/fluid/framework/details/multi_devices_graph_print_pass.cc rename to paddle/fluid/framework/ir/multi_devices_graph_pass/multi_devices_graph_print_pass.cc index 34c38ea81a9e4832f7e1b63e1e6db4ea27704c34..a6c2b28215affcfb30f66452a633eea266088906 100644 --- a/paddle/fluid/framework/details/multi_devices_graph_print_pass.cc +++ b/paddle/fluid/framework/ir/multi_devices_graph_pass/multi_devices_graph_print_pass.cc @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. 
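// A usage sketch for the printer pass defined below (hedged: the driving code is illustrative and mirrors how other passes in this patch are applied through ir::PassRegistry; only kGraphvizPath and the "graph_printer" attribute come from the pass itself): // auto print_pass = ir::PassRegistry::Instance().Get("multi_devices_print_pass"); // print_pass->Set<std::string>(ir::kGraphvizPath, new std::string("/tmp/ssa_graph.dot")); // print_pass->Set<GraphvizSSAGraphPrinter>("graph_printer", new GraphvizSSAGraphPrinter); // graph = print_pass->Apply(graph);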
-#include "paddle/fluid/framework/details/multi_devices_graph_print_pass.h" +#include "paddle/fluid/framework/ir/multi_devices_graph_pass/multi_devices_graph_print_pass.h" #include #include #include @@ -21,11 +21,21 @@ namespace paddle { namespace framework { -namespace details { +namespace ir { + +class SSAGraghBuilderWithPrinterPass : public ir::Pass { + protected: + void ApplyImpl(ir::Graph *graph) const override { + std::unique_ptr fout( + new std::ofstream(Get(kGraphvizPath))); + PADDLE_ENFORCE(fout->good()); + Get("graph_printer").Print(*graph, *fout); + } +}; template static inline void IterAllVar(const ir::Graph &graph, Callback callback) { - for (auto &each : graph.Get(kGraphVars)) { + for (auto &each : graph.Get(details::kGraphVars)) { for (auto &pair1 : each) { for (auto &pair2 : pair1.second) { callback(*pair2); @@ -33,7 +43,7 @@ static inline void IterAllVar(const ir::Graph &graph, Callback callback) { } } - for (auto &var : graph.Get(kGraphDepVars)) { + for (auto &var : graph.Get(details::kGraphDepVars)) { callback(*var); } } @@ -41,14 +51,14 @@ static inline void IterAllVar(const ir::Graph &graph, Callback callback) { void GraphvizSSAGraphPrinter::Print(const ir::Graph &graph, std::ostream &sout) const { size_t var_id = 0; - std::unordered_map vars; + std::unordered_map vars; sout << "digraph G {\n"; - IterAllVar(graph, [&](const VarHandleBase &var) { + IterAllVar(graph, [&](const details::VarHandleBase &var) { auto *var_ptr = &var; - auto *var_handle_ptr = dynamic_cast(var_ptr); - auto *dummy_ptr = dynamic_cast(var_ptr); + auto *var_handle_ptr = dynamic_cast(var_ptr); + auto *dummy_ptr = dynamic_cast(var_ptr); size_t cur_var_id = var_id++; vars[var_ptr] = cur_var_id; @@ -65,7 +75,7 @@ void GraphvizSSAGraphPrinter::Print(const ir::Graph &graph, }); size_t op_id = 0; - for (auto &op : ir::FilterByNodeWrapper(graph)) { + for (auto &op : ir::FilterByNodeWrapper(graph)) { std::string op_name = "op_" + std::to_string(op_id++); sout << op_name << " [label=\"" << op->Name() << "\", shape=rect]" << std::endl; @@ -82,10 +92,10 @@ void GraphvizSSAGraphPrinter::Print(const ir::Graph &graph, sout << "}\n"; } -} // namespace details +} // namespace ir } // namespace framework } // namespace paddle REGISTER_PASS(multi_devices_print_pass, - paddle::framework::details::SSAGraghBuilderWithPrinter) - .RequirePassAttr(paddle::framework::details::kGraphvizPath); + paddle::framework::ir::SSAGraghBuilderWithPrinterPass) + .RequirePassAttr(paddle::framework::ir::kGraphvizPath); diff --git a/paddle/fluid/framework/details/multi_devices_graph_print_pass.h b/paddle/fluid/framework/ir/multi_devices_graph_pass/multi_devices_graph_print_pass.h similarity index 76% rename from paddle/fluid/framework/details/multi_devices_graph_print_pass.h rename to paddle/fluid/framework/ir/multi_devices_graph_pass/multi_devices_graph_print_pass.h index 6d57d75e8a5541ac39e6dbe231c3f47daaa4206a..8562856e3d5fc923d453c8c646269c3d7559b6ce 100644 --- a/paddle/fluid/framework/details/multi_devices_graph_print_pass.h +++ b/paddle/fluid/framework/ir/multi_devices_graph_pass/multi_devices_graph_print_pass.h @@ -24,7 +24,7 @@ namespace paddle { namespace framework { -namespace details { +namespace ir { constexpr char kGraphvizPath[] = "debug_graphviz_path"; @@ -39,16 +39,6 @@ class GraphvizSSAGraphPrinter : public SSAGraphPrinter { void Print(const ir::Graph& graph, std::ostream& sout) const override; }; -class SSAGraghBuilderWithPrinter : public ir::Pass { - protected: - void ApplyImpl(ir::Graph* graph) const override { - 
std::unique_ptr fout( - new std::ofstream(Get(kGraphvizPath))); - PADDLE_ENFORCE(fout->good()); - Get("graph_printer").Print(*graph, *fout); - } -}; - -} // namespace details +} // namespace ir } // namespace framework } // namespace paddle diff --git a/paddle/fluid/framework/ir/multi_devices_graph_pass/sequential_execution_pass.cc b/paddle/fluid/framework/ir/multi_devices_graph_pass/sequential_execution_pass.cc new file mode 100644 index 0000000000000000000000000000000000000000..3a5333d08d4444e4c3d402c5dc549c40c87e4e99 --- /dev/null +++ b/paddle/fluid/framework/ir/multi_devices_graph_pass/sequential_execution_pass.cc @@ -0,0 +1,113 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include +#include +#include +#include +#include "paddle/fluid/framework/ir/graph.h" +#include "paddle/fluid/framework/ir/memory_optimize_pass/memory_optimize_helper.h" +#include "paddle/fluid/framework/ir/pass.h" +#include "paddle/fluid/framework/op_proto_maker.h" + +namespace paddle { +namespace framework { +namespace ir { + +static bool IsSameOpDesc(OpDesc *op1, OpDesc *op2) { + return op1->Type() == op2->Type() && op1->Inputs() == op2->Inputs() && + op1->Outputs() == op2->Outputs(); +} + +class SequentialExecutionPass : public ir::Pass { + protected: + void ApplyImpl(ir::Graph *graph) const override { + // FIXME(zjl): Inserting dependencies between some distributed ops may + // cause the multi_devices_graph_pass to fail. So we skip these ops here. + // Indeed, maybe we should not insert dependencies between these ops + // casually, which may easily cause deadlock.
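// One concrete way such a deadlock can arise (an illustrative scenario, not taken from the original comment): trainer A is serialized as recv(y) -> send(x) and trainer B as recv(x) -> send(y); A's recv(y) then waits on B's send(y), which the inserted control edge blocks behind B's recv(x), which in turn waits on A's send(x) -- a cyclic wait that never resolves.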
+ // We should add more skipped distributed ops when errors are found in + // multi_devices_graph_pass + static std::unordered_set skip_dist_ops{ + "send", "recv", "send_barrier", "fetch_barrier"}; + + auto &ops = + graph->Get>(details::kStaleProgramOpDescs); + std::vector op_node_list; + op_node_list.reserve(ops.size()); + + std::unordered_map op_deps; + std::unordered_map> pending_ops; + std::unordered_set ready_ops; + + for (ir::Node *node : graph->Nodes()) { + if (!node->IsOp()) continue; + std::unordered_set preceding_ops; + for (auto *in : node->inputs) { + PADDLE_ENFORCE(in->IsVar(), + "Preceding Node of Op Nodes must be Var Node"); + if (in->inputs.empty()) continue; + PADDLE_ENFORCE(in->inputs.size() == 1 && in->inputs[0]->IsOp(), + "Preceding Op Node of Var Node must be unique"); + preceding_ops.insert(in->inputs[0]); + pending_ops[in->inputs[0]].insert(node); + } + op_deps[node] = preceding_ops.size(); + if (preceding_ops.empty()) { + ready_ops.insert(node); + } + } + + for (auto *op_desc : ops) { + ir::Node *found_node = nullptr; + for (auto *node : ready_ops) { + if (IsSameOpDesc(op_desc, node->Op())) { + PADDLE_ENFORCE(found_node == nullptr, + "Found multiple op_desc in graph: %s", + op_desc->Type()); + found_node = node; + } + } + + PADDLE_ENFORCE_NOT_NULL(found_node, "Cannot find op_desc in graph: %s", + op_desc->Type()); + for (auto *pending_op : pending_ops[found_node]) { + if (--op_deps.at(pending_op) == 0) { + ready_ops.insert(pending_op); + } + } + ready_ops.erase(found_node); + if (skip_dist_ops.count(op_desc->Type()) == 0) { + op_node_list.push_back(found_node); + } + } + + for (size_t i = 1; i < op_node_list.size(); ++i) { + auto *dep_var = graph->CreateControlDepVar(); + op_node_list[i]->inputs.push_back(dep_var); + op_node_list[i - 1]->outputs.push_back(dep_var); + dep_var->outputs.push_back(op_node_list[i]); + dep_var->inputs.push_back(op_node_list[i - 1]); + VLOG(10) << "Add dependencies between " << op_node_list[i - 1]->Name() + << " and " << op_node_list[i]->Name(); + } + } +}; +} // namespace ir +} // namespace framework +} // namespace paddle + +REGISTER_PASS(sequential_execution_pass, + paddle::framework::ir::SequentialExecutionPass) + .RequireGraphAttr(paddle::framework::details::kStaleProgramOpDescs); diff --git a/paddle/fluid/framework/ir/quant_conv2d_dequant_fuse_pass.cc b/paddle/fluid/framework/ir/quant_conv2d_dequant_fuse_pass.cc index 7cab9c353d35cb6d725d787986e992b6853d42ce..017e3ef234c95da44bcfb6858c06a48aa973164b 100644 --- a/paddle/fluid/framework/ir/quant_conv2d_dequant_fuse_pass.cc +++ b/paddle/fluid/framework/ir/quant_conv2d_dequant_fuse_pass.cc @@ -25,7 +25,8 @@ namespace framework { namespace ir { void RunQuantDequant(ir::Graph* graph, Scope* scope, int times, - std::string op_type) { + const std::string& op_type, + const std::string& quant_type) { const std::string pattern_name = "quant_dequant_fuse"; // FusePassBase::Init(pattern_name, graph); const int kNumFields = 5; @@ -38,7 +39,7 @@ void RunQuantDequant(ir::Graph* graph, Scope* scope, int times, GraphPatternDetector gpd; auto* x = gpd.mutable_pattern() ->NewNode("x") - ->assert_is_op_input("fake_quantize_range_abs_max", "X") + ->assert_is_op_input(quant_type, "X") ->AsInput(); std::string quantized_op_type = ""; @@ -46,6 +47,9 @@ void RunQuantDequant(ir::Graph* graph, Scope* scope, int times, if (op_type == "conv2d") { quantized_op_type = "conv2d"; weight_name = "Filter"; + } else if (op_type == "depthwise_conv2d") { + quantized_op_type = "depthwise_conv2d"; + weight_name = "Filter"; + }
else if (op_type == "conv2d_fusion") { quantized_op_type = "conv2d_fusion"; weight_name = "Filter"; @@ -62,7 +66,7 @@ void RunQuantDequant(ir::Graph* graph, Scope* scope, int times, } patterns::QuantDequantOpFuse pattern(gpd.mutable_pattern(), pattern_name); - pattern(x, quantized_op_type, weight_name, times); + pattern(x, quantized_op_type, weight_name, times, quant_type); auto handler = [&](const GraphPatternDetector::subgraph_t& subgraph, Graph* g) { @@ -103,7 +107,6 @@ void RunQuantDequant(ir::Graph* graph, Scope* scope, int times, std::unordered_set delete_nodes; for (int i = 0; i < times; i++) { - // max_range = (range * range) / weight_scale float max_range = boost::get( nodes[i * kNumFields + kDequantOpOffset]->Op()->GetAttr("max_range")); float weight_scale = (range * range) / max_range; @@ -118,7 +121,8 @@ void RunQuantDequant(ir::Graph* graph, Scope* scope, int times, new_op_desc.SetType(quantized_op_type); if (quantized_op_type == "conv2d" || - quantized_op_type == "conv2d_fusion") { + quantized_op_type == "conv2d_fusion" || + quantized_op_type == "depthwise_conv2d") { new_op_desc.SetInput("Input", {new_input}); new_op_desc.SetOutput("Output", {new_output}); } else if (quantized_op_type == "fc") { @@ -156,11 +160,17 @@ void QuantDequantFusePass::ApplyImpl(ir::Graph* graph) const { const std::string pattern_name = "quant_dequant_fuse"; FusePassBase::Init(pattern_name, graph); - std::unordered_set quantized_op_types = {"conv2d", "mul"}; + std::unordered_set quant_types = { + "fake_quantize_range_abs_max", "fake_quantize_moving_average_abs_max"}; + + std::unordered_set quantized_op_types = {"conv2d", "mul", + "depthwise_conv2d"}; auto* scope = param_scope(); - for (auto& op_type : quantized_op_types) { - for (int i = 1; i <= 6; i++) { - RunQuantDequant(graph, scope, i, op_type); + for (auto& quant_type : quant_types) { + for (auto& op_type : quantized_op_types) { + for (int i = 6; i >= 1; i--) { + RunQuantDequant(graph, scope, i, op_type, quant_type); + } } } } diff --git a/paddle/fluid/framework/ir/shuffle_channel_detect_pass.cc b/paddle/fluid/framework/ir/shuffle_channel_detect_pass.cc new file mode 100644 index 0000000000000000000000000000000000000000..e55783637a6e08578ef7717ba9768f7eece7ca8f --- /dev/null +++ b/paddle/fluid/framework/ir/shuffle_channel_detect_pass.cc @@ -0,0 +1,93 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
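// A shape walk-through of the pattern this pass matches (the concrete numbers are illustrative; the indexing matches the i_c / o_c computation below): a ShuffleNet-style channel shuffle on an [N, C, H, W] input with C = 8 and group = 2 appears as // reshape2: [N, 8, H, W] -> [N, 2, 4, H, W] // transpose2: [N, 2, 4, H, W] -> [N, 4, 2, H, W] // reshape2: [N, 4, 2, H, W] -> [N, 8, H, W] // so i_c = reshape1_shape[2] = 4, o_c = reshape2_shape[1] = 8, and the recovered attribute is group = o_c / i_c = 2.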
+ +#include + +#include "paddle/fluid/framework/ir/graph_viz_pass.h" +#include "paddle/fluid/framework/ir/shuffle_channel_detect_pass.h" + +namespace paddle { +namespace framework { +namespace ir { + +#define GET_IR_NODE(node__) GET_IR_NODE_FROM_SUBGRAPH(node__, node__, pattern); +#define GET_NODES \ + GET_IR_NODE(reshape1_op); \ + GET_IR_NODE(reshape1_out); \ + GET_IR_NODE(transpose_op); \ + GET_IR_NODE(transpose_out); \ + GET_IR_NODE(reshape2_op); \ + GET_IR_NODE(reshape2_out); + +void ShuffleChannelDetectPass::ApplyImpl(ir::Graph* graph) const { + const std::string pattern_name = "shufflechannel_pattern"; + FusePassBase::Init(pattern_name, graph); + + GraphPatternDetector gpd; + auto* x = gpd.mutable_pattern() + ->NewNode("x") + ->assert_is_op_input("reshape2", "X") + ->AsInput(); + + patterns::ShuffleChannelPattern pattern(gpd.mutable_pattern(), pattern_name); + pattern(x); + + auto handler = [&](const GraphPatternDetector::subgraph_t& subgraph, + Graph* g) { + GET_NODES; + + PADDLE_ENFORCE(subgraph.count(x)); + auto* input_node = subgraph.at(x); + auto reshape1_desc = reshape1_op->Op(); + auto reshape2_desc = reshape2_op->Op(); + std::string input_name = input_node->Name(); + std::string output_name = reshape2_out->Name(); + + auto reshape1_shape = + boost::get>(reshape1_desc->GetAttr("shape")); + auto reshape2_shape = + boost::get>(reshape2_desc->GetAttr("shape")); + + int i_c = reshape1_shape[2]; + int o_c = reshape2_shape[1]; + int group = o_c / i_c; + + framework::OpDesc new_op_desc; + new_op_desc.SetType("shuffle_channel"); + new_op_desc.SetInput("X", {input_name}); + new_op_desc.SetOutput("Out", {output_name}); + + new_op_desc.SetAttr("group", group); + new_op_desc.Flush(); + + // Create a new node for the fused op. + auto* new_op = graph->CreateOpNode(&new_op_desc); + + IR_NODE_LINK_TO(input_node, new_op); + IR_NODE_LINK_TO(new_op, reshape2_out); + + // Delete the unneeded nodes. + GraphSafeRemoveNodes(graph, {reshape1_op, reshape1_out, transpose_op, + transpose_out, reshape2_op}); + }; + + gpd(graph, handler); +} + +} // namespace ir +} // namespace framework +} // namespace paddle + +REGISTER_PASS(shuffle_channel_detect_pass, + paddle::framework::ir::ShuffleChannelDetectPass); diff --git a/paddle/fluid/framework/details/modify_op_lock_and_record_event_pass.h b/paddle/fluid/framework/ir/shuffle_channel_detect_pass.h similarity index 74% rename from paddle/fluid/framework/details/modify_op_lock_and_record_event_pass.h rename to paddle/fluid/framework/ir/shuffle_channel_detect_pass.h index 54d52d6240a830dfc66f13c26fb79a896897f980..008f8013efd28b3cdc5a846662653e07e45e3985 100644 --- a/paddle/fluid/framework/details/modify_op_lock_and_record_event_pass.h +++ b/paddle/fluid/framework/ir/shuffle_channel_detect_pass.h @@ -13,19 +13,22 @@ // limitations under the License. 
#pragma once - -#include "paddle/fluid/framework/ir/graph.h" -#include "paddle/fluid/framework/ir/pass.h" +#include +#include "paddle/fluid/framework/ir/fuse_pass_base.h" +#include "paddle/fluid/framework/ir/graph_pattern_detector.h" namespace paddle { namespace framework { -namespace details { +namespace ir { + +class ShuffleChannelDetectPass : public FusePassBase { + public: + virtual ~ShuffleChannelDetectPass() {} -class ModifyOpLockAndRecordEventPass : public ir::Pass { protected: void ApplyImpl(ir::Graph* graph) const override; }; -} // namespace details +} // namespace ir } // namespace framework } // namespace paddle diff --git a/paddle/fluid/framework/ir/sync_batch_norm_pass.cc b/paddle/fluid/framework/ir/sync_batch_norm_pass.cc index f4f924a604a231d1a25e169c4dd13f51eb90f266..25207ffc1e9f540131f5b7c0336d308831aec19f 100644 --- a/paddle/fluid/framework/ir/sync_batch_norm_pass.cc +++ b/paddle/fluid/framework/ir/sync_batch_norm_pass.cc @@ -12,30 +12,32 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/fluid/framework/ir/sync_batch_norm_pass.h" #include #include #include +#include "paddle/fluid/framework/ir/pass.h" namespace paddle { namespace framework { namespace ir { -void SyncBatchNormPass::ApplyImpl(ir::Graph* graph) const { - VLOG(3) << "Use synchronous batch norm"; - for (const Node* n : graph->Nodes()) { - if (n->IsOp()) { - auto* op = n->Op(); - if (op->Type() == "batch_norm") { - op->SetType("sync_batch_norm"); - } - if (op->Type() == "batch_norm_grad") { - op->SetType("sync_batch_norm_grad"); +class SyncBatchNormPass : public Pass { + protected: + void ApplyImpl(ir::Graph *graph) const override { + VLOG(3) << "Use synchronous batch norm"; + for (const Node *n : graph->Nodes()) { + if (n->IsOp()) { + auto *op = n->Op(); + if (op->Type() == "batch_norm") { + op->SetType("sync_batch_norm"); + } + if (op->Type() == "batch_norm_grad") { + op->SetType("sync_batch_norm_grad"); + } } } } -} - +}; } // namespace ir } // namespace framework } // namespace paddle diff --git a/paddle/fluid/framework/ir/sync_batch_norm_pass_tester.cc b/paddle/fluid/framework/ir/sync_batch_norm_pass_tester.cc index 894f96050edd607e1ea7df1c319cfeb3570662e5..90d214116d73554040e339fa01a24eed9255696a 100644 --- a/paddle/fluid/framework/ir/sync_batch_norm_pass_tester.cc +++ b/paddle/fluid/framework/ir/sync_batch_norm_pass_tester.cc @@ -12,9 +12,9 @@ // See the License for the specific language governing permissions and // limitations under the License. -#include "paddle/fluid/framework/ir/sync_batch_norm_pass.h" #include - +#include "paddle/fluid/framework/ir/pass.h" +#include "paddle/fluid/framework/program_desc.h" namespace paddle { namespace framework { namespace ir { diff --git a/paddle/fluid/framework/operator.cc b/paddle/fluid/framework/operator.cc index de8766809c66a92edaab41c52d8b233229ccc3ba..fae33f55b054b1dde8ac7bf7cd931d5de911c5aa 100644 --- a/paddle/fluid/framework/operator.cc +++ b/paddle/fluid/framework/operator.cc @@ -1148,7 +1148,7 @@ proto::VarType::Type OperatorWithKernel::IndicateDataType( proto::VarType::Type tmp = t->type(); PADDLE_ENFORCE( tmp == data_type || data_type == dafault_data_type, - "DataType of Paddle Op %s %s must be the same. Get (%d) != (%d)", + "DataType of Paddle Op %s %s must be the same. 
Get (%s) != (%s)", Type(), input.first, DataTypeToString(data_type), DataTypeToString(tmp)); data_type = tmp; diff --git a/paddle/fluid/framework/operator.h b/paddle/fluid/framework/operator.h index d94326563fa9ec4b532927d8474d67f9a4941d44..4bc94b4c5cd537937a17653713a888fc28fd0177 100644 --- a/paddle/fluid/framework/operator.h +++ b/paddle/fluid/framework/operator.h @@ -386,9 +386,10 @@ class ExecutionContext { template T& GetKernelConfig(int idx) const { - PADDLE_ENFORCE(kernel_configs_ && kernel_configs_->size() > idx, - "%s selected kernel doesn't have kernel config %lu <= %d", - op_.Type().c_str(), kernel_configs_->size(), idx); + PADDLE_ENFORCE( + kernel_configs_ && kernel_configs_->size() > static_cast(idx), + "%s selected kernel doesn't have kernel config %lu <= %d", + op_.Type().c_str(), kernel_configs_->size(), idx); return *boost::get>(kernel_configs_->at(idx)); } diff --git a/paddle/fluid/framework/parallel_executor.cc b/paddle/fluid/framework/parallel_executor.cc index c4bf2b7e8c017b22f917c9f9bd40e75b8cde08b2..f400e8a5cc031cb0982860a6c2c1c9aba77f35dc 100644 --- a/paddle/fluid/framework/parallel_executor.cc +++ b/paddle/fluid/framework/parallel_executor.cc @@ -23,11 +23,11 @@ limitations under the License. */ #include "paddle/fluid/framework/details/fast_threaded_ssa_graph_executor.h" #include "paddle/fluid/framework/details/multi_devices_helper.h" #include "paddle/fluid/framework/details/parallel_ssa_graph_executor.h" -#include "paddle/fluid/framework/details/reference_count_pass_helper.h" #include "paddle/fluid/framework/details/scope_buffered_ssa_graph_executor.h" #include "paddle/fluid/framework/details/threaded_ssa_graph_executor.h" #include "paddle/fluid/framework/ir/graph.h" #include "paddle/fluid/framework/ir/graph_helper.h" +#include "paddle/fluid/framework/ir/memory_optimize_pass/reference_count_pass_helper.h" #include "paddle/fluid/platform/profiler.h" #ifdef WITH_GPERFTOOLS @@ -46,6 +46,7 @@ static std::once_flag gProfileOnce; #ifdef WITH_GPERFTOOLS static bool gProfileStarted = false; #endif + class ParallelExecutorPrivate { public: explicit ParallelExecutorPrivate(const std::vector &places) @@ -57,7 +58,7 @@ class ParallelExecutorPrivate { gProfileStarted = true; #else LOG(WARNING) << "Paddle is not compiled with gperftools. 
" - "FLAGS_pe_profile_fname will be ignored"; + "FLAGS_pe_profile_fname will be ignored"; #endif }); } @@ -110,9 +111,9 @@ class ParallelExecutorPrivate { // global_ref_cnts_ is only initialized when ParallelExecutor constructs, and // then keeps unchanged // Before each iteration, runtime_ref_cnts_ is reset to global_ref_cnts_ - std::vector global_ref_cnts_; - std::vector runtime_ref_cnts_; - details::GarbageCollectorMap gcs_; + std::vector global_ref_cnts_; + std::vector runtime_ref_cnts_; + ir::GarbageCollectorMap gcs_; }; ir::Graph *ParallelExecutorPrivate::PrepareGCAndRefCnts( @@ -150,25 +151,23 @@ ir::Graph *ParallelExecutorPrivate::PrepareGCAndRefCnts( } if (!gcs_.empty()) { - std::vector last_live_ops_of_vars; + std::vector last_live_ops_of_vars; auto ref_cnt_pass = ir::PassRegistry::Instance().Get("reference_count_pass"); - ref_cnt_pass->SetNotOwned(details::kGlobalReferenceCount, - &global_ref_cnts_); - ref_cnt_pass->SetNotOwned(details::kLastLiveOpsOfVars, - &last_live_ops_of_vars); + ref_cnt_pass->SetNotOwned(ir::kGlobalReferenceCount, &global_ref_cnts_); + ref_cnt_pass->SetNotOwned(ir::kLastLiveOpsOfVars, &last_live_ops_of_vars); graph = ref_cnt_pass->Apply(graph); VLOG(10) << "ReferenceCountPass Applied"; auto eager_deletion_pass = ir::PassRegistry::Instance().Get("eager_deletion_pass"); - eager_deletion_pass->SetNotOwned(details::kRuntimeReferenceCount, + eager_deletion_pass->SetNotOwned(ir::kRuntimeReferenceCount, &runtime_ref_cnts_); - eager_deletion_pass->SetNotOwned(details::kGarbageCollector, &gcs_); - eager_deletion_pass->SetNotOwned(details::kLastLiveOpsOfVars, + eager_deletion_pass->SetNotOwned(ir::kGarbageCollector, &gcs_); + eager_deletion_pass->SetNotOwned(ir::kLastLiveOpsOfVars, &last_live_ops_of_vars); - eager_deletion_pass->SetNotOwned(details::kAllPlaces, &places_); + eager_deletion_pass->SetNotOwned(ir::kAllPlaces, &places_); graph = eager_deletion_pass->Apply(graph); VLOG(10) << "EagerDeletionPass Applied"; } @@ -179,6 +178,20 @@ std::vector &ParallelExecutor::GetLocalScopes() { return member_->local_scopes_; } +void ParallelExecutor::DropLocalExeScopes() { + auto executor = dynamic_cast( + member_->executor_.get()); + if (executor) { + executor->DropLocalExeScopes(); + } +} + +bool ParallelExecutor::NeedCreateLocalExeScope() { + auto executor = dynamic_cast( + member_->executor_.get()); + return executor && executor->NeedCreateLocalExeScope(); +} + ParallelExecutor::ParallelExecutor(const std::vector &places, const std::vector &bcast_vars, const std::string &loss_var_name, @@ -333,7 +346,7 @@ ParallelExecutor::ParallelExecutor(const std::vector &places, graph = build_strategy.Apply(graph, {member_->places_[0]}, loss_var_name, {member_->local_scopes_[0]}, 1, member_->use_cuda_); - for (int i = 1; i < member_->places_.size(); ++i) { + for (size_t i = 1; i < member_->places_.size(); ++i) { graphs[i] = build_strategy.Apply( graphs[i], {member_->places_[i]}, loss_var_name, {member_->local_scopes_[i]}, 1, member_->use_cuda_); @@ -344,8 +357,8 @@ ParallelExecutor::ParallelExecutor(const std::vector &places, member_->local_scopes_, member_->nranks_, member_->use_cuda_); } - #endif + auto max_memory_size = GetEagerDeletionThreshold(); VLOG(10) << "Eager Deletion Threshold " << static_cast(max_memory_size) / (1 << 30); diff --git a/paddle/fluid/framework/parallel_executor.h b/paddle/fluid/framework/parallel_executor.h index 5756627fbd8583428014e24e5aa3f626c908ce1c..2de6b7f73d2a03a4b9f23b49142f677df6120806 100644 --- a/paddle/fluid/framework/parallel_executor.h +++ 
b/paddle/fluid/framework/parallel_executor.h @@ -58,6 +58,11 @@ class ParallelExecutor { std::vector &GetLocalScopes(); + void DropLocalExeScopes(); + + // This API is used to check whether DropLocalExeScopes works. + bool NeedCreateLocalExeScope(); + /** * Feed tensors to local scopes. The size of tensors should be equal to the * size of local scopes. diff --git a/paddle/fluid/framework/tensor_impl.h b/paddle/fluid/framework/tensor_impl.h index ef5404e4755817cefc925acbf4882ff86d1f0ba3..8dabecac8ab42ee0fb6b57048f3a1c8223d0b0b1 100644 --- a/paddle/fluid/framework/tensor_impl.h +++ b/paddle/fluid/framework/tensor_impl.h @@ -25,8 +25,9 @@ inline const T* Tensor::data() const { check_memory_size(); bool valid = std::is_same::value || type_ == DataTypeTrait::DataType; - PADDLE_ENFORCE(valid, "Tensor holds the wrong type, it holds %d", - DataTypeToString(type_)); + PADDLE_ENFORCE( + valid, "Tensor holds the wrong type, it holds %s, but desires to be %s", + DataTypeToString(type_), DataTypeToString(DataTypeTrait::DataType)); return reinterpret_cast( reinterpret_cast(holder_->ptr()) + offset_); @@ -39,7 +40,9 @@ inline T* Tensor::data() { check_memory_size(); bool valid = std::is_same::value || type_ == DataTypeTrait::DataType; - PADDLE_ENFORCE(valid, "Tensor holds the wrong type, it holds %s", type_); + PADDLE_ENFORCE( + valid, "Tensor holds the wrong type, it holds %s, but desires to be %s", + DataTypeToString(type_), DataTypeToString(DataTypeTrait::DataType)); return reinterpret_cast(reinterpret_cast(holder_->ptr()) + offset_); } diff --git a/paddle/fluid/inference/anakin/convert/CMakeLists.txt b/paddle/fluid/inference/anakin/convert/CMakeLists.txt index d3d1522dccf0d8af4f26eec4e0c57257279880e0..5d85525a652a6016694e012853c95aca086b3fd9 100644 --- a/paddle/fluid/inference/anakin/convert/CMakeLists.txt +++ b/paddle/fluid/inference/anakin/convert/CMakeLists.txt @@ -1,4 +1,9 @@ -cc_library(anakin_op_converter SRCS fc.cc conv2d.cc conv2d_fusion.cc elementwise.cc activation.cc pool2d.cc concat.cc split.cc relu.cc softmax.cc batch_norm.cc reshape.cc flatten.cc transpose.cc density_prior_box.cc detection_out.cc scale.cc dropout.cc im2sequence.cc sum.cc DEPS anakin_engine framework_proto scope op_registry) +cc_library(anakin_op_converter SRCS fc.cc conv2d.cc conv2d_fusion.cc +elementwise.cc activation.cc pool2d.cc concat.cc split.cc relu.cc softmax.cc +batch_norm.cc reshape.cc flatten.cc transpose.cc density_prior_box.cc +detection_out.cc scale.cc dropout.cc im2sequence.cc sum.cc affine_channel.cc +roi_align.cc shuffle_channel.cc helper.cc DEPS anakin_engine framework_proto +scope op_registry gtest) cc_test(test_anakin_fc SRCS test_fc_op.cc DEPS anakin_op_converter mul_op SERIAL) cc_test(test_anakin_conv2d SRCS test_conv2d_op.cc DEPS anakin_op_converter conv_op im2col vol2col depthwise_conv SERIAL) @@ -14,5 +19,5 @@ cc_test(test_anakin_flatten SRCS test_flatten_op.cc DEPS anakin_op_converter fla cc_test(test_anakin_transpose SRCS test_transpose_op.cc DEPS anakin_op_converter transpose_op SERIAL) cc_test(test_anakin_batch_norm SRCS test_batch_norm_op.cc DEPS anakin_op_converter batch_norm_op SERIAL) cc_test(test_anakin_dropout SRCS test_dropout_op.cc DEPS anakin_op_converter dropout_op SERIAL) -#cc_test(test_anakin_im2sequence SRCS test_im2sequence_op.cc DEPS anakin_op_converter im2sequence_op im2col) cc_test(test_anakin_sum SRCS test_sum_op.cc DEPS anakin_op_converter sum_op selected_rows_functor SERIAL) +cc_test(test_anakin_affine_channel SRCS test_affine_channel_op.cc DEPS anakin_op_converter
affine_channel_op SERIAL) diff --git a/paddle/fluid/inference/anakin/convert/activation.cc b/paddle/fluid/inference/anakin/convert/activation.cc index a9aeb19ffd5f04c03df593e8f48976e7fa6155ab..523571f1aa8b5a9d17f97a1fd765fe9f1ac95b22 100644 --- a/paddle/fluid/inference/anakin/convert/activation.cc +++ b/paddle/fluid/inference/anakin/convert/activation.cc @@ -16,16 +16,13 @@ #include #include -using anakin::graph::GraphGlobalMem; -using anakin::AK_FLOAT; -using anakin::saber::NV; -using anakin::saber::Shape; - namespace paddle { namespace inference { namespace anakin { -ActivationOpConverter::ActivationOpConverter(const std::string &op_type) +template +ActivationOpConverter::ActivationOpConverter( + const std::string &op_type) : op_type_(op_type) { auto it = anakin_op_types_.find(op_type_); PADDLE_ENFORCE(it != anakin_op_types_.end(), @@ -33,10 +30,10 @@ ActivationOpConverter::ActivationOpConverter(const std::string &op_type) anakin_op_type_ = it->second; } -void ActivationOpConverter::operator()(const framework::proto::OpDesc &op, - const framework::BlockDesc &block_desc, - const framework::Scope &scope, - bool test_mode) { +template +void ActivationOpConverter::operator()( + const framework::proto::OpDesc &op, const framework::BlockDesc &block_desc, + const framework::Scope &scope, bool test_mode) { framework::OpDesc op_desc(op, nullptr); PADDLE_ENFORCE_EQ(op_desc.Input("X").size(), 1); PADDLE_ENFORCE_EQ(op_desc.Output("Out").size(), 1); @@ -44,8 +41,17 @@ void ActivationOpConverter::operator()(const framework::proto::OpDesc &op, auto op_name = op_desc.Type() + ":" + op_desc.Output("Out").front(); auto input_name = op_desc.Input("X").front(); auto output_name = op_desc.Output("Out").front(); - engine_->AddOp(op_name, "Activation", {input_name}, {output_name}); - engine_->AddOpAttr(op_name, "type", anakin_op_type_); + this->engine_->AddOp(op_name, "Activation", {input_name}, {output_name}); + this->engine_->AddOpAttr(op_name, "type", anakin_op_type_); + + if (op_type_ == "swish") { + float beta = boost::get(op_desc.GetAttr("beta")); + this->engine_->AddOpAttr(op_name, "clip_relu_num", beta); + } + if (op_type_ == "relu6") { + float threshold = boost::get(op_desc.GetAttr("threshold")); + this->engine_->AddOpAttr(op_name, "clip_relu_num", threshold); + } } } // namespace anakin @@ -54,3 +60,5 @@ void ActivationOpConverter::operator()(const framework::proto::OpDesc &op, REGISTER_ANAKIN_OP_CONVERTER(sigmoid, SigmoidOpConverter); REGISTER_ANAKIN_OP_CONVERTER(tanh, TanhOpConverter); +REGISTER_ANAKIN_OP_CONVERTER(swish, SwishOpConverter); +REGISTER_ANAKIN_OP_CONVERTER(relu6, Relu6OpConverter); diff --git a/paddle/fluid/inference/anakin/convert/activation.h b/paddle/fluid/inference/anakin/convert/activation.h index 592a3d5bd9d1272aae8a13d0d0acc77f8990c6b3..a2475e492c408008fd7b22815a03aedfd3d04650 100644 --- a/paddle/fluid/inference/anakin/convert/activation.h +++ b/paddle/fluid/inference/anakin/convert/activation.h @@ -22,7 +22,8 @@ namespace paddle { namespace inference { namespace anakin { -class ActivationOpConverter : public AnakinOpConverter { +template +class ActivationOpConverter : public AnakinOpConverter { public: explicit ActivationOpConverter(const std::string &op_type); @@ -36,18 +37,36 @@ class ActivationOpConverter : public AnakinOpConverter { std::string op_type_; std::string anakin_op_type_; std::map anakin_op_types_{{"tanh", "TanH"}, - {"sigmoid", "Sigmoid"}}; + {"sigmoid", "Sigmoid"}, + {"relu6", "ClippedRelu"}, + {"swish", "Swish"}}; }; -class TanhOpConverter : public 
ActivationOpConverter { +template +class TanhOpConverter : public ActivationOpConverter { public: - TanhOpConverter() : ActivationOpConverter("tanh") {} + TanhOpConverter() : ActivationOpConverter("tanh") {} }; -class SigmoidOpConverter : public ActivationOpConverter { +template +class SigmoidOpConverter : public ActivationOpConverter { public: - SigmoidOpConverter() : ActivationOpConverter("sigmoid") {} + SigmoidOpConverter() + : ActivationOpConverter("sigmoid") {} }; + +template +class Relu6OpConverter : public ActivationOpConverter { + public: + Relu6OpConverter() : ActivationOpConverter("relu6") {} +}; + +template +class SwishOpConverter : public ActivationOpConverter { + public: + SwishOpConverter() : ActivationOpConverter("swish") {} +}; + } // namespace anakin } // namespace inference } // namespace paddle diff --git a/paddle/fluid/inference/anakin/convert/affine_channel.cc b/paddle/fluid/inference/anakin/convert/affine_channel.cc new file mode 100644 index 0000000000000000000000000000000000000000..534e7dca81db959115283d65018ec33cc7a0924c --- /dev/null +++ b/paddle/fluid/inference/anakin/convert/affine_channel.cc @@ -0,0 +1,55 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "paddle/fluid/inference/anakin/convert/affine_channel.h" +#include +#include +#include +#include "paddle/fluid/inference/anakin/convert/helper.h" + +namespace paddle { +namespace inference { +namespace anakin { + +template +void AffineChannelOpConverter::operator()( + const framework::proto::OpDesc &op, const framework::BlockDesc &block_desc, + const framework::Scope &scope, bool test_mode) { + framework::OpDesc op_desc(op, nullptr); + PADDLE_ENFORCE_EQ(op_desc.Input("X").size(), 1); + PADDLE_ENFORCE_EQ(op_desc.Output("Out").size(), 1); + + auto op_name = op_desc.Type() + ":" + op_desc.Output("Out").front(); + auto input_name = op_desc.Input("X").front(); + auto output_name = op_desc.Output("Out").front(); + this->engine_->AddOp(op_name, "AffineChannel", {input_name}, {output_name}); + + // Copy the Scale to CPUPlace and get the pointer. + auto *scale_v = scope.FindVar(op_desc.Input("Scale").front()); + PADDLE_ENFORCE_NOT_NULL(scale_v); + auto weight1 = pblock_from_var(*scale_v, this->engine_); + this->engine_->AddOpAttr(op_name, "weight_1", *weight1); + + // Copy the Bias to CPUPlace and get the pointer. 
+ auto *bias_v = scope.FindVar(op_desc.Input("Bias").front()); + PADDLE_ENFORCE_NOT_NULL(bias_v); + auto weight2 = pblock_from_var(*bias_v, this->engine_); + this->engine_->AddOpAttr(op_name, "weight_2", *weight2); +} + +} // namespace anakin +} // namespace inference +} // namespace paddle + +REGISTER_ANAKIN_OP_CONVERTER(affine_channel, AffineChannelOpConverter); diff --git a/paddle/fluid/inference/anakin/convert/affine_channel.h b/paddle/fluid/inference/anakin/convert/affine_channel.h new file mode 100644 index 0000000000000000000000000000000000000000..443f6101288af4ef6b82a9370f83b7b0c07e23c5 --- /dev/null +++ b/paddle/fluid/inference/anakin/convert/affine_channel.h @@ -0,0 +1,40 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once + +#include +#include "paddle/fluid/inference/anakin/convert/op_converter.h" + +namespace paddle { +namespace inference { +namespace anakin { + +template +class AffineChannelOpConverter : public AnakinOpConverter { + public: + AffineChannelOpConverter() = default; + + virtual void operator()(const framework::proto::OpDesc &op, + const framework::BlockDesc &block_desc, + const framework::Scope &scope, + bool test_mode) override; + virtual ~AffineChannelOpConverter() {} + + private: +}; + +} // namespace anakin +} // namespace inference +} // namespace paddle diff --git a/paddle/fluid/inference/anakin/convert/batch_norm.cc b/paddle/fluid/inference/anakin/convert/batch_norm.cc index 38cf6172027b3b200a378a61b6d5b395cc571de7..b41f5dc925208d38ae233f0b3d1ca425537b9b47 100644 --- a/paddle/fluid/inference/anakin/convert/batch_norm.cc +++ b/paddle/fluid/inference/anakin/convert/batch_norm.cc @@ -18,107 +18,64 @@ #include #include #include - -using anakin::graph::GraphGlobalMem; -using anakin::AK_FLOAT; -using anakin::saber::NV; -using anakin::saber::Shape; +#include "paddle/fluid/inference/anakin/convert/helper.h" namespace paddle { namespace inference { namespace anakin { -void BatchNormOpConverter::operator()(const framework::proto::OpDesc &op, - const framework::BlockDesc &block_desc, - const framework::Scope &scope, - bool test_mode) { +template +void BatchNormOpConverter::operator()( + const framework::proto::OpDesc &op, const framework::BlockDesc &block_desc, + const framework::Scope &scope, bool test_mode) { framework::OpDesc op_desc(op, nullptr); PADDLE_ENFORCE_EQ(op_desc.Output("Y").size(), 1); std::map inputs; for (auto k : {"X", "Scale", "Bias", "Mean", "Variance"}) { PADDLE_ENFORCE_EQ(op_desc.Input(k).size(), 1UL); - auto v = op_desc.Input(k).front(); - inputs.insert({k, v}); } + auto input = op_desc.Input("X").front(); auto output = op_desc.Output("Y").front(); auto op_name = op_desc.Type() + ":" + op_desc.Output("Y").front(); auto epsilon = boost::get(op_desc.GetAttr("epsilon")); - // auto momentum = boost::get(op_desc.GetAttr("momentum")); auto bn_op_name = op_name + ":bn"; auto bn_output = bn_op_name + "_output"; - engine_->AddOp(bn_op_name, "BatchNorm", 
{inputs["X"]}, {bn_output}); - engine_->AddOpAttr(bn_op_name, "epsilon", epsilon); - engine_->AddOpAttr(bn_op_name, "momentum", static_cast(1.0)); + this->engine_->AddOp(bn_op_name, "BatchNorm", {input}, {bn_output}); + this->engine_->AddOpAttr(bn_op_name, "epsilon", epsilon); + this->engine_->AddOpAttr(bn_op_name, "momentum", static_cast(1.0)); auto scale_op_name = op_name + ":scale"; - auto get_lod_tensor = [this, &scope, &op_name](const std::string &var_name, - framework::LoDTensor *tensor) { - auto *v = scope.FindVar(var_name); - PADDLE_ENFORCE_NOT_NULL(v); - auto *t = v->GetMutable(); - tensor->Resize(t->dims()); - TensorCopySync(*t, platform::CPUPlace(), tensor); - }; - - framework::LoDTensor bias_t; - framework::LoDTensor mean_t; - framework::LoDTensor scale_t; - framework::LoDTensor variance_t; - get_lod_tensor(inputs["Bias"], &bias_t); - get_lod_tensor(inputs["Mean"], &mean_t); - get_lod_tensor(inputs["Scale"], &scale_t); - get_lod_tensor(inputs["Variance"], &variance_t); - - auto fill_shape = [](size_t n, std::vector shape) { - shape.insert(shape.begin(), 1); - if (shape.size() < n) { - shape.insert(shape.end(), n - shape.size(), 1); - } - return shape; - }; - Shape shape1(fill_shape(4, framework::vectorize2int(mean_t.dims()))); - Shape shape2(fill_shape(4, framework::vectorize2int(variance_t.dims()))); - auto *weight1 = - GraphGlobalMem::Global().template new_block(shape1); - auto *mean_data = static_cast(weight1->h_tensor().mutable_data()); - std::copy_n(mean_t.data(), mean_t.numel(), mean_data); - engine_->AddOpAttr(bn_op_name, "weight_1", *weight1); - - auto *weight2 = - GraphGlobalMem::Global().template new_block(shape2); - auto *variance_data = - static_cast(weight2->h_tensor().mutable_data()); - std::copy_n(variance_t.data(), variance_t.numel(), variance_data); - engine_->AddOpAttr(bn_op_name, "weight_2", *weight2); - - Shape shape3(std::vector({1, 1, 1, 1})); - auto *weight3 = - GraphGlobalMem::Global().template new_block(shape3); - auto *alpha_data = static_cast(weight3->h_tensor().mutable_data()); - float weight3_data[] = {1}; - std::copy(std::begin(weight3_data), std::end(weight3_data), alpha_data); - engine_->AddOpAttr(bn_op_name, "weight_3", *weight3); - - Shape scale_shape(fill_shape(4, framework::vectorize2int(scale_t.dims()))); - auto *scale = - GraphGlobalMem::Global().template new_block(scale_shape); - auto *scale_data = static_cast(scale->h_tensor().mutable_data()); - std::copy_n(scale_t.data(), scale_t.numel(), scale_data); - - Shape bias_shape(fill_shape(4, framework::vectorize2int(bias_t.dims()))); - auto *bias = - GraphGlobalMem::Global().template new_block(bias_shape); - auto *bias_data = static_cast(bias->h_tensor().mutable_data()); - std::copy_n(bias_t.data(), bias_t.numel(), bias_data); - - engine_->AddOp(scale_op_name, "Scale", {bn_output}, {output}); - engine_->AddOpAttr(scale_op_name, "axis", 1); - engine_->AddOpAttr(scale_op_name, "num_axes", 1); - engine_->AddOpAttr(scale_op_name, "bias_term", true); - engine_->AddOpAttr(scale_op_name, "weight_1", *scale); - engine_->AddOpAttr(scale_op_name, "weight_2", *bias); + this->engine_->AddOp(scale_op_name, "Scale", {bn_output}, {output}); + this->engine_->AddOpAttr(scale_op_name, "axis", 1); + this->engine_->AddOpAttr(scale_op_name, "num_axes", 1); + this->engine_->AddOpAttr(scale_op_name, "bias_term", true); + + auto *mean_v = scope.FindVar(op_desc.Input("Mean").front()); + PADDLE_ENFORCE_NOT_NULL(mean_v); + auto weight1 = pblock_from_var(*mean_v, this->engine_); + this->engine_->AddOpAttr(bn_op_name, 
"weight_1", *weight1); + + auto *variance_v = scope.FindVar(op_desc.Input("Variance").front()); + PADDLE_ENFORCE_NOT_NULL(variance_v); + auto weight2 = + pblock_from_var(*variance_v, this->engine_); + this->engine_->AddOpAttr(bn_op_name, "weight_2", *weight2); + + auto *weight3 = pblock_from_vector( + std::vector({1}), this->engine_); + this->engine_->AddOpAttr(bn_op_name, "weight_3", *weight3); + + auto *scale_v = scope.FindVar(op_desc.Input("Scale").front()); + PADDLE_ENFORCE_NOT_NULL(scale_v); + auto scale = pblock_from_var(*scale_v, this->engine_); + this->engine_->AddOpAttr(scale_op_name, "weight_1", *scale); + + auto *bias_v = scope.FindVar(op_desc.Input("Bias").front()); + PADDLE_ENFORCE_NOT_NULL(bias_v); + auto bias = pblock_from_var(*bias_v, this->engine_); + this->engine_->AddOpAttr(scale_op_name, "weight_2", *bias); } } // namespace anakin diff --git a/paddle/fluid/inference/anakin/convert/batch_norm.h b/paddle/fluid/inference/anakin/convert/batch_norm.h index c56735f15b435b46cf9f623bd284b5731a36c327..52156aeb0283af9419c77490bbaded5bb5f45f4b 100644 --- a/paddle/fluid/inference/anakin/convert/batch_norm.h +++ b/paddle/fluid/inference/anakin/convert/batch_norm.h @@ -20,7 +20,8 @@ namespace paddle { namespace inference { namespace anakin { -class BatchNormOpConverter : public AnakinOpConverter { +template +class BatchNormOpConverter : public AnakinOpConverter { public: BatchNormOpConverter() = default; diff --git a/paddle/fluid/inference/anakin/convert/concat.cc b/paddle/fluid/inference/anakin/convert/concat.cc index ae90c083690da6e108a05460de68be2eb0cd9b48..584a82ead43fa75f0117cf524151bbd75cf54ba6 100644 --- a/paddle/fluid/inference/anakin/convert/concat.cc +++ b/paddle/fluid/inference/anakin/convert/concat.cc @@ -15,34 +15,23 @@ #include "paddle/fluid/inference/anakin/convert/concat.h" #include -using anakin::graph::GraphGlobalMem; -using anakin::AK_FLOAT; -using anakin::Precision; -using anakin::saber::NV; -using anakin::saber::X86; -using anakin::saber::Shape; -using anakin::PBlock; -using anakin::PTuple; - namespace paddle { namespace inference { namespace anakin { -void ConcatOpConverter::operator()(const framework::proto::OpDesc &op, - const framework::BlockDesc &block_desc, - const framework::Scope &scope, - bool test_mode) { +template +void ConcatOpConverter::operator()( + const framework::proto::OpDesc &op, const framework::BlockDesc &block_desc, + const framework::Scope &scope, bool test_mode) { framework::OpDesc op_desc(op, nullptr); int axis = boost::get(op_desc.GetAttr("axis")); auto input_names = op_desc.Input("X"); - // PADDLE_ENFORCE(axis > 0, - // "The axis attr of Concat op should be large than 0 for trt"); auto y_name = op_desc.Output("Out").front(); auto op_name = op_desc.Type() + ":" + op_desc.Output("Out").front(); - engine_->AddOp(op_name, "Concat", input_names, {y_name}); - engine_->AddOpAttr(op_name, "axis", axis); + this->engine_->AddOp(op_name, "Concat", input_names, {y_name}); + this->engine_->AddOpAttr(op_name, "axis", axis); } } // namespace anakin diff --git a/paddle/fluid/inference/anakin/convert/concat.h b/paddle/fluid/inference/anakin/convert/concat.h index 974ff689bfef681f8993d5dbb0dbbbdde91f33bd..fb5514affa78d254476faf41bd09e21f41d2090d 100644 --- a/paddle/fluid/inference/anakin/convert/concat.h +++ b/paddle/fluid/inference/anakin/convert/concat.h @@ -20,7 +20,8 @@ namespace paddle { namespace inference { namespace anakin { -class ConcatOpConverter : public AnakinOpConverter { +template +class ConcatOpConverter : public AnakinOpConverter { 
public: ConcatOpConverter() = default; diff --git a/paddle/fluid/inference/anakin/convert/conv2d.cc b/paddle/fluid/inference/anakin/convert/conv2d.cc index 308f14604b9c83f2278499359328109d31f9ff17..70e0adf5ead45dc93c31f5d8aecffd7213b35954 100644 --- a/paddle/fluid/inference/anakin/convert/conv2d.cc +++ b/paddle/fluid/inference/anakin/convert/conv2d.cc @@ -16,21 +16,18 @@ #include #include #include +#include "paddle/fluid/inference/anakin/convert/helper.h" -using anakin::graph::GraphGlobalMem; -using anakin::AK_FLOAT; -using anakin::saber::NV; -using anakin::saber::Shape; using anakin::PTuple; namespace paddle { namespace inference { namespace anakin { -void Conv2dOpConverter::operator()(const framework::proto::OpDesc &op, - const framework::BlockDesc &block_desc, - const framework::Scope &scope, - bool test_mode) { +template +void Conv2dOpConverter::operator()( + const framework::proto::OpDesc &op, const framework::BlockDesc &block_desc, + const framework::Scope &scope, bool test_mode) { framework::OpDesc op_desc(op, nullptr); PADDLE_ENFORCE_EQ(op_desc.Input("Input").size(), 1UL); PADDLE_ENFORCE_EQ(op_desc.Input("Filter").size(), 1UL); @@ -39,46 +36,69 @@ void Conv2dOpConverter::operator()(const framework::proto::OpDesc &op, auto input_name = op_desc.Input("Input").front(); auto output_name = op_desc.Output("Output").front(); auto op_name = op_desc.Type() + ":" + op_desc.Output("Output").front(); - engine_->AddOp(op_name, "Convolution", {input_name}, {output_name}); + this->engine_->AddOp(op_name, "Convolution", {input_name}, {output_name}); auto *filter_v = scope.FindVar(op_desc.Input("Filter").front()); PADDLE_ENFORCE_NOT_NULL(filter_v); - auto *filter_t = filter_v->GetMutable(); - std::unique_ptr weight_tensor( - new framework::LoDTensor()); - weight_tensor->Resize(filter_t->dims()); - TensorCopySync((*filter_t), platform::CPUPlace(), weight_tensor.get()); + auto weight_tensor = tensor_from_var(*filter_v, platform::CPUPlace()); + auto weight_shape = framework::vectorize2int(weight_tensor->dims()); PADDLE_ENFORCE_EQ(weight_tensor->dims().size(), 4UL); - // const int n_output = weight_tensor->dims()[0]; - // const int n_input = weight_tensor->dims()[1]; const int filter_h = weight_tensor->dims()[2]; const int filter_w = weight_tensor->dims()[3]; - // auto filter_num = n_input * filter_h * filter_w ; + auto filter_num = weight_tensor->dims()[0]; - engine_->AddOpAttr(op_name, "filter_num", filter_num); - engine_->AddOpAttr>(op_name, "kernel_size", {filter_h, filter_w}); + this->engine_->template AddOpAttr(op_name, "filter_num", filter_num); + this->engine_->template AddOpAttr>(op_name, "kernel_size", + {filter_h, filter_w}); auto strides = boost::get>(op_desc.GetAttr("strides")); - engine_->AddOpAttr>(op_name, "strides", strides); + this->engine_->template AddOpAttr>(op_name, "strides", strides); auto paddings = boost::get>(op_desc.GetAttr("paddings")); - engine_->AddOpAttr>(op_name, "padding", paddings); + this->engine_->template AddOpAttr>(op_name, "padding", paddings); auto dilations = boost::get>(op_desc.GetAttr("dilations")); - engine_->AddOpAttr>(op_name, "dilation_rate", dilations); + this->engine_->template AddOpAttr>(op_name, "dilation_rate", + dilations); const int groups = boost::get(op_desc.GetAttr("groups")); - engine_->AddOpAttr(op_name, "group", groups); - engine_->AddOpAttr(op_name, "axis", 1); - engine_->AddOpAttr(op_name, "bias_term", false); + this->engine_->AddOpAttr(op_name, "group", groups); + this->engine_->AddOpAttr(op_name, "axis", 1); + 
this->engine_->AddOpAttr(op_name, "bias_term", false); + + ::anakin::saber::Shape anakin_shape(weight_shape); + bool enable_int8 = boost::get(op_desc.HasAttr("enable_int8")); - auto weight_shape = framework::vectorize2int(filter_t->dims()); - Shape anakin_shape(weight_shape); - auto *weight1 = - GraphGlobalMem::Global().template new_block(anakin_shape); - float *cpu_data = static_cast(weight1->h_tensor().mutable_data()); - std::copy_n(weight_tensor->data(), weight_tensor->numel(), cpu_data); - weight1->d_tensor().set_shape(anakin_shape); - weight1->d_tensor().copy_from(weight1->h_tensor()); - engine_->AddOpAttr(op_name, "weight_1", *weight1); + if (enable_int8) { + const float int8_range = 127.; + float in_scale = boost::get(op_desc.GetAttr("input_scale")); + float weight_scale = boost::get(op_desc.GetAttr("weight_scale")); + PBlock *weight1 = + new PBlock(anakin_shape, ::anakin::AK_INT8); + this->engine_->RegistBlock(weight1); + float *weight_data = weight_tensor->data(); + std::vector weight_int8; + int weight_num = weight_tensor->numel(); + for (int i = 0; i < weight_tensor->numel(); i++) { + bool is_valid_int8 = + ((weight_data[i] >= -128) && (weight_data[i] <= 127)); + PADDLE_ENFORCE(is_valid_int8, + "We are in anakin subgraph int8 mode, the weight of conv " + "should be in range [-128, 127]"); + weight_int8.push_back(static_cast(weight_data[i])); + } + memcpy(static_cast(weight1->h_tensor().mutable_data()), + static_cast(weight_int8.data()), sizeof(char) * weight_num); + weight1->d_tensor().set_shape(anakin_shape); + weight1->d_tensor().copy_from(weight1->h_tensor()); + this->engine_->AddOpAttr(op_name, "weight_1", *weight1); + this->engine_->Graph()->SetOpPrec(op_name, ::anakin::AK_INT8); + this->engine_->Graph()->SetWeightsScale(op_name, + {weight_scale / int8_range}, false); + this->engine_->AddTensorScale(input_name, in_scale / int8_range); + } else { + auto *weight1 = pblock_from_tensor( + *weight_tensor, weight_shape, this->engine_); + this->engine_->AddOpAttr(op_name, "weight_1", *weight1); + } } } // namespace anakin diff --git a/paddle/fluid/inference/anakin/convert/conv2d.h b/paddle/fluid/inference/anakin/convert/conv2d.h index dca5d19f468ac6d6e2f4bcda8ecaa3922d80e6b1..b22cb8ea9318cf75deacc681e3c1e7b271d1f86b 100644 --- a/paddle/fluid/inference/anakin/convert/conv2d.h +++ b/paddle/fluid/inference/anakin/convert/conv2d.h @@ -20,7 +20,8 @@ namespace paddle { namespace inference { namespace anakin { -class Conv2dOpConverter : public AnakinOpConverter { +template +class Conv2dOpConverter : public AnakinOpConverter { public: Conv2dOpConverter() = default; diff --git a/paddle/fluid/inference/anakin/convert/conv2d_fusion.cc b/paddle/fluid/inference/anakin/convert/conv2d_fusion.cc index fa1ab0efeeb5cacd112ca1b644735eaaf49e55f8..a1568b8bdeeb93790ecc5f37844e7bf4b8892993 100644 --- a/paddle/fluid/inference/anakin/convert/conv2d_fusion.cc +++ b/paddle/fluid/inference/anakin/convert/conv2d_fusion.cc @@ -16,21 +16,18 @@ #include #include #include +#include "paddle/fluid/inference/anakin/convert/helper.h" -using anakin::graph::GraphGlobalMem; -using anakin::AK_FLOAT; -using anakin::saber::NV; -using anakin::saber::Shape; using anakin::PTuple; namespace paddle { namespace inference { namespace anakin { -void Conv2dFusionOpConverter::operator()(const framework::proto::OpDesc &op, - const framework::BlockDesc &block_desc, - const framework::Scope &scope, - bool test_mode) { +template +void Conv2dFusionOpConverter::operator()( + const framework::proto::OpDesc &op, const framework::BlockDesc 
&block_desc, + const framework::Scope &scope, bool test_mode) { framework::OpDesc op_desc(op, nullptr); PADDLE_ENFORCE_EQ(op_desc.Input("Input").size(), 1UL); PADDLE_ENFORCE_EQ(op_desc.Input("Filter").size(), 1UL); @@ -40,71 +37,74 @@ void Conv2dFusionOpConverter::operator()(const framework::proto::OpDesc &op, auto input_name = op_desc.Input("Input").front(); auto output_name = op_desc.Output("Output").front(); auto op_name = op_desc.Type() + ":" + op_desc.Output("Output").front(); - engine_->AddOp(op_name, "Convolution", {input_name}, {output_name}); + this->engine_->AddOp(op_name, "Convolution", {input_name}, {output_name}); auto *filter_v = scope.FindVar(op_desc.Input("Filter").front()); PADDLE_ENFORCE_NOT_NULL(filter_v); - auto *filter_t = filter_v->GetMutable(); + + auto weight_tensor = tensor_from_var(*filter_v, platform::CPUPlace()); + auto weight_shape = framework::vectorize2int(weight_tensor->dims()); auto *b_v = scope.FindVar(op_desc.Input("Bias").front()); PADDLE_ENFORCE_NOT_NULL(b_v); - auto *b_t = b_v->GetMutable(); - - std::unique_ptr weight_tensor( - new framework::LoDTensor()); - weight_tensor->Resize(filter_t->dims()); - TensorCopySync((*filter_t), platform::CPUPlace(), weight_tensor.get()); PADDLE_ENFORCE_EQ(weight_tensor->dims().size(), 4UL); - - // const int n_output = weight_tensor->dims()[0]; - // const int n_input = weight_tensor->dims()[1]; const int filter_h = weight_tensor->dims()[2]; const int filter_w = weight_tensor->dims()[3]; - // auto filter_num = n_input * filter_h * filter_w ; auto filter_num = weight_tensor->dims()[0]; - engine_->AddOpAttr(op_name, "filter_num", filter_num); - engine_->AddOpAttr>(op_name, "kernel_size", {filter_h, filter_w}); + this->engine_->template AddOpAttr(op_name, "filter_num", filter_num); + this->engine_->template AddOpAttr>(op_name, "kernel_size", + {filter_h, filter_w}); auto strides = boost::get>(op_desc.GetAttr("strides")); - engine_->AddOpAttr>(op_name, "strides", strides); + this->engine_->template AddOpAttr>(op_name, "strides", strides); auto paddings = boost::get>(op_desc.GetAttr("paddings")); - engine_->AddOpAttr>(op_name, "padding", paddings); + this->engine_->template AddOpAttr>(op_name, "padding", paddings); auto dilations = boost::get>(op_desc.GetAttr("dilations")); - engine_->AddOpAttr>(op_name, "dilation_rate", dilations); + this->engine_->template AddOpAttr>(op_name, "dilation_rate", + dilations); const int groups = boost::get(op_desc.GetAttr("groups")); - engine_->AddOpAttr(op_name, "group", groups); - engine_->AddOpAttr(op_name, "axis", 1); - engine_->AddOpAttr(op_name, "bias_term", true); - - auto weight_shape = framework::vectorize2int(filter_t->dims()); - Shape anakin_shape(weight_shape); - auto *weight1 = - GraphGlobalMem::Global().template new_block(anakin_shape); - float *cpu_data = static_cast(weight1->h_tensor().mutable_data()); - std::copy_n(weight_tensor->data(), weight_tensor->numel(), cpu_data); - weight1->d_tensor().set_shape(anakin_shape); - weight1->d_tensor().copy_from(weight1->h_tensor()); - engine_->AddOpAttr(op_name, "weight_1", *weight1); - - auto bias_shape = framework::vectorize2int(b_t->dims()); - framework::LoDTensor bias_tensor; - bias_tensor.Resize(b_t->dims()); - TensorCopySync((*b_t), platform::CPUPlace(), &bias_tensor); - auto *bias_data = bias_tensor.data(); - bias_shape.insert(bias_shape.begin(), 1); - bias_shape.insert(bias_shape.begin(), 1); - bias_shape.insert(bias_shape.begin(), 1); - // bias_shape.push_back(1); - // bias_shape.push_back(1); - Shape 
anakin_bias_shape(bias_shape); + this->engine_->AddOpAttr(op_name, "group", groups); + this->engine_->AddOpAttr(op_name, "axis", 1); + this->engine_->AddOpAttr(op_name, "bias_term", true); - auto *weight2 = GraphGlobalMem::Global().template new_block( - anakin_bias_shape); - float *cpu_data2 = static_cast(weight2->h_tensor().mutable_data()); - std::copy_n(bias_data, bias_tensor.numel(), cpu_data2); - weight2->d_tensor().set_shape(anakin_bias_shape); - weight2->d_tensor().copy_from(weight2->h_tensor()); - engine_->AddOpAttr(op_name, "weight_2", *weight2); + ::anakin::saber::Shape anakin_shape(weight_shape); + bool enable_int8 = boost::get(op_desc.HasAttr("enable_int8")); + if (enable_int8) { + const float int8_range = 127.; + float in_scale = boost::get(op_desc.GetAttr("input_scale")); + float weight_scale = boost::get(op_desc.GetAttr("weight_scale")); + PBlock *weight1 = + new PBlock(anakin_shape, ::anakin::AK_INT8); + this->engine_->RegistBlock(weight1); + float *weight_data = weight_tensor->data(); + std::vector weight_int8; + int weight_num = weight_tensor->numel(); + for (int i = 0; i < weight_tensor->numel(); i++) { + bool is_valid_int8 = + ((weight_data[i] >= -128) && (weight_data[i] <= 127)); + PADDLE_ENFORCE(is_valid_int8, + "We are in anakin subgraph int8 mode, the weight of conv " + "should be in range [-128, 127]"); + weight_int8.push_back(static_cast(weight_data[i])); + } + memcpy(static_cast(weight1->h_tensor().mutable_data()), + static_cast(weight_int8.data()), sizeof(char) * weight_num); + weight1->d_tensor().set_shape(anakin_shape); + weight1->d_tensor().copy_from(weight1->h_tensor()); + this->engine_->AddOpAttr(op_name, "weight_1", *weight1); + this->engine_->Graph()->SetOpPrec(op_name, ::anakin::AK_INT8); + this->engine_->Graph()->SetWeightsScale(op_name, + {weight_scale / int8_range}, false); + this->engine_->AddTensorScale(input_name, in_scale / int8_range); + } else { + auto weight_tensor = tensor_from_var(*filter_v, platform::CPUPlace()); + auto weight_shape = framework::vectorize2int(weight_tensor->dims()); + auto *weight1 = pblock_from_tensor( + *weight_tensor, weight_shape, this->engine_); + this->engine_->AddOpAttr(op_name, "weight_1", *weight1); + auto weight2 = pblock_from_var(*b_v, this->engine_); + this->engine_->AddOpAttr(op_name, "weight_2", *weight2); + } } } // namespace anakin diff --git a/paddle/fluid/inference/anakin/convert/conv2d_fusion.h b/paddle/fluid/inference/anakin/convert/conv2d_fusion.h index 0d9ef28183b309c4b50714fcbe64e24c5d9dfbaa..768814d3f996dd5c7224d5aebcbed9d430439ab5 100644 --- a/paddle/fluid/inference/anakin/convert/conv2d_fusion.h +++ b/paddle/fluid/inference/anakin/convert/conv2d_fusion.h @@ -20,7 +20,8 @@ namespace paddle { namespace inference { namespace anakin { -class Conv2dFusionOpConverter : public AnakinOpConverter { +template +class Conv2dFusionOpConverter : public AnakinOpConverter { public: Conv2dFusionOpConverter() = default; diff --git a/paddle/fluid/inference/anakin/convert/density_prior_box.cc b/paddle/fluid/inference/anakin/convert/density_prior_box.cc index 30796f7592427191a4396a154be62838b7e666ad..5bbaeb57a7da46adfaa47fb696b4b73c8e33c7f0 100644 --- a/paddle/fluid/inference/anakin/convert/density_prior_box.cc +++ b/paddle/fluid/inference/anakin/convert/density_prior_box.cc @@ -17,17 +17,14 @@ #include #include -using anakin::graph::GraphGlobalMem; -using anakin::AK_FLOAT; -using anakin::saber::NV; -using anakin::saber::Shape; using anakin::PTuple; namespace paddle { namespace inference { namespace anakin { -void 
DensityPriorBoxOpConverter::operator()( +template +void DensityPriorBoxOpConverter::operator()( const framework::proto::OpDesc& op, const framework::BlockDesc& block_desc, const framework::Scope& scope, bool test_mode) { framework::OpDesc op_desc(op, nullptr); @@ -81,22 +78,30 @@ void DensityPriorBoxOpConverter::operator()( std::vector temp_v = {}; - engine_->AddOp(op_name, "PriorBox", {input_name, image_name}, {output_name}); - engine_->AddOpAttr>(op_name, "min_size", min_sizes); - engine_->AddOpAttr>(op_name, "max_size", max_sizes); - engine_->AddOpAttr>(op_name, "aspect_ratio", aspect_ratios); - engine_->AddOpAttr>(op_name, "fixed_size", fixed_sizes); - engine_->AddOpAttr>(op_name, "fixed_ratio", fixed_ratios); - engine_->AddOpAttr>(op_name, "density", dens); - engine_->AddOpAttr(op_name, "is_flip", is_flip); - engine_->AddOpAttr(op_name, "is_clip", is_clip); - engine_->AddOpAttr>(op_name, "variance", variances); - engine_->AddOpAttr(op_name, "img_h", static_cast(0)); - engine_->AddOpAttr(op_name, "img_w", static_cast(0)); - engine_->AddOpAttr(op_name, "step_h", step_h); - engine_->AddOpAttr(op_name, "step_w", step_w); - engine_->AddOpAttr(op_name, "offset", offset); - engine_->AddOpAttr>(op_name, "order", t_order); + this->engine_->AddOp(op_name, "PriorBox", {input_name, image_name}, + {output_name}); + this->engine_->template AddOpAttr>(op_name, "min_size", + min_sizes); + this->engine_->template AddOpAttr>(op_name, "max_size", + max_sizes); + this->engine_->template AddOpAttr>(op_name, "aspect_ratio", + aspect_ratios); + this->engine_->template AddOpAttr>(op_name, "fixed_size", + fixed_sizes); + this->engine_->template AddOpAttr>(op_name, "fixed_ratio", + fixed_ratios); + this->engine_->template AddOpAttr>(op_name, "density", dens); + this->engine_->AddOpAttr(op_name, "is_flip", is_flip); + this->engine_->AddOpAttr(op_name, "is_clip", is_clip); + this->engine_->template AddOpAttr>(op_name, "variance", + variances); + this->engine_->AddOpAttr(op_name, "img_h", static_cast(0)); + this->engine_->AddOpAttr(op_name, "img_w", static_cast(0)); + this->engine_->AddOpAttr(op_name, "step_h", step_h); + this->engine_->AddOpAttr(op_name, "step_w", step_w); + this->engine_->AddOpAttr(op_name, "offset", offset); + this->engine_->template AddOpAttr>(op_name, "order", + t_order); } } // namespace anakin diff --git a/paddle/fluid/inference/anakin/convert/density_prior_box.h b/paddle/fluid/inference/anakin/convert/density_prior_box.h index bf9210711a0f69595c241803cd40d42770ccd5d7..5714f57a04b7b34581f4deb5cbdd2eb4318ba72c 100644 --- a/paddle/fluid/inference/anakin/convert/density_prior_box.h +++ b/paddle/fluid/inference/anakin/convert/density_prior_box.h @@ -22,7 +22,9 @@ namespace paddle { namespace inference { namespace anakin { -class DensityPriorBoxOpConverter : public AnakinOpConverter { +template +class DensityPriorBoxOpConverter + : public AnakinOpConverter { public: DensityPriorBoxOpConverter() = default; diff --git a/paddle/fluid/inference/anakin/convert/detection_out.cc b/paddle/fluid/inference/anakin/convert/detection_out.cc index 262ad28a654609cddde979d387621bb0c7c1a7f9..73dd6f2832541ecda119702f1779363e2950e413 100644 --- a/paddle/fluid/inference/anakin/convert/detection_out.cc +++ b/paddle/fluid/inference/anakin/convert/detection_out.cc @@ -16,19 +16,14 @@ #include #include -using anakin::graph::GraphGlobalMem; -using anakin::AK_FLOAT; -using anakin::saber::NV; -using anakin::saber::Shape; - namespace paddle { namespace inference { namespace anakin { -void 
DetectionOutOpConverter::operator()(const framework::proto::OpDesc &op, - const framework::BlockDesc &block_desc, - const framework::Scope &scope, - bool test_mode) { +template +void DetectionOutOpConverter::operator()( + const framework::proto::OpDesc &op, const framework::BlockDesc &block_desc, + const framework::Scope &scope, bool test_mode) { framework::OpDesc op_desc(op, nullptr); auto target_name = op_desc.Input("TargetBox").front(); auto prior_box_name = op_desc.Input("PriorBox").front(); @@ -52,18 +47,19 @@ void DetectionOutOpConverter::operator()(const framework::proto::OpDesc &op, "Not support encode_center_size code_type in DetectionOut of anakin"); } - engine_->AddOp(op_name, "DetectionOutput", - {target_name, scores_name, prior_box_name}, {output_name}); - engine_->AddOpAttr(op_name, "share_location", true); - engine_->AddOpAttr(op_name, "variance_encode_in_target", false); - engine_->AddOpAttr(op_name, "class_num", static_cast(0)); - engine_->AddOpAttr(op_name, "background_id", background_label); - engine_->AddOpAttr(op_name, "keep_top_k", keep_top_k); - engine_->AddOpAttr(op_name, "code_type", anakin_code_type); - engine_->AddOpAttr(op_name, "conf_thresh", score_threshold); - engine_->AddOpAttr(op_name, "nms_top_k", nms_top_k); - engine_->AddOpAttr(op_name, "nms_thresh", nms_threshold); - engine_->AddOpAttr(op_name, "nms_eta", nms_eta); + this->engine_->AddOp(op_name, "DetectionOutput", + {target_name, scores_name, prior_box_name}, + {output_name}); + this->engine_->AddOpAttr(op_name, "share_location", true); + this->engine_->AddOpAttr(op_name, "variance_encode_in_target", false); + this->engine_->AddOpAttr(op_name, "class_num", static_cast(0)); + this->engine_->AddOpAttr(op_name, "background_id", background_label); + this->engine_->AddOpAttr(op_name, "keep_top_k", keep_top_k); + this->engine_->AddOpAttr(op_name, "code_type", anakin_code_type); + this->engine_->AddOpAttr(op_name, "conf_thresh", score_threshold); + this->engine_->AddOpAttr(op_name, "nms_top_k", nms_top_k); + this->engine_->AddOpAttr(op_name, "nms_thresh", nms_threshold); + this->engine_->AddOpAttr(op_name, "nms_eta", nms_eta); } } // namespace anakin diff --git a/paddle/fluid/inference/anakin/convert/detection_out.h b/paddle/fluid/inference/anakin/convert/detection_out.h index ca78f10fdc2a7c7064ae0399e7f1afff1383ce67..c34342a66c1c6c42585c4cb92d64ed3964f7f427 100644 --- a/paddle/fluid/inference/anakin/convert/detection_out.h +++ b/paddle/fluid/inference/anakin/convert/detection_out.h @@ -22,7 +22,8 @@ namespace paddle { namespace inference { namespace anakin { -class DetectionOutOpConverter : public AnakinOpConverter { +template +class DetectionOutOpConverter : public AnakinOpConverter { public: DetectionOutOpConverter() = default; diff --git a/paddle/fluid/inference/anakin/convert/dropout.cc b/paddle/fluid/inference/anakin/convert/dropout.cc index bc9b26dcf2733369e558cde2954e9d0caaba86b0..6c5f80b5f8e07fd501348632ff3b4cda58de248c 100644 --- a/paddle/fluid/inference/anakin/convert/dropout.cc +++ b/paddle/fluid/inference/anakin/convert/dropout.cc @@ -16,24 +16,16 @@ #include #include #include - -using anakin::graph::GraphGlobalMem; -using anakin::AK_FLOAT; -using anakin::Precision; -using anakin::saber::NV; -using anakin::saber::X86; -using anakin::saber::Shape; -using anakin::PBlock; -using anakin::PTuple; +#include "paddle/fluid/inference/anakin/convert/helper.h" namespace paddle { namespace inference { namespace anakin { -void DropoutOpConverter::operator()(const framework::proto::OpDesc &op, - const 
framework::BlockDesc &block_desc, - const framework::Scope &scope, - bool test_mode) { +template +void DropoutOpConverter::operator()( + const framework::proto::OpDesc &op, const framework::BlockDesc &block_desc, + const framework::Scope &scope, bool test_mode) { framework::OpDesc op_desc(op, nullptr); PADDLE_ENFORCE_EQ(op_desc.Input("X").size(), 1); PADDLE_ENFORCE_EQ(op_desc.Output("Mask").size(), 1); @@ -43,21 +35,17 @@ void DropoutOpConverter::operator()(const framework::proto::OpDesc &op, auto out_name = op_desc.Output("Out").front(); auto op_name = op_desc.Type() + ":" + op_desc.Output("Out").front(); - engine_->AddOp(op_name, "Scale", {x_name}, {out_name}); + this->engine_->AddOp(op_name, "Scale", {x_name}, {out_name}); auto dropout_prob = boost::get(op_desc.GetAttr("dropout_prob")); auto factor = 1 - dropout_prob; - Shape shape1(std::vector({1, 1, 1, 1})); - auto *weight1 = - GraphGlobalMem::Global().template new_block(shape1); - auto *factor_data = static_cast(weight1->h_tensor().mutable_data()); - float weight1_data[] = {factor}; - std::copy(std::begin(weight1_data), std::end(weight1_data), factor_data); + auto *weight1 = pblock_from_vector( + std::vector({factor}), this->engine_); - engine_->AddOpAttr(op_name, "weight_1", *weight1); - engine_->AddOpAttr(op_name, "axis", 0); - engine_->AddOpAttr(op_name, "num_axes", 0); - engine_->AddOpAttr(op_name, "bias_term", false); + this->engine_->AddOpAttr(op_name, "weight_1", *weight1); + this->engine_->AddOpAttr(op_name, "axis", 0); + this->engine_->AddOpAttr(op_name, "num_axes", 0); + this->engine_->AddOpAttr(op_name, "bias_term", false); } } // namespace anakin diff --git a/paddle/fluid/inference/anakin/convert/dropout.h b/paddle/fluid/inference/anakin/convert/dropout.h index 11412e217ef5fa77bd22d7530d88be1347f2616f..801aa3dd16f8504360c75e251896f6bd4718925b 100644 --- a/paddle/fluid/inference/anakin/convert/dropout.h +++ b/paddle/fluid/inference/anakin/convert/dropout.h @@ -20,7 +20,8 @@ namespace paddle { namespace inference { namespace anakin { -class DropoutOpConverter : public AnakinOpConverter { +template +class DropoutOpConverter : public AnakinOpConverter { public: DropoutOpConverter() = default; diff --git a/paddle/fluid/inference/anakin/convert/elementwise.cc b/paddle/fluid/inference/anakin/convert/elementwise.cc index fe9a896d8266e06250b712be0c75290c039e9a08..dd32baa0b90018c8e0175fa9cae85a9fbeccedf0 100644 --- a/paddle/fluid/inference/anakin/convert/elementwise.cc +++ b/paddle/fluid/inference/anakin/convert/elementwise.cc @@ -17,20 +17,14 @@ #include #include -using anakin::graph::GraphGlobalMem; -using anakin::AK_FLOAT; -using anakin::Precision; -using anakin::saber::NV; -using anakin::saber::X86; -using anakin::saber::Shape; -using anakin::PBlock; using anakin::PTuple; namespace paddle { namespace inference { namespace anakin { -void ElementwiseAddOpConverter::operator()( +template +void ElementwiseAddOpConverter::operator()( const framework::proto::OpDesc &op, const framework::BlockDesc &block_desc, const framework::Scope &scope, bool test_mode) { framework::OpDesc op_desc(op, nullptr); @@ -43,14 +37,16 @@ void ElementwiseAddOpConverter::operator()( auto out_name = op_desc.Output("Out").front(); auto op_name = op_desc.Type() + ":" + op_desc.Output("Out").front(); - engine_->AddOp(op_name, "Eltwise", {x_name, y_name}, {out_name}); + this->engine_->AddOp(op_name, "Eltwise", {x_name, y_name}, {out_name}); std::string elementwise_type = "Add"; - engine_->AddOpAttr(op_name, "type", elementwise_type); + this->engine_->template 
AddOpAttr(op_name, "type", + elementwise_type); std::vector coeff = {1.0, 1.0}; - engine_->AddOpAttr>(op_name, "coeff", coeff); + this->engine_->template AddOpAttr>(op_name, "coeff", coeff); } -void ElementwiseMulOpConverter::operator()( +template +void ElementwiseMulOpConverter::operator()( const framework::proto::OpDesc &op, const framework::BlockDesc &block_desc, const framework::Scope &scope, bool test_mode) { framework::OpDesc op_desc(op, nullptr); @@ -63,21 +59,12 @@ void ElementwiseMulOpConverter::operator()( auto out_name = op_desc.Output("Out").front(); auto op_name = op_desc.Type() + ":" + op_desc.Output("Out").front(); - engine_->AddOp(op_name, "Scale", {x_name, y_name}, {out_name}); - // Fill a number to weight_1 as a placeholder. - Shape shape1(std::vector({1, 1, 1, 1})); - auto *weight1 = - GraphGlobalMem::Global().template new_block(shape1); - auto *placeholder_data = - static_cast(weight1->h_tensor().mutable_data()); - float weight1_data[] = {1}; - std::copy(std::begin(weight1_data), std::end(weight1_data), placeholder_data); - engine_->AddOpAttr(op_name, "weight_1", *weight1); - - auto axis = boost::get(op_desc.GetAttr("axis")); - engine_->AddOpAttr(op_name, "axis", axis); - engine_->AddOpAttr(op_name, "num_axes", 1); - engine_->AddOpAttr(op_name, "bias_term", false); + this->engine_->AddOp(op_name, "Eltwise", {x_name, y_name}, {out_name}); + std::string elementwise_type = "Prod"; + this->engine_->template AddOpAttr(op_name, "type", + elementwise_type); + std::vector coeff = {1.0, 1.0}; + this->engine_->template AddOpAttr>(op_name, "coeff", coeff); } } // namespace anakin diff --git a/paddle/fluid/inference/anakin/convert/elementwise.h b/paddle/fluid/inference/anakin/convert/elementwise.h index e4664493a9d3ce1ed9a0c79a05fb466c4e781b3e..190a8b55f0e3c29e9e9c8b254d4b4df824c3330b 100644 --- a/paddle/fluid/inference/anakin/convert/elementwise.h +++ b/paddle/fluid/inference/anakin/convert/elementwise.h @@ -20,7 +20,9 @@ namespace paddle { namespace inference { namespace anakin { -class ElementwiseAddOpConverter : public AnakinOpConverter { +template +class ElementwiseAddOpConverter + : public AnakinOpConverter { public: ElementwiseAddOpConverter() = default; @@ -33,7 +35,9 @@ class ElementwiseAddOpConverter : public AnakinOpConverter { private: }; -class ElementwiseMulOpConverter : public AnakinOpConverter { +template +class ElementwiseMulOpConverter + : public AnakinOpConverter { public: ElementwiseMulOpConverter() = default; diff --git a/paddle/fluid/inference/anakin/convert/fc.cc b/paddle/fluid/inference/anakin/convert/fc.cc index a80a1a47e91aa085935b5febb3858e028f396091..0621e3377b34660e3c2f1d1b83847bd46b5bd26d 100644 --- a/paddle/fluid/inference/anakin/convert/fc.cc +++ b/paddle/fluid/inference/anakin/convert/fc.cc @@ -16,23 +16,19 @@ #include #include #include - -using anakin::graph::GraphGlobalMem; -using anakin::AK_FLOAT; -using anakin::saber::NV; -using anakin::saber::Shape; +#include "paddle/fluid/inference/anakin/convert/helper.h" namespace paddle { namespace inference { namespace anakin { -void FcBaseOpConverter::operator()(const framework::proto::OpDesc &op, - const framework::BlockDesc &block_desc, - const framework::Scope &scope, - bool test_mode) { +template +void FcBaseOpConverter::operator()( + const framework::proto::OpDesc &op, const framework::BlockDesc &block_desc, + const framework::Scope &scope, bool test_mode) { framework::OpDesc op_desc(op, nullptr); auto input_names = op_desc.InputNames(); - bool with_bias = input_names.size() == 3; + bool with_bias = 
input_names.size() >= 3; std::string w_name = "Y"; std::string i_name = "X"; @@ -46,71 +42,74 @@ void FcBaseOpConverter::operator()(const framework::proto::OpDesc &op, // get weights auto *y_v = scope.FindVar(op_desc.Input(w_name).front()); PADDLE_ENFORCE_NOT_NULL(y_v); - auto *y_t = y_v->GetMutable(); - - auto input_name = op_desc.Input(i_name).front(); - auto output_name = op_desc.Output("Out").front(); + auto weight_tensor = tensor_from_var(*y_v, platform::CPUPlace()); + auto weight_shape = framework::vectorize2int(weight_tensor->dims()); - engine_->AddOp(op_name, "Dense", {input_name}, {output_name}); - engine_->AddOpAttr(op_name, "bias_term", with_bias); - engine_->AddOpAttr(op_name, "axis", 1); - - auto weight_shape = framework::vectorize2int(y_t->dims()); int out_dim = weight_shape[1]; - engine_->AddOpAttr(op_name, "out_dim", out_dim); const int w_m = weight_shape[0]; const int w_k = weight_shape[1]; - if (weight_shape.size() < 4UL) { - weight_shape.insert(weight_shape.begin(), 4UL - weight_shape.size(), 1); - } - Shape anakin_shape(weight_shape); + auto input_name = op_desc.Input(i_name).front(); + auto output_name = op_desc.Output("Out").front(); - framework::LoDTensor weight_tensor; - weight_tensor.Resize(y_t->dims()); - TensorCopySync((*y_t), platform::CPUPlace(), &weight_tensor); - auto *weight_data = weight_tensor.data(); - PADDLE_ENFORCE(w_m * w_k == weight_tensor.numel()); + this->engine_->AddOp(op_name, "Dense", {input_name}, {output_name}); + this->engine_->AddOpAttr(op_name, "bias_term", with_bias); + this->engine_->AddOpAttr(op_name, "axis", 1); + this->engine_->AddOpAttr(op_name, "out_dim", out_dim); - std::vector trans_weight_data(weight_tensor.numel()); + auto *weight_data = weight_tensor->data(); + PADDLE_ENFORCE(w_m * w_k == weight_tensor->numel()); + + std::vector trans_weight_data(weight_tensor->numel()); for (int i = 0; i < w_m; i++) { for (int j = 0; j < w_k; j++) { trans_weight_data[i + j * w_m] = weight_data[i * w_k + j]; } } - auto *weight1 = - GraphGlobalMem::Global().template new_block(anakin_shape); - float *cpu_data = static_cast(weight1->h_tensor().mutable_data()); - std::copy_n(trans_weight_data.data(), weight_tensor.numel(), cpu_data); - weight1->d_tensor().set_shape(anakin_shape); - weight1->d_tensor().copy_from(weight1->h_tensor()); - engine_->AddOpAttr(op_name, "weight_1", *weight1); + + int weight_num = weight_tensor->numel(); + bool enable_int8 = boost::get(op_desc.HasAttr("enable_int8")); + if (enable_int8) { + if (weight_shape.size() < 4UL) { + weight_shape.insert(weight_shape.begin(), 4UL - weight_shape.size(), 1); + } + ::anakin::saber::Shape anakin_shape(weight_shape); + const float int8_range = 127.; + float in_scale = boost::get(op_desc.GetAttr("input_scale")); + float weight_scale = boost::get(op_desc.GetAttr("weight_scale")); + PBlock *weight1 = + new PBlock(anakin_shape, ::anakin::AK_INT8); + this->engine_->RegistBlock(weight1); + std::vector weight_int8; + for (int i = 0; i < weight_num; i++) { + bool is_valid_int8 = + ((trans_weight_data[i] >= -128) && (trans_weight_data[i] <= 127)); + PADDLE_ENFORCE(is_valid_int8, + "We are in anakin subgraph int8 mode, the weight of fc " + "should be in range [-128, 127]"); + weight_int8.push_back(static_cast(trans_weight_data[i])); + } + memcpy(static_cast(weight1->h_tensor().mutable_data()), + static_cast(weight_int8.data()), sizeof(char) * weight_num); + weight1->d_tensor().set_shape(anakin_shape); + weight1->d_tensor().copy_from(weight1->h_tensor()); + this->engine_->AddOpAttr(op_name, 
"weight_1", *weight1); + this->engine_->Graph()->SetOpPrec(op_name, ::anakin::AK_INT8); + this->engine_->Graph()->SetWeightsScale(op_name, + {weight_scale / int8_range}, false); + this->engine_->AddTensorScale(input_name, in_scale / int8_range); + } else { + auto *weight1 = pblock_from_vector(trans_weight_data, + this->engine_); + this->engine_->AddOpAttr(op_name, "weight_1", *weight1); + } // get bias if (with_bias) { auto *b_v = scope.FindVar(op_desc.Input("Bias").front()); PADDLE_ENFORCE_NOT_NULL(b_v); - auto *b_t = b_v->GetMutable(); - - auto bias_shape = framework::vectorize2int(b_t->dims()); - framework::LoDTensor bias_tensor; - bias_tensor.Resize(b_t->dims()); - TensorCopySync((*b_t), platform::CPUPlace(), &bias_tensor); - auto *bias_data = bias_tensor.data(); - bias_shape.insert(bias_shape.begin(), 1); - bias_shape.insert(bias_shape.begin(), 1); - bias_shape.insert(bias_shape.begin(), 1); - // bias_shape.push_back(1); - // bias_shape.push_back(1); - Shape anakin_bias_shape(bias_shape); - - auto *weight2 = GraphGlobalMem::Global().template new_block( - anakin_bias_shape); - float *cpu_data2 = static_cast(weight2->h_tensor().mutable_data()); - std::copy_n(bias_data, bias_tensor.numel(), cpu_data2); - weight2->d_tensor().set_shape(anakin_bias_shape); - weight2->d_tensor().copy_from(weight2->h_tensor()); - engine_->AddOpAttr(op_name, "weight_2", *weight2); + auto weight2 = pblock_from_var(*b_v, this->engine_); + this->engine_->AddOpAttr(op_name, "weight_2", *weight2); } } diff --git a/paddle/fluid/inference/anakin/convert/fc.h b/paddle/fluid/inference/anakin/convert/fc.h index fb461908b35e0111065e1a46c52306c64ace7d7c..6fe65e3ecd4ec42b6a1b5d874d0306cfba86c8b2 100644 --- a/paddle/fluid/inference/anakin/convert/fc.h +++ b/paddle/fluid/inference/anakin/convert/fc.h @@ -20,7 +20,8 @@ namespace paddle { namespace inference { namespace anakin { -class FcBaseOpConverter : public AnakinOpConverter { +template +class FcBaseOpConverter : public AnakinOpConverter { public: FcBaseOpConverter() = default; @@ -32,13 +33,15 @@ class FcBaseOpConverter : public AnakinOpConverter { }; // with bias -class FcOpConverter : public FcBaseOpConverter { +template +class FcOpConverter : public FcBaseOpConverter { public: FcOpConverter() = default; }; // without bias -class MulOpConverter : public FcBaseOpConverter { +template +class MulOpConverter : public FcBaseOpConverter { public: MulOpConverter() = default; }; diff --git a/paddle/fluid/inference/anakin/convert/flatten.cc b/paddle/fluid/inference/anakin/convert/flatten.cc index 7f5c1510960d1014c33bd565939812fe7c7dfc06..7ce519a4de36c950bef1b4e856452828398aa57e 100644 --- a/paddle/fluid/inference/anakin/convert/flatten.cc +++ b/paddle/fluid/inference/anakin/convert/flatten.cc @@ -15,20 +15,16 @@ #include "paddle/fluid/inference/anakin/convert/flatten.h" #include -using anakin::graph::GraphGlobalMem; -using anakin::AK_FLOAT; -using anakin::saber::NV; -using anakin::saber::Shape; using anakin::PTuple; namespace paddle { namespace inference { namespace anakin { -void FlattenOpConverter::operator()(const framework::proto::OpDesc &op, - const framework::BlockDesc &block_desc, - const framework::Scope &scope, - bool test_mode) { +template +void FlattenOpConverter::operator()( + const framework::proto::OpDesc &op, const framework::BlockDesc &block_desc, + const framework::Scope &scope, bool test_mode) { framework::OpDesc op_desc(op, nullptr); PADDLE_ENFORCE_EQ(op_desc.Input("X").size(), 1UL); PADDLE_ENFORCE_EQ(op_desc.Output("Out").size(), 1UL); @@ -41,8 +37,8 @@ void 
FlattenOpConverter::operator()(const framework::proto::OpDesc &op, std::vector out_dims = {0, -1, 1, 1}; auto op_name = op_desc.Type() + ":" + op_desc.Output("Out").front(); - engine_->AddOp(op_name, "Reshape", {input}, {output}); - engine_->AddOpAttr>(op_name, "dims", out_dims); + this->engine_->AddOp(op_name, "Reshape", {input}, {output}); + this->engine_->template AddOpAttr>(op_name, "dims", out_dims); } } // namespace anakin diff --git a/paddle/fluid/inference/anakin/convert/flatten.h b/paddle/fluid/inference/anakin/convert/flatten.h index c9cc0006eb2448917bbcc0952f5e2cae72b73de1..6e5e059927d4d35cb28e383041a1c6ce1d59b282 100644 --- a/paddle/fluid/inference/anakin/convert/flatten.h +++ b/paddle/fluid/inference/anakin/convert/flatten.h @@ -20,7 +20,8 @@ namespace paddle { namespace inference { namespace anakin { -class FlattenOpConverter : public AnakinOpConverter { +template +class FlattenOpConverter : public AnakinOpConverter { public: FlattenOpConverter() = default; diff --git a/paddle/fluid/framework/details/sequential_execution_pass.h b/paddle/fluid/inference/anakin/convert/helper.cc similarity index 58% rename from paddle/fluid/framework/details/sequential_execution_pass.h rename to paddle/fluid/inference/anakin/convert/helper.cc index 7d6a4f4cc55698d80a60333d2e8d528b4a3b1641..7804619bf836d93beae5dba9b561da273936c381 100644 --- a/paddle/fluid/framework/details/sequential_execution_pass.h +++ b/paddle/fluid/inference/anakin/convert/helper.cc @@ -12,20 +12,21 @@ // See the License for the specific language governing permissions and // limitations under the License. -#pragma once - -#include "paddle/fluid/framework/ir/graph.h" -#include "paddle/fluid/framework/ir/pass.h" +#include "paddle/fluid/inference/anakin/convert/helper.h" namespace paddle { -namespace framework { -namespace details { +namespace inference { +namespace anakin { -class SequentialExecutionPass : public ir::Pass { - protected: - void ApplyImpl(ir::Graph* graph) const override; -}; +std::unique_ptr tensor_from_var( + const framework::Variable& var, const platform::Place& place) { + auto& src = var.Get(); + std::unique_ptr dst(new framework::LoDTensor()); + dst->Resize(src.dims()); + TensorCopySync((src), place, dst.get()); + return dst; +} -} // namespace details -} // namespace framework +} // namespace anakin +} // namespace inference } // namespace paddle diff --git a/paddle/fluid/inference/anakin/convert/helper.h b/paddle/fluid/inference/anakin/convert/helper.h new file mode 100644 index 0000000000000000000000000000000000000000..7b0fb211dcd8aa03fdad91d7cacfa11d2ceaae43 --- /dev/null +++ b/paddle/fluid/inference/anakin/convert/helper.h @@ -0,0 +1,95 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
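Before the helper bodies below, one note on the int8 branches added to the conv2d, conv2d_fusion and fc converters above: the converter does not quantize by itself. It assumes an earlier calibration pass has already mapped the float weights into [-128, 127] (that is exactly what the PADDLE_ENFORCE guards), so conversion reduces to a range check plus a narrowing cast, with the per-layer dequantization scale weight_scale / 127 registered through SetWeightsScale and the input scale through AddTensorScale. A standalone sketch of just the narrowing step, in plain C++ with no Anakin types (function name illustrative):

#include <cassert>
#include <cstdint>
#include <vector>

// Illustrative stand-in for the int8 loops above: weights are assumed
// to be pre-scaled into [-128, 127] by an earlier quantization pass.
std::vector<int8_t> NarrowWeightsToInt8(const std::vector<float>& weights) {
  std::vector<int8_t> out;
  out.reserve(weights.size());
  for (float w : weights) {
    assert(w >= -128.f && w <= 127.f);  // mirrors the PADDLE_ENFORCE check
    out.push_back(static_cast<int8_t>(w));
  }
  return out;
}

int main() {
  auto q = NarrowWeightsToInt8({-128.f, -0.5f, 0.f, 126.9f});
  // Dequantization later multiplies by weight_scale / 127, the same
  // per-layer scale the converters hand to SetWeightsScale.
  return q.size() == 4 ? 0 : 1;
}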
+ +#pragma once +#include +#include +#include +#include + +#include "paddle/fluid/framework/lod_tensor.h" +#include "paddle/fluid/framework/variable.h" +#include "paddle/fluid/inference/anakin/engine.h" + +#include "framework/core/net/net.h" +#include "framework/core/types.h" +#include "framework/graph/graph.h" +#include "framework/graph/graph_global_mem.h" +#include "saber/saber_types.h" + +using anakin::saber::Shape; +using anakin::AK_FLOAT; +using anakin::AK_INT8; +using anakin::PBlock; + +namespace paddle { +namespace inference { +namespace anakin { + +std::unique_ptr tensor_from_var( + const framework::Variable& var, const platform::Place& place); + +template +PBlock* pblock_from_tensor(const framework::LoDTensor& tensor, + std::vector shape_vec, + AnakinEngine* engine) { + while (shape_vec.size() < 4) { + shape_vec.insert(shape_vec.begin(), 1); + } + Shape shape(shape_vec); + PBlock* weight = new PBlock(shape, AK_FLOAT); + engine->RegistBlock(weight); + float* cpu_data = static_cast(weight->h_tensor().mutable_data()); + std::copy_n(tensor.data(), tensor.numel(), cpu_data); + weight->d_tensor().set_shape(shape); + weight->d_tensor().copy_from(weight->h_tensor()); + return weight; +} + +template +PBlock* pblock_from_vector(const std::vector& vec, + std::vector shape_vec, + AnakinEngine* engine) { + while (shape_vec.size() < 4) { + shape_vec.insert(shape_vec.begin(), 1); + } + Shape shape(shape_vec); + PBlock* weight = new PBlock(shape, AK_FLOAT); + engine->RegistBlock(weight); + auto* weight_data = static_cast(weight->h_tensor().mutable_data()); + std::copy(std::begin(vec), std::end(vec), weight_data); + weight->d_tensor().set_shape(shape); + weight->d_tensor().copy_from(weight->h_tensor()); + return weight; +} + +template +PBlock* pblock_from_vector(const std::vector& vec, + AnakinEngine* engine) { + int size = vec.size(); + return pblock_from_vector( + vec, std::vector({1, 1, 1, size}), engine); +} + +template +PBlock* pblock_from_var(const framework::Variable& var, + AnakinEngine* engine) { + auto tensor = tensor_from_var(var, platform::CPUPlace()); + auto shape = framework::vectorize2int(tensor->dims()); + return pblock_from_tensor(*tensor, shape, engine); +} + +} // namespace anakin +} // namespace inference +} // namespace paddle diff --git a/paddle/fluid/inference/anakin/convert/im2sequence.cc b/paddle/fluid/inference/anakin/convert/im2sequence.cc index 2cc330c3829f6033229748523c3df750b951626f..5a4e3e61c5e4e40d8fe98fba8e098d89d916dde1 100644 --- a/paddle/fluid/inference/anakin/convert/im2sequence.cc +++ b/paddle/fluid/inference/anakin/convert/im2sequence.cc @@ -17,23 +17,16 @@ #include #include -using anakin::graph::GraphGlobalMem; -using anakin::AK_FLOAT; -using anakin::Precision; -using anakin::saber::NV; -using anakin::saber::X86; -using anakin::saber::Shape; -using anakin::PBlock; using anakin::PTuple; namespace paddle { namespace inference { namespace anakin { -void Im2SequenceConverter::operator()(const framework::proto::OpDesc &op, - const framework::BlockDesc &block_desc, - const framework::Scope &scope, - bool test_mode) { +template +void Im2SequenceConverter::operator()( + const framework::proto::OpDesc &op, const framework::BlockDesc &block_desc, + const framework::Scope &scope, bool test_mode) { framework::OpDesc op_desc(op, nullptr); PADDLE_ENFORCE_EQ(op_desc.Input("X").size(), 1); PADDLE_ENFORCE_EQ(op_desc.Output("Y").size(), 0); @@ -43,17 +36,19 @@ void Im2SequenceConverter::operator()(const framework::proto::OpDesc &op, auto out_name = 
op_desc.Output("Out").front(); auto op_name = op_desc.Type() + ":" + op_desc.Output("Out").front(); - engine_->AddOp(op_name, "Im2Sequence", {x_name}, {out_name}); + this->engine_->AddOp(op_name, "Im2Sequence", {x_name}, {out_name}); std::vector dilations = {1, 1}; auto paddings = boost::get>(op_desc.GetAttr("paddings")); auto strides = boost::get>(op_desc.GetAttr("strides")); auto kernels = boost::get>(op_desc.GetAttr("kernels")); - engine_->AddOpAttr>(op_name, "paddings", paddings); - engine_->AddOpAttr>(op_name, "strides", strides); - engine_->AddOpAttr>(op_name, "window_size", kernels); - engine_->AddOpAttr>(op_name, "dilations", dilations); + this->engine_->template AddOpAttr>(op_name, "paddings", paddings); + this->engine_->template AddOpAttr>(op_name, "strides", strides); + this->engine_->template AddOpAttr>(op_name, "window_size", + kernels); + this->engine_->template AddOpAttr>(op_name, "dilations", + dilations); } } // namespace anakin diff --git a/paddle/fluid/inference/anakin/convert/im2sequence.h b/paddle/fluid/inference/anakin/convert/im2sequence.h index 714679c1d9601136f1f54287bb58d611e852f3fe..8241d4d6f9ce78f57753bb7300e2fe968da4a927 100644 --- a/paddle/fluid/inference/anakin/convert/im2sequence.h +++ b/paddle/fluid/inference/anakin/convert/im2sequence.h @@ -20,7 +20,8 @@ namespace paddle { namespace inference { namespace anakin { -class Im2SequenceConverter : public AnakinOpConverter { +template +class Im2SequenceConverter : public AnakinOpConverter { public: Im2SequenceConverter() = default; diff --git a/paddle/fluid/inference/anakin/convert/op_converter.h b/paddle/fluid/inference/anakin/convert/op_converter.h index 1ca62658ef26ffebcc068c91ece7d9bbed0a348f..a6ae51bd4b1c67104c732e12a66f74d7e4580bb5 100644 --- a/paddle/fluid/inference/anakin/convert/op_converter.h +++ b/paddle/fluid/inference/anakin/convert/op_converter.h @@ -32,10 +32,10 @@ namespace paddle { namespace inference { namespace anakin { -using AnakinNvEngine = - AnakinEngine<::anakin::saber::NV, ::anakin::Precision::FP32>; - +template class AnakinOpConverter { + using AnakinEngineT = AnakinEngine; + public: AnakinOpConverter() = default; @@ -45,7 +45,7 @@ class AnakinOpConverter { void ConvertOp(const framework::proto::OpDesc &op, const framework::BlockDesc &block_desc, const std::unordered_set ¶meters, - const framework::Scope &scope, AnakinNvEngine *engine, + const framework::Scope &scope, AnakinEngineT *engine, bool test_mode = false) { framework::OpDesc op_desc(op, nullptr); std::string op_type = op_desc.Type(); @@ -65,7 +65,7 @@ class AnakinOpConverter { void ConvertBlock(framework::BlockDesc *block_desc, const std::unordered_set ¶meters, - const framework::Scope &scope, AnakinNvEngine *engine) { + const framework::Scope &scope, AnakinEngineT *engine) { std::unique_lock lock(mutex_); framework::proto::BlockDesc *block = block_desc->Proto(); for (auto i = 0; i < block->ops_size(); i++) { @@ -79,9 +79,8 @@ class AnakinOpConverter { framework::BlockDesc *block_desc, framework::Scope *scope, const std::vector &inputs, const std::unordered_set ¶meters, - const std::vector &outputs, AnakinNvEngine *engine) { + const std::vector &outputs, AnakinEngineT *engine) { ConvertBlock(block_desc, parameters, *scope, engine); - engine->Freeze(); // if the max_batch size int max_batch_size = engine->GetMaxBatchSize(); PADDLE_ENFORCE(max_batch_size > 0, @@ -91,6 +90,18 @@ class AnakinOpConverter { // the block_desc. 
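The templating of this base class is what forces the mechanical engine_ to this->engine_ and AddOpAttr to template AddOpAttr rewrites running through every converter in this patch: engine_ now lives in a dependent base class, and AddOpAttr is a member template invoked through a dependent expression, so both the this-> qualifier and the template disambiguator become mandatory. A minimal example of the language rule, unrelated to Anakin itself (all names illustrative):

template <typename T>
struct BaseConverter {
  struct Engine {
    template <typename U>
    void AddOpAttr(const char* /*name*/, U /*value*/) {}
  };
  Engine* engine_{nullptr};
};

template <typename T>
struct MyConverter : BaseConverter<T> {
  void Run() {
    // Unqualified `engine_` is not looked up in the dependent base, and
    // without `template` the `<` after AddOpAttr would parse as less-than.
    this->engine_->template AddOpAttr<int>("axis", 1);
  }
};

int main() {
  BaseConverter<float>::Engine e;
  MyConverter<float> c;
  c.engine_ = &e;
  c.Run();
  return 0;
}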
auto max_input_shape = engine->GetMaxInputShape(); std::map> temp_max_input_shape; + // Register outputs with anakin using the RegistVar interface before Freeze. + // Note that RegistVar's parameters can only be outputs, not inputs. + for (auto &output : outputs) { + engine->Graph()->RegistVar(output); + } + engine->Freeze(); + // Add scale for tensor in int8 mode. + auto tensor_scales = engine->GetTensorScales(); + + for (auto &item : tensor_scales) { + engine->Graph()->SetVarScale(item.first, item.second); + } for (auto &input : inputs) { if (parameters.count(input)) continue; @@ -99,7 +110,7 @@ class AnakinOpConverter { input_shape[0] = max_batch_size; if (max_input_shape.count(input)) { PADDLE_ENFORCE(max_input_shape[input].size() == 4, - "the dimensions of max_input_shape setted from " + "the dimensions of max_input_shape setted from " "config->EnableAnakinEngine must be 4"); for (int i = 1; i < 4; i++) { input_shape[i] = max_input_shape[input][i]; @@ -118,50 +129,104 @@ class AnakinOpConverter { } temp_max_input_shape[input] = input_shape; engine->SetInputShape(input, input_shape); - engine->Graph()->RegistVar(input); // For share from data. } engine->SetMaxInputShape(temp_max_input_shape); engine->Optimize(); - - // For anakin share with fluid tensor. - engine->AllocTmpMem(); - engine->InitGraph(); + engine->InitNet(); } - void SetEngine(AnakinNvEngine *engine) { engine_ = engine; } + void SetEngine(AnakinEngineT *engine) { engine_ = engine; } virtual ~AnakinOpConverter() {} protected: bool test_mode_; - AnakinNvEngine *engine_{nullptr}; + AnakinEngineT *engine_{nullptr}; private: - std::unordered_map converters_; + std::unordered_map *> + converters_; framework::Scope *scope_{nullptr}; std::mutex mutex_; }; +template class AnakinOpConverter<::anakin::saber::NV, + ::anakin::Precision::FP32>; +template class AnakinOpConverter<::anakin::saber::NV, + ::anakin::Precision::INT8>; + +template class AnakinOpConverter<::anakin::saber::X86, + ::anakin::Precision::FP32>; +template class AnakinOpConverter<::anakin::saber::X86, + ::anakin::Precision::INT8>; } // namespace anakin } // namespace inference } // namespace paddle -#define REGISTER_ANAKIN_OP_CONVERTER(op_type__, Converter__) \ - struct anakin_##op_type__##_converter \ - : public ::paddle::framework::Registrar { \ - anakin_##op_type__##_converter() { \ - LOG(INFO) << "register convert " << #op_type__; \ - ::paddle::inference::Registry< \ - ::paddle::inference::anakin::AnakinOpConverter>::Global() \ - .Register<::paddle::inference::anakin::Converter__>(#op_type__); \ - } \ - }; \ - anakin_##op_type__##_converter anakin_##op_type__##_converter__; \ - int TouchConverterRegister_anakin_##op_type__() { \ - anakin_##op_type__##_converter__.Touch(); \ - return 0; \ +#define REGISTER_ANAKIN_OP_CONVERTER_BASE(op_type__, Converter__, \ + place_type__, place_class__, \ + precision_type__, precision_class__) \ + struct anakin_##op_type__##_##place_type__##_##precision_type__##_converter \ + : public ::paddle::framework::Registrar { \ + anakin_##op_type__##_##place_type__##_##precision_type__##_converter() { \ + LOG(INFO) << "register convert " << #op_type__ << " "; \ + ::paddle::inference::Registry< \ + ::paddle::inference::anakin::AnakinOpConverter< \ + place_class__, precision_class__>>::Global() \ + .Register(#op_type__); \ + } \ + }; \ + anakin_##op_type__##_##place_type__##_##precision_type__##_converter \ + anakin_##op_type__##_##place_type__##_##precision_type__##_converter__; \ + int 
Touch_anakin_##op_type__##_##place_type__##_##precision_type__() { \ + anakin_##op_type__##_##place_type__##_##precision_type__##_converter__ \ + .Touch(); \ + return 0; \ } -#define USE_ANAKIN_CONVERTER(op_type__) \ - extern int TouchConverterRegister_anakin_##op_type__(); \ - int use_op_converter_anakin_##op_type__ __attribute__((unused)) = \ - TouchConverterRegister_anakin_##op_type__(); +#define WRAP(...) __VA_ARGS__ + +#define REGISTER_CUDA_ANAKIN_OP_CONVERTER(op_type__, Converter__, \ + precision_type__) \ + REGISTER_ANAKIN_OP_CONVERTER_BASE( \ + op_type__, \ + ::paddle::inference::anakin::Converter__, \ + CUDA, ::anakin::saber::NV, precision_type__, \ + ::anakin::Precision::precision_type__) + +#define REGISTER_CPU_ANAKIN_OP_CONVERTER(op_type__, Converter__, \ + precision_type__) \ + REGISTER_ANAKIN_OP_CONVERTER_BASE( \ + op_type__, \ + ::paddle::inference::anakin::Converter__, \ + CPU, ::anakin::saber::X86, precision_type__, \ + ::anakin::Precision::precision_type__) + +#ifdef PADDLE_WITH_CUDA +#define REGISTER_ANAKIN_OP_CONVERTER(op_type__, Converter__) \ + REGISTER_CUDA_ANAKIN_OP_CONVERTER(op_type__, Converter__, FP32); \ + REGISTER_CUDA_ANAKIN_OP_CONVERTER(op_type__, Converter__, INT8); \ + REGISTER_CPU_ANAKIN_OP_CONVERTER(op_type__, Converter__, FP32); \ + REGISTER_CPU_ANAKIN_OP_CONVERTER(op_type__, Converter__, INT8) +#else +#define REGISTER_ANAKIN_OP_CONVERTER(op_type__, Converter__) \ + REGISTER_CPU_ANAKIN_OP_CONVERTER(op_type__, Converter__, FP32); \ + REGISTER_CPU_ANAKIN_OP_CONVERTER(op_type__, Converter__, INT8) +#endif + +#define USE_ANAKIN_CONVERTER_BASE(op_type__, place_type__, precision_type__) \ + extern int Touch_anakin_##op_type__##_##place_type__##_##precision_type__(); \ + int use_converter_anakin_##op_type__##_##place_type__##_##precision_type__ \ + __attribute__((unused)) = \ + Touch_anakin_##op_type__##_##place_type__##_##precision_type__(); + +#define USE_ANAKIN_CONVERTER(op_type__) \ + USE_ANAKIN_CONVERTER_BASE(op_type__, CUDA, FP32) +#define USE_INT8_ANAKIN_CONVERTER(op_type__) \ + USE_ANAKIN_CONVERTER_BASE(op_type__, CUDA, INT8) + +#define USE_CPU_ANAKIN_CONVERTER(op_type__) \ + USE_ANAKIN_CONVERTER_BASE(op_type__, CPU, FP32) +#define USE_CPU_INT8_ANAKIN_CONVERTER(op_type__) \ + USE_ANAKIN_CONVERTER_BASE(op_type__, CPU, INT8) diff --git a/paddle/fluid/inference/anakin/convert/pool2d.cc b/paddle/fluid/inference/anakin/convert/pool2d.cc index 87eefe712a5ad2acd8c9b5abe521c832ad2c1ef2..11e7c717fd689b27a619a33bcac4037b30f97af8 100644 --- a/paddle/fluid/inference/anakin/convert/pool2d.cc +++ b/paddle/fluid/inference/anakin/convert/pool2d.cc @@ -17,23 +17,16 @@ #include #include -using anakin::graph::GraphGlobalMem; -using anakin::AK_FLOAT; -using anakin::Precision; -using anakin::saber::NV; -using anakin::saber::X86; -using anakin::saber::Shape; -using anakin::PBlock; using anakin::PTuple; namespace paddle { namespace inference { namespace anakin { -void Pool2dOpConverter::operator()(const framework::proto::OpDesc &op, - const framework::BlockDesc &block_desc, - const framework::Scope &scope, - bool test_mode) { +template +void Pool2dOpConverter::operator()( + const framework::proto::OpDesc &op, const framework::BlockDesc &block_desc, + const framework::Scope &scope, bool test_mode) { framework::OpDesc op_desc(op, nullptr); PADDLE_ENFORCE_EQ(op_desc.Input("X").size(), 1); PADDLE_ENFORCE_EQ(op_desc.Output("Out").size(), 1); @@ -65,13 +58,13 @@ void Pool2dOpConverter::operator()(const framework::proto::OpDesc &op, PADDLE_THROW("TensorRT unsupported pooling 
type!"); } - engine_->AddOp(op_name, "Pooling", {x_name}, {y_name}); - engine_->AddOpAttr>(op_name, "pool_size", ksize); - engine_->AddOpAttr>(op_name, "strides", strides); - engine_->AddOpAttr>(op_name, "padding", paddings); - engine_->AddOpAttr(op_name, "method", anakin_pool_type); - engine_->AddOpAttr(op_name, "global_pooling", global_pooling); - engine_->AddOpAttr(op_name, "cmp_out_shape_floor_as_conv", !ceil_mode); + this->engine_->AddOp(op_name, "Pooling", {x_name}, {y_name}); + this->engine_->template AddOpAttr>(op_name, "pool_size", ksize); + this->engine_->template AddOpAttr>(op_name, "strides", strides); + this->engine_->template AddOpAttr>(op_name, "padding", paddings); + this->engine_->AddOpAttr(op_name, "method", anakin_pool_type); + this->engine_->AddOpAttr(op_name, "global_pooling", global_pooling); + this->engine_->AddOpAttr(op_name, "cmp_out_shape_floor_as_conv", !ceil_mode); } } // namespace anakin diff --git a/paddle/fluid/inference/anakin/convert/pool2d.h b/paddle/fluid/inference/anakin/convert/pool2d.h index ec28e48ac848eff1d37c39063725624bf7d65723..7a06ff1b660a4ccd28570337b67aff68e7bce6a7 100644 --- a/paddle/fluid/inference/anakin/convert/pool2d.h +++ b/paddle/fluid/inference/anakin/convert/pool2d.h @@ -20,7 +20,8 @@ namespace paddle { namespace inference { namespace anakin { -class Pool2dOpConverter : public AnakinOpConverter { +template +class Pool2dOpConverter : public AnakinOpConverter { public: Pool2dOpConverter() = default; diff --git a/paddle/fluid/inference/anakin/convert/relu.cc b/paddle/fluid/inference/anakin/convert/relu.cc index 993437d014b1f951dac94da7a3179b4bcb63466d..00853406634bdf5c488d21aca8289826f3a93a16 100644 --- a/paddle/fluid/inference/anakin/convert/relu.cc +++ b/paddle/fluid/inference/anakin/convert/relu.cc @@ -16,19 +16,30 @@ #include #include -using anakin::graph::GraphGlobalMem; -using anakin::AK_FLOAT; -using anakin::saber::NV; -using anakin::saber::Shape; - namespace paddle { namespace inference { namespace anakin { -void ReluOpConverter::operator()(const framework::proto::OpDesc &op, - const framework::BlockDesc &block_desc, - const framework::Scope &scope, - bool test_mode) { +template +void ReluOpConverter::operator()( + const framework::proto::OpDesc &op, const framework::BlockDesc &block_desc, + const framework::Scope &scope, bool test_mode) { + framework::OpDesc op_desc(op, nullptr); + PADDLE_ENFORCE_EQ(op_desc.Input("X").size(), 1); + PADDLE_ENFORCE_EQ(op_desc.Output("Out").size(), 1); + + auto op_name = op_desc.Type() + ":" + op_desc.Output("Out").front(); + auto input_name = op_desc.Input("X").front(); + auto output_name = op_desc.Output("Out").front(); + + this->engine_->AddOp(op_name, "ReLU", {input_name}, {output_name}); + this->engine_->AddOpAttr(op_name, "alpha", 0); +} + +template +void LeakyReluOpConverter::operator()( + const framework::proto::OpDesc &op, const framework::BlockDesc &block_desc, + const framework::Scope &scope, bool test_mode) { framework::OpDesc op_desc(op, nullptr); PADDLE_ENFORCE_EQ(op_desc.Input("X").size(), 1); PADDLE_ENFORCE_EQ(op_desc.Output("Out").size(), 1); @@ -37,8 +48,9 @@ void ReluOpConverter::operator()(const framework::proto::OpDesc &op, auto input_name = op_desc.Input("X").front(); auto output_name = op_desc.Output("Out").front(); - engine_->AddOp(op_name, "ReLU", {input_name}, {output_name}); - engine_->AddOpAttr(op_name, "alpha", 0); + float alpha = boost::get(op_desc.GetAttr("alpha")); + this->engine_->AddOp(op_name, "ReLU", {input_name}, {output_name}); + 
this->engine_->AddOpAttr(op_name, "alpha", alpha); } } // namespace anakin @@ -46,3 +58,4 @@ void ReluOpConverter::operator()(const framework::proto::OpDesc &op, } // namespace paddle REGISTER_ANAKIN_OP_CONVERTER(relu, ReluOpConverter); +REGISTER_ANAKIN_OP_CONVERTER(leaky_relu, LeakyReluOpConverter); diff --git a/paddle/fluid/inference/anakin/convert/relu.h b/paddle/fluid/inference/anakin/convert/relu.h index 6ede506511917c80faa59d40ee0a7bfff194da97..f366f05a94ae937c98c72e179a7bf14015a695ea 100644 --- a/paddle/fluid/inference/anakin/convert/relu.h +++ b/paddle/fluid/inference/anakin/convert/relu.h @@ -22,7 +22,8 @@ namespace paddle { namespace inference { namespace anakin { -class ReluOpConverter : public AnakinOpConverter { +template +class ReluOpConverter : public AnakinOpConverter { public: ReluOpConverter() = default; @@ -33,6 +34,18 @@ class ReluOpConverter : public AnakinOpConverter { virtual ~ReluOpConverter() {} }; +template +class LeakyReluOpConverter : public AnakinOpConverter { + public: + LeakyReluOpConverter() = default; + + virtual void operator()(const framework::proto::OpDesc &op, + const framework::BlockDesc &block_desc, + const framework::Scope &scope, + bool test_mode) override; + virtual ~LeakyReluOpConverter() {} +}; + } // namespace anakin } // namespace inference } // namespace paddle diff --git a/paddle/fluid/inference/anakin/convert/reshape.cc b/paddle/fluid/inference/anakin/convert/reshape.cc index 17e0a1acb5f4e08e848e91bbb051757d85796c0a..d73736b7fecc758a2965f2d2afff9a808d6e2adc 100644 --- a/paddle/fluid/inference/anakin/convert/reshape.cc +++ b/paddle/fluid/inference/anakin/convert/reshape.cc @@ -15,20 +15,16 @@ #include "paddle/fluid/inference/anakin/convert/reshape.h" #include -using anakin::graph::GraphGlobalMem; -using anakin::AK_FLOAT; -using anakin::saber::NV; -using anakin::saber::Shape; using anakin::PTuple; namespace paddle { namespace inference { namespace anakin { -void ReshapeOpConverter::operator()(const framework::proto::OpDesc &op, - const framework::BlockDesc &block_desc, - const framework::Scope &scope, - bool test_mode) { +template +void ReshapeOpConverter::operator()( + const framework::proto::OpDesc &op, const framework::BlockDesc &block_desc, + const framework::Scope &scope, bool test_mode) { framework::OpDesc op_desc(op, nullptr); PADDLE_ENFORCE_EQ(op_desc.Input("X").size(), 1UL); PADDLE_ENFORCE_EQ(op_desc.Output("Out").size(), 1UL); @@ -37,13 +33,13 @@ void ReshapeOpConverter::operator()(const framework::proto::OpDesc &op, auto output = op_desc.Output("Out").front(); auto op_name = op_desc.Type() + ":" + op_desc.Output("Out").front(); - engine_->AddOp(op_name, "Reshape", {input}, {output}); + this->engine_->AddOp(op_name, "Reshape", {input}, {output}); auto shape = boost::get>(op_desc.GetAttr("shape")); if (shape.size() < 4) { shape.insert(shape.end(), 4 - shape.size(), 1); } - engine_->AddOpAttr>(op_name, "dims", shape); + this->engine_->template AddOpAttr>(op_name, "dims", shape); } } // namespace anakin diff --git a/paddle/fluid/inference/anakin/convert/reshape.h b/paddle/fluid/inference/anakin/convert/reshape.h index 9ce2ea2a4f3f8802225fe8ca8ed602c9f7d27968..88de2641e60f1a08cf11b1206be28eb516c575f1 100644 --- a/paddle/fluid/inference/anakin/convert/reshape.h +++ b/paddle/fluid/inference/anakin/convert/reshape.h @@ -20,7 +20,8 @@ namespace paddle { namespace inference { namespace anakin { -class ReshapeOpConverter : public AnakinOpConverter { +template +class ReshapeOpConverter : public AnakinOpConverter { public: ReshapeOpConverter() = 
default;
diff --git a/paddle/fluid/inference/anakin/convert/roi_align.cc b/paddle/fluid/inference/anakin/convert/roi_align.cc
new file mode 100644
index 0000000000000000000000000000000000000000..8702f638e10bbf72fa43d45e0042c16ffae447f1
--- /dev/null
+++ b/paddle/fluid/inference/anakin/convert/roi_align.cc
@@ -0,0 +1,54 @@
+// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "paddle/fluid/inference/anakin/convert/roi_align.h"
+#include <algorithm>
+#include <map>
+
+namespace paddle {
+namespace inference {
+namespace anakin {
+
+template <typename TargetT, ::anakin::Precision PrecisionT>
+void RoiAlignOpConverter<TargetT, PrecisionT>::operator()(
+    const framework::proto::OpDesc &op, const framework::BlockDesc &block_desc,
+    const framework::Scope &scope, bool test_mode) {
+  framework::OpDesc op_desc(op, nullptr);
+  PADDLE_ENFORCE_EQ(op_desc.Input("X").size(), 1);
+  PADDLE_ENFORCE_EQ(op_desc.Input("ROIs").size(), 1);
+  PADDLE_ENFORCE_EQ(op_desc.Output("Out").size(), 1);
+
+  auto op_name = op_desc.Type() + ":" + op_desc.Output("Out").front();
+  auto input_x_name = op_desc.Input("X").front();
+  auto input_rois_name = op_desc.Input("ROIs").front();
+  auto output_name = op_desc.Output("Out").front();
+
+  auto spatial_scale = boost::get<float>(op_desc.GetAttr("spatial_scale"));
+  auto pooled_height = boost::get<int>(op_desc.GetAttr("pooled_height"));
+  auto pooled_width = boost::get<int>(op_desc.GetAttr("pooled_width"));
+  auto sampling_ratio = boost::get<int>(op_desc.GetAttr("sampling_ratio"));
+
+  this->engine_->AddOp(op_name, "RoiAlign", {input_x_name, input_rois_name},
+                       {output_name});
+  this->engine_->AddOpAttr(op_name, "spatial_scale", spatial_scale);
+  this->engine_->AddOpAttr(op_name, "pooled_height", pooled_height);
+  this->engine_->AddOpAttr(op_name, "pooled_width", pooled_width);
+  this->engine_->AddOpAttr(op_name, "sampling_ratio", sampling_ratio);
+}
+
+}  // namespace anakin
+}  // namespace inference
+}  // namespace paddle
+
+REGISTER_ANAKIN_OP_CONVERTER(roi_align, RoiAlignOpConverter);
diff --git a/paddle/fluid/inference/anakin/convert/roi_align.h b/paddle/fluid/inference/anakin/convert/roi_align.h
new file mode 100644
index 0000000000000000000000000000000000000000..8b5d23a01676f035174aa6fd2d2a79582fc1e2e0
--- /dev/null
+++ b/paddle/fluid/inference/anakin/convert/roi_align.h
@@ -0,0 +1,39 @@
+// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
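The four attributes forwarded by the new converter fully parameterize the pooling grid: ROI coordinates arrive in input-image space, are mapped onto the feature map by spatial_scale, and the scaled ROI is split into pooled_height x pooled_width bins, each averaged from sampling_ratio^2 bilinearly sampled points. A sketch of the grid arithmetic only (the bilinear sampling itself lives in Anakin's kernel; names are illustrative):

// Bin size for one ROI; names mirror the attributes above.
struct BinSize {
  float h, w;
};

BinSize RoiAlignBinSize(float roi_h, float roi_w, float spatial_scale,
                        int pooled_height, int pooled_width) {
  return {roi_h * spatial_scale / pooled_height,
          roi_w * spatial_scale / pooled_width};
}

int main() {
  // A 224x224 ROI on a stride-16 feature map (spatial_scale = 1/16),
  // pooled to 7x7: each bin covers a 2x2 patch of feature-map cells.
  BinSize b = RoiAlignBinSize(224.f, 224.f, 1.f / 16.f, 7, 7);
  return (b.h == 2.f && b.w == 2.f) ? 0 : 1;
}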
+ +#pragma once + +#include +#include +#include "paddle/fluid/inference/anakin/convert/op_converter.h" + +namespace paddle { +namespace inference { +namespace anakin { + +template +class RoiAlignOpConverter : public AnakinOpConverter { + public: + RoiAlignOpConverter() = default; + + virtual void operator()(const framework::proto::OpDesc &op, + const framework::BlockDesc &block_desc, + const framework::Scope &scope, + bool test_mode) override; + virtual ~RoiAlignOpConverter() {} +}; + +} // namespace anakin +} // namespace inference +} // namespace paddle diff --git a/paddle/fluid/inference/anakin/convert/scale.cc b/paddle/fluid/inference/anakin/convert/scale.cc index dd68af4f79a6d1e8add04bde6a6890bca1b00d14..2559ec498c8ba423bf894b1ec67e24bd2567ff2b 100644 --- a/paddle/fluid/inference/anakin/convert/scale.cc +++ b/paddle/fluid/inference/anakin/convert/scale.cc @@ -16,19 +16,14 @@ #include #include -using anakin::graph::GraphGlobalMem; -using anakin::AK_FLOAT; -using anakin::saber::NV; -using anakin::saber::Shape; - namespace paddle { namespace inference { namespace anakin { -void ScaleOpConverter::operator()(const framework::proto::OpDesc &op, - const framework::BlockDesc &block_desc, - const framework::Scope &scope, - bool test_mode) { +template +void ScaleOpConverter::operator()( + const framework::proto::OpDesc &op, const framework::BlockDesc &block_desc, + const framework::Scope &scope, bool test_mode) { framework::OpDesc op_desc(op, nullptr); PADDLE_ENFORCE_EQ(op_desc.Input("X").size(), 1); PADDLE_ENFORCE_EQ(op_desc.Output("Out").size(), 1); @@ -44,10 +39,10 @@ void ScaleOpConverter::operator()(const framework::proto::OpDesc &op, PADDLE_ENFORCE(bias_after_scale, "The anakin scale layer only support bias after scale now."); - engine_->AddOp(op_name, "Power", {input_name}, {output_name}); - engine_->AddOpAttr(op_name, "shift", bias); - engine_->AddOpAttr(op_name, "scale", scale); - engine_->AddOpAttr(op_name, "power", static_cast(1.0)); + this->engine_->AddOp(op_name, "Power", {input_name}, {output_name}); + this->engine_->AddOpAttr(op_name, "shift", bias); + this->engine_->AddOpAttr(op_name, "scale", scale); + this->engine_->AddOpAttr(op_name, "power", static_cast(1.0)); } } // namespace anakin diff --git a/paddle/fluid/inference/anakin/convert/scale.h b/paddle/fluid/inference/anakin/convert/scale.h index ba3bcdd21494a4eeb6190aa8383e17e1b828b5f3..f19a92019349718ccd961d2dc2968ad479ff1a3c 100644 --- a/paddle/fluid/inference/anakin/convert/scale.h +++ b/paddle/fluid/inference/anakin/convert/scale.h @@ -22,7 +22,8 @@ namespace paddle { namespace inference { namespace anakin { -class ScaleOpConverter : public AnakinOpConverter { +template +class ScaleOpConverter : public AnakinOpConverter { public: ScaleOpConverter() = default; diff --git a/paddle/fluid/inference/anakin/convert/shuffle_channel.cc b/paddle/fluid/inference/anakin/convert/shuffle_channel.cc new file mode 100644 index 0000000000000000000000000000000000000000..fdd2e3182e34992205d7707b83efbc3c6421076c --- /dev/null +++ b/paddle/fluid/inference/anakin/convert/shuffle_channel.cc @@ -0,0 +1,47 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
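The scale converter above keeps mapping fluid's scale op onto Anakin's "Power" layer with power pinned to 1, which under the usual Power-layer convention (assumed here, matching Caffe's layer of the same name) computes y = (scale * x + shift) ^ power; that is exactly bias-after-scale. A reference of the assumed formula:

#include <cmath>

// y = (scale * x + shift) ^ power, the Power-layer convention assumed above.
float PowerLayer(float x, float scale, float shift, float power) {
  return std::pow(scale * x + shift, power);
}

int main() {
  // scale=2, bias=1, bias_after_scale=true: y = 2 * 3 + 1 = 7
  float y = PowerLayer(3.f, 2.f, 1.f, 1.f);
  return std::fabs(y - 7.f) < 1e-6f ? 0 : 1;
}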
diff --git a/paddle/fluid/inference/anakin/convert/shuffle_channel.cc b/paddle/fluid/inference/anakin/convert/shuffle_channel.cc
new file mode 100644
index 0000000000000000000000000000000000000000..fdd2e3182e34992205d7707b83efbc3c6421076c
--- /dev/null
+++ b/paddle/fluid/inference/anakin/convert/shuffle_channel.cc
@@ -0,0 +1,47 @@
+// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "paddle/fluid/inference/anakin/convert/shuffle_channel.h"
+#include <algorithm>
+#include <map>
+#include <string>
+
+using anakin::PTuple;
+
+namespace paddle {
+namespace inference {
+namespace anakin {
+
+template <typename TargetT>
+void ShuffleChannelOpConverter<TargetT>::operator()(
+    const framework::proto::OpDesc &op, const framework::BlockDesc &block_desc,
+    const framework::Scope &scope, bool test_mode) {
+  framework::OpDesc op_desc(op, nullptr);
+  PADDLE_ENFORCE_EQ(op_desc.Input("X").size(), 1);
+  PADDLE_ENFORCE_EQ(op_desc.Output("Out").size(), 1);
+
+  auto input = op_desc.Input("X").front();
+  auto output = op_desc.Output("Out").front();
+  auto op_name = op_desc.Type() + ":" + op_desc.Output("Out").front();
+  this->engine_->AddOp(op_name, "ShuffleChannel", {input}, {output});
+
+  auto group = boost::get<int>(op_desc.GetAttr("group"));
+  this->engine_->AddOpAttr(op_name, "group", group);
+}
+
+}  // namespace anakin
+}  // namespace inference
+}  // namespace paddle
+
+REGISTER_ANAKIN_OP_CONVERTER(shuffle_channel, ShuffleChannelOpConverter);

diff --git a/paddle/fluid/inference/anakin/convert/shuffle_channel.h b/paddle/fluid/inference/anakin/convert/shuffle_channel.h
new file mode 100644
index 0000000000000000000000000000000000000000..457a14865a91bd6cfa763513f01cda72e34186e8
--- /dev/null
+++ b/paddle/fluid/inference/anakin/convert/shuffle_channel.h
@@ -0,0 +1,38 @@
+// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
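+// A note on the semantics being converted here (reference description, not
+// part of the original patch): ShuffleChannel, from ShuffleNet, regroups the
+// C channels of an NCHW tensor. With attribute `group` = g it views the
+// channel axis as a (g, C/g) matrix, transposes it to (C/g, g), and flattens
+// back, so output channel c' reads from input channel
+//     c = (c' % g) * (C / g) + c' / g.
+// Example with C = 6, g = 2: channels [0 1 2 3 4 5] -> [0 3 1 4 2 5].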
+ +#pragma once + +#include "paddle/fluid/inference/anakin/convert/op_converter.h" + +namespace paddle { +namespace inference { +namespace anakin { + +template +class ShuffleChannelOpConverter + : public AnakinOpConverter { + public: + ShuffleChannelOpConverter() = default; + + virtual void operator()(const framework::proto::OpDesc &op, + const framework::BlockDesc &block_desc, + const framework::Scope &scope, + bool test_mode) override; + virtual ~ShuffleChannelOpConverter() {} +}; + +} // namespace anakin +} // namespace inference +} // namespace paddle diff --git a/paddle/fluid/inference/anakin/convert/softmax.cc b/paddle/fluid/inference/anakin/convert/softmax.cc index a6c1e971b16fa7fe6a074bcb2cdf391410f8871f..a4dc5a9156b8f54cf8915e2a8829ada22d442ace 100644 --- a/paddle/fluid/inference/anakin/convert/softmax.cc +++ b/paddle/fluid/inference/anakin/convert/softmax.cc @@ -14,19 +14,14 @@ #include "paddle/fluid/inference/anakin/convert/softmax.h" -using anakin::graph::GraphGlobalMem; -using anakin::AK_FLOAT; -using anakin::saber::NV; -using anakin::saber::Shape; - namespace paddle { namespace inference { namespace anakin { -void SoftMaxOpConverter::operator()(const framework::proto::OpDesc &op, - const framework::BlockDesc &block_desc, - const framework::Scope &scope, - bool test_mode) { +template +void SoftMaxOpConverter::operator()( + const framework::proto::OpDesc &op, const framework::BlockDesc &block_desc, + const framework::Scope &scope, bool test_mode) { framework::OpDesc op_desc(op, nullptr); PADDLE_ENFORCE_EQ(op_desc.Input("X").size(), 1UL); @@ -41,8 +36,8 @@ void SoftMaxOpConverter::operator()(const framework::proto::OpDesc &op, auto input_shape_in_fluid = input_var_desc->GetShape(); size_t input_dims = input_shape_in_fluid.size(); - engine_->AddOp(op_name, "Softmax", {input}, {output}); - engine_->AddOpAttr(op_name, "axis", static_cast(input_dims - 1)); + this->engine_->AddOp(op_name, "Softmax", {input}, {output}); + this->engine_->AddOpAttr(op_name, "axis", static_cast(input_dims - 1)); } } // namespace anakin diff --git a/paddle/fluid/inference/anakin/convert/softmax.h b/paddle/fluid/inference/anakin/convert/softmax.h index a16356d5bb61ac2f3b4f7751e257ce36ca604bf1..dc431b5b867a2679050fa5b0128640678f36d210 100644 --- a/paddle/fluid/inference/anakin/convert/softmax.h +++ b/paddle/fluid/inference/anakin/convert/softmax.h @@ -20,7 +20,8 @@ namespace paddle { namespace inference { namespace anakin { -class SoftMaxOpConverter : public AnakinOpConverter { +template +class SoftMaxOpConverter : public AnakinOpConverter { public: SoftMaxOpConverter() = default; diff --git a/paddle/fluid/inference/anakin/convert/split.cc b/paddle/fluid/inference/anakin/convert/split.cc index ec582c1812623cd4bcefa2097015ba258f6bacbb..e63edea94ae010f3bd2240fd21147642f647581e 100644 --- a/paddle/fluid/inference/anakin/convert/split.cc +++ b/paddle/fluid/inference/anakin/convert/split.cc @@ -16,23 +16,16 @@ #include #include -using anakin::graph::GraphGlobalMem; -using anakin::AK_FLOAT; -using anakin::Precision; -using anakin::saber::NV; -using anakin::saber::X86; -using anakin::saber::Shape; -using anakin::PBlock; using anakin::PTuple; namespace paddle { namespace inference { namespace anakin { -void SplitOpConverter::operator()(const framework::proto::OpDesc &op, - const framework::BlockDesc &block_desc, - const framework::Scope &scope, - bool test_mode) { +template +void SplitOpConverter::operator()( + const framework::proto::OpDesc &op, const framework::BlockDesc &block_desc, + const framework::Scope 
&scope, bool test_mode) { framework::OpDesc op_desc(op, nullptr); auto input_name = op_desc.Input("X").front(); auto y_names = op_desc.Output("Out"); @@ -51,14 +44,16 @@ void SplitOpConverter::operator()(const framework::proto::OpDesc &op, num_sum += output_lengths[i]; slice_point.push_back(num_sum); } - engine_->AddOp(op_name, "Slice", {input_name}, y_names); - engine_->AddOpAttr(op_name, "axis", axis); - engine_->AddOpAttr>(op_name, "slice_point", slice_point); + this->engine_->AddOp(op_name, "Slice", {input_name}, y_names); + this->engine_->AddOpAttr(op_name, "axis", axis); + this->engine_->template AddOpAttr>(op_name, "slice_point", + slice_point); // slice_dim is useless in anakin - engine_->AddOpAttr(op_name, "slice_dim", 4); + this->engine_->AddOpAttr(op_name, "slice_dim", 4); } } // namespace anakin } // namespace inference } // namespace paddle + REGISTER_ANAKIN_OP_CONVERTER(split, SplitOpConverter); diff --git a/paddle/fluid/inference/anakin/convert/split.h b/paddle/fluid/inference/anakin/convert/split.h index 184112e589e2bbdb30bc7a5d2cd053b7f3732a58..819915315d90a500772756d1a21a5454694b9c0a 100644 --- a/paddle/fluid/inference/anakin/convert/split.h +++ b/paddle/fluid/inference/anakin/convert/split.h @@ -20,7 +20,8 @@ namespace paddle { namespace inference { namespace anakin { -class SplitOpConverter : public AnakinOpConverter { +template +class SplitOpConverter : public AnakinOpConverter { public: SplitOpConverter() = default; diff --git a/paddle/fluid/inference/anakin/convert/sum.cc b/paddle/fluid/inference/anakin/convert/sum.cc index 2a4178e2371389b44557d44ea526c7cc4a731d16..870c07934090370a05ad5e8a2e68af8f314e25ae 100644 --- a/paddle/fluid/inference/anakin/convert/sum.cc +++ b/paddle/fluid/inference/anakin/convert/sum.cc @@ -17,22 +17,16 @@ #include #include -using anakin::graph::GraphGlobalMem; -using anakin::AK_FLOAT; -using anakin::Precision; -using anakin::saber::NV; -using anakin::saber::X86; -using anakin::saber::Shape; -using anakin::PBlock; using anakin::PTuple; namespace paddle { namespace inference { namespace anakin { -void SumOpConverter::operator()(const framework::proto::OpDesc &op, - const framework::BlockDesc &block_desc, - const framework::Scope &scope, bool test_mode) { +template +void SumOpConverter::operator()( + const framework::proto::OpDesc &op, const framework::BlockDesc &block_desc, + const framework::Scope &scope, bool test_mode) { framework::OpDesc op_desc(op, nullptr); PADDLE_ENFORCE_EQ(op_desc.Input("X").size(), 2); PADDLE_ENFORCE_EQ(op_desc.Output("Out").size(), 1); @@ -43,9 +37,10 @@ void SumOpConverter::operator()(const framework::proto::OpDesc &op, std::vector coeff = {1, 1}; std::string elementwise_type = "Add"; - engine_->AddOp(op_name, "Eltwise", input_names, {out_name}); - engine_->AddOpAttr>(op_name, "coeff", coeff); - engine_->AddOpAttr(op_name, "type", elementwise_type); + this->engine_->AddOp(op_name, "Eltwise", input_names, {out_name}); + this->engine_->template AddOpAttr>(op_name, "coeff", coeff); + this->engine_->template AddOpAttr(op_name, "type", + elementwise_type); } } // namespace anakin diff --git a/paddle/fluid/inference/anakin/convert/sum.h b/paddle/fluid/inference/anakin/convert/sum.h index b5d402b77fcf555ffaf910f8c9d1b7337181a64b..aefc64c623e916ee42604fed771d6985c4dcfd1d 100644 --- a/paddle/fluid/inference/anakin/convert/sum.h +++ b/paddle/fluid/inference/anakin/convert/sum.h @@ -20,7 +20,8 @@ namespace paddle { namespace inference { namespace anakin { -class SumOpConverter : public AnakinOpConverter { +template +class 
SumOpConverter : public AnakinOpConverter { public: SumOpConverter() = default; diff --git a/paddle/fluid/inference/anakin/convert/test_activation_op.cc b/paddle/fluid/inference/anakin/convert/test_activation_op.cc index 8bedd4a749a645829658291310347eeed1c0ea49..4f898252d2798022d09f65e03b3cde35fcb6730c 100644 --- a/paddle/fluid/inference/anakin/convert/test_activation_op.cc +++ b/paddle/fluid/inference/anakin/convert/test_activation_op.cc @@ -21,12 +21,14 @@ namespace paddle { namespace inference { namespace anakin { -static void test_activation_op(const std::string &op_type) { - auto *converter = Registry::Global().Lookup(op_type); - PADDLE_ENFORCE(converter != nullptr); +template +static void test_activation_op(const std::string& op_type, + const platform::DeviceContext& context, + bool use_gpu) { std::unordered_set parameters; framework::Scope scope; - AnakinConvertValidation validator(parameters, &scope); + AnakinConvertValidation validator( + parameters, &scope, context, use_gpu); validator.DeclInputVar("act-X", {10, 6, 1, 1}); validator.DeclOutputVar("act-Out", {10, 6, 1, 1}); framework::OpDesc desc; @@ -34,6 +36,14 @@ static void test_activation_op(const std::string &op_type) { desc.SetInput("X", {"act-X"}); desc.SetOutput("Out", {"act-Out"}); + if (op_type == "swish") { + desc.SetAttr("beta", 1.0f); + } + + if (op_type == "relu6") { + desc.SetAttr("threshold", 6.0f); + } + LOG(INFO) << "set OP"; validator.SetOp(*desc.Proto()); LOG(INFO) << "execute"; @@ -41,13 +51,74 @@ static void test_activation_op(const std::string &op_type) { validator.Execute(5); } -TEST(sigm_op, test) { test_activation_op("sigmoid"); } -TEST(tanh_op, test) { test_activation_op("tanh"); } +#ifdef PADDLE_WITH_CUDA +TEST(sigm_op, gpu) { + platform::CUDAPlace gpu_place(0); + platform::CUDADeviceContext ctx(gpu_place); + test_activation_op<::anakin::saber::NV>("sigmoid", ctx, true); +} + +TEST(tanh_op, gpu) { + platform::CUDAPlace gpu_place(0); + platform::CUDADeviceContext ctx(gpu_place); + test_activation_op<::anakin::saber::NV>("tanh", ctx, true); +} + +TEST(relu6_op, gpu) { + platform::CUDAPlace gpu_place(0); + platform::CUDADeviceContext ctx(gpu_place); + test_activation_op<::anakin::saber::NV>("relu6", ctx, true); +} + +TEST(swish_op, gpu) { + platform::CUDAPlace gpu_place(0); + platform::CUDADeviceContext ctx(gpu_place); + test_activation_op<::anakin::saber::NV>("swish", ctx, true); +} +#endif + +/* +TEST(sigm_op, cpu) { + platform::CPUPlace cpu_place; + platform::CPUDeviceContext ctx(cpu_place); + test_activation_op<::anakin::saber::X86>("sigmoid", ctx, false); +} + +TEST(tanh_op, cpu) { + platform::CPUPlace cpu_place; + platform::CPUDeviceContext ctx(cpu_place); + test_activation_op<::anakin::saber::X86>("tanh", ctx, false); +} + +TEST(relu6_op, cpu) { + platform::CPUPlace cpu_place; + platform::CPUDeviceContext ctx(cpu_place); + test_activation_op<::anakin::saber::X86>("relu6", ctx, false); +} + +TEST(swish_op, cpu) { + platform::CPUPlace cpu_place; + platform::CPUDeviceContext ctx(cpu_place); + test_activation_op<::anakin::saber::X86>("swish", ctx, false); +} +*/ + } // namespace anakin } // namespace inference } // namespace paddle USE_OP(sigmoid); USE_OP(tanh); +USE_OP(relu6); +USE_OP(swish); + +USE_CPU_ANAKIN_CONVERTER(sigmoid); +USE_CPU_ANAKIN_CONVERTER(tanh); +USE_CPU_ANAKIN_CONVERTER(relu6); +USE_CPU_ANAKIN_CONVERTER(swish); +#ifdef PADDLE_WITH_CUDA USE_ANAKIN_CONVERTER(sigmoid); USE_ANAKIN_CONVERTER(tanh); +USE_ANAKIN_CONVERTER(relu6); +USE_ANAKIN_CONVERTER(swish); +#endif diff --git 
a/paddle/fluid/inference/anakin/convert/test_affine_channel_op.cc b/paddle/fluid/inference/anakin/convert/test_affine_channel_op.cc new file mode 100644 index 0000000000000000000000000000000000000000..f6399387aa264d993462d33011a4cddaa4a23359 --- /dev/null +++ b/paddle/fluid/inference/anakin/convert/test_affine_channel_op.cc @@ -0,0 +1,75 @@ +/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#include +#include "paddle/fluid/inference/anakin/convert/affine_channel.h" +#include "paddle/fluid/inference/anakin/convert/op_converter.h" +#include "paddle/fluid/inference/anakin/convert/ut_helper.h" + +namespace paddle { +namespace inference { +namespace anakin { + +template +void test_affine_channel_op(const platform::DeviceContext& context, + bool use_gpu) { + // Declare the difference between the inputs. + std::unordered_set parameters({"scale", "bias"}); + + framework::Scope scope; + AnakinConvertValidation validator( + parameters, &scope, context, use_gpu); + validator.DeclInputVar("x", {1, 3, 5, 2}); + validator.DeclOutputVar("out", {1, 3, 5, 2}); + validator.DeclParamVar("scale", {3}); + validator.DeclParamVar("bias", {3}); + + // Prepare Op descriptions. + framework::OpDesc desc; + desc.SetType("affine_channel"); + desc.SetInput("X", {"x"}); + desc.SetInput("Bias", {"bias"}); + desc.SetInput("Scale", {"scale"}); + desc.SetOutput("Out", {"out"}); + + // Layout must be explicitly specified here as NCHW. 
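+  // For reference (not part of the original test): affine_channel applies a
+  // per-channel affine transform in NCHW layout,
+  //     Out[n][c][h][w] = Scale[c] * X[n][c][h][w] + Bias[c],
+  // which is why "scale" and "bias" are declared with shape {3} above:
+  // one value per channel of the {1, 3, 5, 2} input.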
+ desc.SetAttr("data_layout", std::string("NCHW")); + + validator.SetOp(*desc.Proto()); + validator.Execute(1); +} + +#ifdef PADDLE_WITH_CUDA +TEST(affine_channel_op, gpu) { + platform::CUDAPlace gpu_place(0); + platform::CUDADeviceContext ctx(gpu_place); + test_affine_channel_op<::anakin::saber::NV>(ctx, true); +} +#endif + +TEST(affine_channel_op, cpu) { + platform::CPUPlace cpu_place; + platform::CPUDeviceContext ctx(cpu_place); + test_affine_channel_op<::anakin::saber::X86>(ctx, false); +} + +} // namespace anakin +} // namespace inference +} // namespace paddle + +USE_OP(affine_channel); +USE_CPU_ANAKIN_CONVERTER(affine_channel); +#ifdef PADDLE_WITH_CUDA +USE_ANAKIN_CONVERTER(affine_channel); +#endif diff --git a/paddle/fluid/inference/anakin/convert/test_batch_norm_op.cc b/paddle/fluid/inference/anakin/convert/test_batch_norm_op.cc index 2832e1c8d167c646c9049beebc57a82fe416e62c..c008ef1bd5ee258b7d3095ac7836a9eacb2cf83a 100644 --- a/paddle/fluid/inference/anakin/convert/test_batch_norm_op.cc +++ b/paddle/fluid/inference/anakin/convert/test_batch_norm_op.cc @@ -19,12 +19,14 @@ namespace paddle { namespace inference { namespace anakin { -TEST(batch_norm_op, test) { +template +void test_batchnorm_op(const platform::DeviceContext& context, bool use_gpu) { std::unordered_set parameters( {"batch_norm_scale", "batch_norm_bias", "batch_norm_mean", "batch_norm_variance"}); framework::Scope scope; - AnakinConvertValidation validator(parameters, &scope); + AnakinConvertValidation validator( + parameters, &scope, context, use_gpu); std::vector param_shape{2}; validator.DeclInputVar("batch_norm_X", {1, 2, 5, 5}); @@ -64,8 +66,26 @@ TEST(batch_norm_op, test) { validator.Execute(1, neglected_output); } +#ifdef PADDLE_WITH_CUDA +TEST(batch_norm_op, gpu) { + platform::CUDAPlace gpu_place(0); + platform::CUDADeviceContext ctx(gpu_place); + test_batchnorm_op<::anakin::saber::NV>(ctx, true); +} +#endif + +TEST(batch_norm_op, cpu) { + platform::CPUPlace cpu_place; + platform::CPUDeviceContext ctx(cpu_place); + test_batchnorm_op<::anakin::saber::X86>(ctx, false); +} + } // namespace anakin } // namespace inference } // namespace paddle USE_OP(batch_norm); +USE_CPU_ANAKIN_CONVERTER(batch_norm); + +#ifdef PADDLE_WITH_CUDA USE_ANAKIN_CONVERTER(batch_norm); +#endif diff --git a/paddle/fluid/inference/anakin/convert/test_concat_op.cc b/paddle/fluid/inference/anakin/convert/test_concat_op.cc index ecf44def5a2429360f0bcb92f00a0423e1d491cd..42dfbeb5cdc4062143385bde569c3d80f1c774c9 100644 --- a/paddle/fluid/inference/anakin/convert/test_concat_op.cc +++ b/paddle/fluid/inference/anakin/convert/test_concat_op.cc @@ -21,10 +21,12 @@ namespace paddle { namespace inference { namespace anakin { -TEST(concat_op, test) { +template +void test_concat_op(const platform::DeviceContext& context, bool use_gpu) { std::unordered_set parameters({""}); framework::Scope scope; - AnakinConvertValidation validator(parameters, &scope); + AnakinConvertValidation validator( + parameters, &scope, context, use_gpu); validator.DeclInputVar("concat_x1", {1, 2, 1, 1}); validator.DeclInputVar("concat_x2", {1, 3, 1, 1}); validator.DeclInputVar("concat_x3", {1, 1, 1, 1}); @@ -44,31 +46,26 @@ TEST(concat_op, test) { validator.Execute(1); } -TEST(concat_op, test2) { - std::unordered_set parameters({""}); - framework::Scope scope; - AnakinConvertValidation validator(parameters, &scope); - validator.DeclInputVar("concat_x1", {1, 4}); - validator.DeclInputVar("concat_x2", {3, 4}); - validator.DeclInputVar("concat_x3", {2, 4}); - 
validator.DeclOutputVar("concat_out", {6, 4}); - - // Prepare Op description - framework::OpDesc desc; - desc.SetType("concat"); - desc.SetInput("X", {"concat_x1", "concat_x2", "concat_x3"}); - desc.SetOutput("Out", {"concat_out"}); - - int axis = 0; - desc.SetAttr("axis", axis); - - validator.SetOp(*desc.Proto()); +#ifdef PADDLE_WITH_CUDA +TEST(concat_op, gpu) { + platform::CUDAPlace gpu_place(0); + platform::CUDADeviceContext ctx(gpu_place); + test_concat_op<::anakin::saber::NV>(ctx, true); +} +#endif - validator.Execute(1); +TEST(concat_op, cpu) { + platform::CPUPlace cpu_place; + platform::CPUDeviceContext ctx(cpu_place); + test_concat_op<::anakin::saber::X86>(ctx, false); } } // namespace anakin } // namespace inference } // namespace paddle USE_OP(concat); +USE_CPU_ANAKIN_CONVERTER(concat); + +#ifdef PADDLE_WITH_CUDA USE_ANAKIN_CONVERTER(concat); +#endif diff --git a/paddle/fluid/inference/anakin/convert/test_conv2d_op.cc b/paddle/fluid/inference/anakin/convert/test_conv2d_op.cc index 6d93e50bc96b08b6ef7dd7c9d836038e335daae3..e95e11c4f968814fb225dee8a7750a47ee6a976e 100644 --- a/paddle/fluid/inference/anakin/convert/test_conv2d_op.cc +++ b/paddle/fluid/inference/anakin/convert/test_conv2d_op.cc @@ -21,13 +21,12 @@ namespace paddle { namespace inference { namespace anakin { -TEST(conv2d_op, test) { - auto* conv2d_converter = - Registry::Global().Lookup("conv2d"); - ASSERT_TRUE(conv2d_converter != nullptr); +template +void test_conv2d_op(const platform::DeviceContext& context, bool use_gpu) { std::unordered_set parameters({"conv2d-Y"}); framework::Scope scope; - AnakinConvertValidation validator(parameters, &scope); + AnakinConvertValidation validator( + parameters, &scope, context, use_gpu); validator.DeclInputVar("conv2d-X", {1, 3, 3, 3}); validator.DeclParamVar("conv2d-Y", {4, 3, 1, 1}); validator.DeclOutputVar("conv2d-Out", {1, 4, 3, 3}); @@ -54,9 +53,27 @@ TEST(conv2d_op, test) { validator.Execute(3); } +#ifdef PADDLE_WITH_CUDA +TEST(conv2d_op, gpu) { + platform::CUDAPlace gpu_place(0); + platform::CUDADeviceContext ctx(gpu_place); + test_conv2d_op<::anakin::saber::NV>(ctx, true); +} +#endif + +TEST(conv2d_op, cpu) { + platform::CPUPlace cpu_place; + platform::CPUDeviceContext ctx(cpu_place); + test_conv2d_op<::anakin::saber::X86>(ctx, false); +} + } // namespace anakin } // namespace inference } // namespace paddle USE_OP(conv2d); +USE_CPU_ANAKIN_CONVERTER(conv2d); + +#ifdef PADDLE_WITH_CUDA USE_ANAKIN_CONVERTER(conv2d); +#endif diff --git a/paddle/fluid/inference/anakin/convert/test_dropout_op.cc b/paddle/fluid/inference/anakin/convert/test_dropout_op.cc index b2de5ae0a6e58eb25a4588571686a25500fe546c..ae27e27ded5d9252e85c0da5d24b777e1a189b63 100644 --- a/paddle/fluid/inference/anakin/convert/test_dropout_op.cc +++ b/paddle/fluid/inference/anakin/convert/test_dropout_op.cc @@ -21,10 +21,12 @@ namespace paddle { namespace inference { namespace anakin { -TEST(dropout_op, native) { +template +void test_dropout_op(const platform::DeviceContext& context, bool use_gpu) { std::unordered_set parameters; framework::Scope scope; - AnakinConvertValidation validator(parameters, &scope); + AnakinConvertValidation validator( + parameters, &scope, context, use_gpu); validator.DeclInputVar("x", {1, 1, 2, 2}); validator.DeclOutputVar("out", {1, 1, 2, 2}); validator.DeclOutputVar("mask", {1, 1, 2, 2}); @@ -45,9 +47,26 @@ TEST(dropout_op, native) { validator.Execute(1, neglected_output); } +#ifdef PADDLE_WITH_CUDA +TEST(dropout_op, gpu) { + platform::CUDAPlace gpu_place(0); + 
platform::CUDADeviceContext ctx(gpu_place); + test_dropout_op<::anakin::saber::NV>(ctx, true); +} +#endif + +TEST(dropout_op, cpu) { + platform::CPUPlace cpu_place; + platform::CPUDeviceContext ctx(cpu_place); + test_dropout_op<::anakin::saber::X86>(ctx, false); +} + } // namespace anakin } // namespace inference } // namespace paddle USE_OP(dropout); +USE_CPU_ANAKIN_CONVERTER(dropout); +#ifdef PADDLE_WITH_CUDA USE_ANAKIN_CONVERTER(dropout); +#endif diff --git a/paddle/fluid/inference/anakin/convert/test_elementwise_op.cc b/paddle/fluid/inference/anakin/convert/test_elementwise_op.cc index 3a437f5fdb565609667b7a862c9b2bb13cdbeded..bff75294908aab2997fbb1138a53112f22afe312 100644 --- a/paddle/fluid/inference/anakin/convert/test_elementwise_op.cc +++ b/paddle/fluid/inference/anakin/convert/test_elementwise_op.cc @@ -21,10 +21,14 @@ namespace paddle { namespace inference { namespace anakin { -static void test_elementwise_op(const std::string &op_type) { +template +static void test_elementwise_op(const std::string& op_type, + const platform::DeviceContext& context, + bool use_gpu) { std::unordered_set parameters; framework::Scope scope; - AnakinConvertValidation validator(parameters, &scope); + AnakinConvertValidation validator( + parameters, &scope, context, use_gpu); validator.DeclInputVar("x", {1, 1, 2, 2}); validator.DeclInputVar("y", {1, 1, 2, 2}); validator.DeclOutputVar("out", {1, 1, 2, 2}); @@ -43,14 +47,41 @@ static void test_elementwise_op(const std::string &op_type) { validator.Execute(1); } -TEST(elementwise_op, native_add) { test_elementwise_op("elementwise_add"); } -TEST(elementwise_op, native_mul) { test_elementwise_op("elementwise_mul"); } +#ifdef PADDLE_WITH_CUDA +TEST(elementwise_op, native_add_gpu) { + platform::CUDAPlace gpu_place(0); + platform::CUDADeviceContext ctx(gpu_place); + test_elementwise_op<::anakin::saber::NV>("elementwise_add", ctx, true); +} +TEST(elementwise_op, native_mul_gpu) { + platform::CUDAPlace gpu_place(0); + platform::CUDADeviceContext ctx(gpu_place); + test_elementwise_op<::anakin::saber::NV>("elementwise_mul", ctx, true); +} +#endif + +TEST(elementwise_op, native_add_cpu) { + platform::CPUPlace cpu_place; + platform::CPUDeviceContext ctx(cpu_place); + test_elementwise_op<::anakin::saber::X86>("elementwise_add", ctx, false); +} + +TEST(elementwise_op, native_mul_cpu) { + platform::CPUPlace cpu_place; + platform::CPUDeviceContext ctx(cpu_place); + test_elementwise_op<::anakin::saber::X86>("elementwise_mul", ctx, false); +} } // namespace anakin } // namespace inference } // namespace paddle USE_OP(elementwise_add); -USE_ANAKIN_CONVERTER(elementwise_add); USE_OP(elementwise_mul); +#ifdef PADDLE_WITH_CUDA +USE_ANAKIN_CONVERTER(elementwise_add); USE_ANAKIN_CONVERTER(elementwise_mul); +#endif + +USE_CPU_ANAKIN_CONVERTER(elementwise_add); +USE_CPU_ANAKIN_CONVERTER(elementwise_mul); diff --git a/paddle/fluid/inference/anakin/convert/test_fc_op.cc b/paddle/fluid/inference/anakin/convert/test_fc_op.cc index ee6d1dc291fe3733ff2e9f66dd453120fa266a55..a24c809c0221322256d985bc5c71c97afe9bd31f 100644 --- a/paddle/fluid/inference/anakin/convert/test_fc_op.cc +++ b/paddle/fluid/inference/anakin/convert/test_fc_op.cc @@ -20,13 +20,13 @@ namespace paddle { namespace inference { namespace anakin { -TEST(fc_op, test) { - auto* fc_converter = Registry::Global().Lookup("fc"); - ASSERT_TRUE(fc_converter); - +template +void test_mul_op(const platform::DeviceContext& context, bool use_gpu) { std::unordered_set parameters({"mul_y"}); framework::Scope scope; - 
AnakinConvertValidation validator(parameters, &scope); + + AnakinConvertValidation validator( + parameters, &scope, context, use_gpu); validator.DeclInputVar("mul_x", {1, 1, 2, 2}); validator.DeclParamVar("mul_y", {4, 2}); validator.DeclOutputVar("mul_out", {1, 2}); @@ -42,9 +42,26 @@ TEST(fc_op, test) { validator.Execute(10); } +#ifdef PADDLE_WITH_CUDA +TEST(mul_op, gpu) { + platform::CUDAPlace gpu_place(0); + platform::CUDADeviceContext ctx(gpu_place); + test_mul_op<::anakin::saber::NV>(ctx, true); +} +#endif + +TEST(mul_op, cpu) { + platform::CPUPlace cpu_place; + platform::CPUDeviceContext ctx(cpu_place); + test_mul_op<::anakin::saber::X86>(ctx, false); +} + } // namespace anakin } // namespace inference } // namespace paddle USE_OP(mul); +USE_CPU_ANAKIN_CONVERTER(fc); +#ifdef PADDLE_WITH_CUDA USE_ANAKIN_CONVERTER(fc); +#endif diff --git a/paddle/fluid/inference/anakin/convert/test_flatten_op.cc b/paddle/fluid/inference/anakin/convert/test_flatten_op.cc index d13281f11f03fdd75e585bce8b30e8780d81f7d7..5765f5ebd1f2a0c3adaee95f796273d51284f9e1 100644 --- a/paddle/fluid/inference/anakin/convert/test_flatten_op.cc +++ b/paddle/fluid/inference/anakin/convert/test_flatten_op.cc @@ -20,13 +20,12 @@ namespace paddle { namespace inference { namespace anakin { -TEST(flatten_op, test) { - auto *converter = Registry::Global().Lookup("flatten"); - ASSERT_TRUE(converter); - +template +void test_flatten_op(const platform::DeviceContext& context, bool use_gpu) { std::unordered_set parameters; framework::Scope scope; - AnakinConvertValidation validator(parameters, &scope); + AnakinConvertValidation validator( + parameters, &scope, context, use_gpu); validator.DeclInputVar("flatten-X", {3, 10, 10, 4}); validator.DeclOutputVar("flatten-Out", {3, 400, 1, 1}); framework::OpDesc desc; @@ -42,10 +41,27 @@ TEST(flatten_op, test) { validator.Execute(5); } +#ifdef PADDLE_WITH_CUDA +TEST(flatten_op, gpu) { + platform::CUDAPlace gpu_place(0); + platform::CUDADeviceContext ctx(gpu_place); + test_flatten_op<::anakin::saber::NV>(ctx, true); +} +#endif + +TEST(flatten_op, cpu) { + platform::CPUPlace cpu_place; + platform::CPUDeviceContext ctx(cpu_place); + test_flatten_op<::anakin::saber::X86>(ctx, false); +} + } // namespace anakin } // namespace inference } // namespace paddle USE_OP(reshape); USE_OP_ITSELF(flatten); +USE_CPU_ANAKIN_CONVERTER(flatten); +#ifdef PADDLE_WITH_CUDA USE_ANAKIN_CONVERTER(flatten); +#endif diff --git a/paddle/fluid/inference/anakin/convert/test_pool2d_op.cc b/paddle/fluid/inference/anakin/convert/test_pool2d_op.cc index 1ac019467721605c539c7ada452d04d5134fa341..90503b1fbba81eb20b5d32bca32ab109245fc07c 100644 --- a/paddle/fluid/inference/anakin/convert/test_pool2d_op.cc +++ b/paddle/fluid/inference/anakin/convert/test_pool2d_op.cc @@ -19,15 +19,14 @@ namespace paddle { namespace inference { namespace anakin { -void test_pool2d(bool global_pooling, bool ceil_mode, +template +void test_pool2d(const platform::DeviceContext& context, bool use_gpu, + bool global_pooling, bool ceil_mode, std::string pool_type = "max") { - auto* pool2d_converter = - Registry::Global().Lookup("pool2d"); - ASSERT_TRUE(pool2d_converter); - framework::Scope scope; std::unordered_set parameters; - AnakinConvertValidation validator(parameters, &scope); + AnakinConvertValidation validator( + parameters, &scope, context, use_gpu); // The ITensor's Dims should not contain the batch size. // So, the ITensor's Dims of input and output should be C * H * W. 
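The max_ceil/avg_ceil cases below exercise the pool2d `ceil_mode` attribute. As a quick reference, this standalone sketch shows the output-size arithmetic that flag toggles (the generic pooling formula, written out for illustration; it is not code from this patch):

#include <cstdio>

// Output spatial size of one pooled dimension; ceil_mode switches the final
// division from floor to ceil.
int pool_out_size(int in, int ksize, int pad, int stride, bool ceil_mode) {
  int span = in + 2 * pad - ksize;
  if (ceil_mode) span += stride - 1;  // (a + b - 1) / b == ceil(a / b)
  return span / stride + 1;
}

int main() {
  // e.g. a 6-wide input, 3-wide kernel, no padding, stride 2:
  std::printf("floor: %d\n", pool_out_size(6, 3, 0, 2, false));  // 2
  std::printf("ceil : %d\n", pool_out_size(6, 3, 0, 2, true));   // 3
  return 0;
}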
@@ -64,56 +63,61 @@ void test_pool2d(bool global_pooling, bool ceil_mode, validator.Execute(1); } -void test_pool2d2(bool global_pooling, bool ceil_mode, - std::string pool_type = "max") { - auto* pool2d_converter = - Registry::Global().Lookup("pool2d"); - ASSERT_TRUE(pool2d_converter); - - framework::Scope scope; - std::unordered_set parameters; - AnakinConvertValidation validator(parameters, &scope); - - // The ITensor's Dims should not contain the batch size. - // So, the ITensor's Dims of input and output should be C * H * W. - validator.DeclInputVar("pool2d_x", {1, 1, 17, 17}); - validator.DeclOutputVar("pool2d_out", {1, 1, 17, 17}); - - // Prepare Op description - framework::OpDesc desc; - desc.SetType("pool2d"); - desc.SetInput("X", {"pool2d_x"}); - desc.SetOutput("Out", {"pool2d_out"}); - - std::vector ksize({3, 3}); - std::vector strides({1, 1}); - std::vector paddings({1, 1}); - std::string pooling_t = pool_type; +#ifdef PADDLE_WITH_CUDA +TEST(Pool2dOpConverter, normal) { + platform::CUDAPlace gpu_place(0); + platform::CUDADeviceContext ctx(gpu_place); + test_pool2d<::anakin::saber::NV>(ctx, true, false, false); +} +TEST(Pool2dOpConverter, test_global_pooling) { + platform::CUDAPlace gpu_place(0); + platform::CUDADeviceContext ctx(gpu_place); + test_pool2d<::anakin::saber::NV>(ctx, true, true, false); +} - desc.SetAttr("pooling_type", pooling_t); - desc.SetAttr("ksize", ksize); - desc.SetAttr("strides", strides); - desc.SetAttr("paddings", paddings); - desc.SetAttr("global_pooling", global_pooling); - desc.SetAttr("ceil_mode", true); +TEST(Pool2dOpConverter, max_ceil_test) { + platform::CUDAPlace gpu_place(0); + platform::CUDADeviceContext ctx(gpu_place); + test_pool2d<::anakin::saber::NV>(ctx, true, false, true); +} - LOG(INFO) << "set OP"; - validator.SetOp(*desc.Proto()); - LOG(INFO) << "execute"; +TEST(Pool2dOpConverter, avg_ceil_test) { + platform::CUDAPlace gpu_place(0); + platform::CUDADeviceContext ctx(gpu_place); + test_pool2d<::anakin::saber::NV>(ctx, true, false, true, "avg"); +} +#endif - validator.Execute(1); +TEST(Pool2dOpConverter, normal_cpu) { + platform::CPUPlace cpu_place; + platform::CPUDeviceContext ctx(cpu_place); + test_pool2d<::anakin::saber::X86>(ctx, false, false, false); +} +TEST(Pool2dOpConverter, test_global_pooling_cpu) { + platform::CPUPlace cpu_place; + platform::CPUDeviceContext ctx(cpu_place); + test_pool2d<::anakin::saber::X86>(ctx, false, true, false); } -TEST(Pool2dOpConverter, normal) { test_pool2d(false, false); } -TEST(Pool2dOpConverter, test_global_pooling) { test_pool2d(true, false); } +TEST(Pool2dOpConverter, max_ceil_test_cpu) { + platform::CPUPlace cpu_place; + platform::CPUDeviceContext ctx(cpu_place); + test_pool2d<::anakin::saber::X86>(ctx, false, false, true); +} -TEST(Pool2dOpConverter, max_ceil_test) { test_pool2d(false, true); } -TEST(Pool2dOpConverter, avg_ceil_test) { test_pool2d(false, true, "avg"); } -TEST(Pool2dOpConverter, avg_ceil_test2) { test_pool2d2(false, true, "avg"); } +TEST(Pool2dOpConverter, avg_ceil_test_cpu) { + platform::CPUPlace cpu_place; + platform::CPUDeviceContext ctx(cpu_place); + test_pool2d<::anakin::saber::X86>(ctx, false, false, true, "avg"); +} } // namespace anakin } // namespace inference } // namespace paddle USE_OP(pool2d); +USE_CPU_ANAKIN_CONVERTER(pool2d); + +#ifdef PADDLE_WITH_CUDA USE_ANAKIN_CONVERTER(pool2d); +#endif diff --git a/paddle/fluid/inference/anakin/convert/test_relu_op.cc b/paddle/fluid/inference/anakin/convert/test_relu_op.cc index 
04e624518a5a4477bbb41475b575f85be5a120d4..3f224796519650a4a26907f9db58c5c8aab56e4f 100644 --- a/paddle/fluid/inference/anakin/convert/test_relu_op.cc +++ b/paddle/fluid/inference/anakin/convert/test_relu_op.cc @@ -21,18 +21,23 @@ namespace paddle { namespace inference { namespace anakin { -static void test_activation_op(const std::string &op_type) { - auto *converter = Registry::Global().Lookup(op_type); - PADDLE_ENFORCE(converter != nullptr); +template +static void test_activation_op(const std::string& op_type, + const platform::DeviceContext& context, + bool use_gpu) { std::unordered_set parameters; framework::Scope scope; - AnakinConvertValidation validator(parameters, &scope); + AnakinConvertValidation validator( + parameters, &scope, context, use_gpu); validator.DeclInputVar("act-X", {10, 6, 1, 1}); validator.DeclOutputVar("act-Out", {10, 6, 1, 1}); framework::OpDesc desc; desc.SetType(op_type); desc.SetInput("X", {"act-X"}); desc.SetOutput("Out", {"act-Out"}); + if (op_type == "leaky_relu") { + desc.SetAttr("alpha", 0.1f); + } LOG(INFO) << "set OP"; validator.SetOp(*desc.Proto()); @@ -41,10 +46,30 @@ static void test_activation_op(const std::string &op_type) { validator.Execute(5); } -TEST(sigm_op, test) { test_activation_op("relu"); } +#ifdef PADDLE_WITH_CUDA +TEST(relu_op, gpu) { + platform::CUDAPlace gpu_place(0); + platform::CUDADeviceContext ctx(gpu_place); + test_activation_op<::anakin::saber::NV>("relu", ctx, true); +} + +TEST(leaky_relu_op, gpu) { + platform::CUDAPlace gpu_place(0); + platform::CUDADeviceContext ctx(gpu_place); + test_activation_op<::anakin::saber::NV>("leaky_relu", ctx, true); +} +#endif + } // namespace anakin } // namespace inference } // namespace paddle USE_OP(relu); +USE_OP(leaky_relu); +USE_CPU_ANAKIN_CONVERTER(relu); +USE_CPU_ANAKIN_CONVERTER(leaky_relu); + +#ifdef PADDLE_WITH_CUDA USE_ANAKIN_CONVERTER(relu); +USE_ANAKIN_CONVERTER(leaky_relu); +#endif diff --git a/paddle/fluid/inference/anakin/convert/test_reshape_op.cc b/paddle/fluid/inference/anakin/convert/test_reshape_op.cc index 306ebf510f29a87ca1ffa6df86e08f86b3f8ffbb..e102bd3ac3ea0d5e0ec0fd46b243f38c13af9580 100644 --- a/paddle/fluid/inference/anakin/convert/test_reshape_op.cc +++ b/paddle/fluid/inference/anakin/convert/test_reshape_op.cc @@ -20,12 +20,12 @@ namespace paddle { namespace inference { namespace anakin { -TEST(reshape, test) { - auto* converter = Registry::Global().Lookup("reshape"); - ASSERT_TRUE(converter); +template +void test_reshape1_op(const platform::DeviceContext& context, bool use_gpu) { framework::Scope scope; std::unordered_set parameters; - AnakinConvertValidation validator(parameters, &scope); + AnakinConvertValidation validator( + parameters, &scope, context, use_gpu); // validator.DeclInputVar("reshape-X", {2, 3, 3, 1}); // validator.DeclOutputVar("reshape-Out", {3, 2, 1, 3}); @@ -45,10 +45,12 @@ TEST(reshape, test) { validator.Execute(1); } -TEST(reshape, test2) { +template +void test_reshape2_op(const platform::DeviceContext& context, bool use_gpu) { framework::Scope scope; std::unordered_set parameters; - AnakinConvertValidation validator(parameters, &scope); + AnakinConvertValidation validator( + parameters, &scope, context, use_gpu); validator.DeclInputVar("reshape-X", {1, 2, 4}); validator.DeclOutputVar("reshape-Out", {1, 4, 2}); @@ -66,9 +68,39 @@ TEST(reshape, test2) { validator.Execute(1); } +#ifdef PADDLE_WITH_CUDA +TEST(reshape1_op, gpu) { + platform::CUDAPlace gpu_place(0); + platform::CUDADeviceContext ctx(gpu_place); + 
  test_reshape1_op<::anakin::saber::NV>(ctx, true);
+}
+
+TEST(reshape2_op, gpu) {
+  platform::CUDAPlace gpu_place(0);
+  platform::CUDADeviceContext ctx(gpu_place);
+  test_reshape2_op<::anakin::saber::NV>(ctx, true);
+}
+#endif
+
+TEST(reshape1_op, cpu) {
+  platform::CPUPlace cpu_place;
+  platform::CPUDeviceContext ctx(cpu_place);
+  test_reshape1_op<::anakin::saber::X86>(ctx, false);
+}
+
+TEST(reshape2_op, cpu) {
+  platform::CPUPlace cpu_place;
+  platform::CPUDeviceContext ctx(cpu_place);
+  test_reshape2_op<::anakin::saber::X86>(ctx, false);
+}
+
 }  // namespace anakin
 }  // namespace inference
 }  // namespace paddle
 
 USE_OP(reshape);
+USE_CPU_ANAKIN_CONVERTER(reshape);
+
+#ifdef PADDLE_WITH_CUDA
 USE_ANAKIN_CONVERTER(reshape);
+#endif

diff --git a/paddle/fluid/inference/anakin/convert/test_softmax_op.cc b/paddle/fluid/inference/anakin/convert/test_softmax_op.cc
index 8c14fae0a67b9e488cf072535868a34f6195ab71..de0b18fdbfd5f721fcd397a621bcee04ff5e5ae4 100644
--- a/paddle/fluid/inference/anakin/convert/test_softmax_op.cc
+++ b/paddle/fluid/inference/anakin/convert/test_softmax_op.cc
@@ -20,12 +20,12 @@ namespace paddle {
 namespace inference {
 namespace anakin {
 
-TEST(softmax, test) {
-  auto* converter = Registry<AnakinOpConverter>::Global().Lookup("softmax");
-  ASSERT_TRUE(converter);
+template <typename TargetT>
+void test_softmax_op(const platform::DeviceContext& context, bool use_gpu) {
   framework::Scope scope;
   std::unordered_set<std::string> parameters;
-  AnakinConvertValidation validator(parameters, &scope);
+  AnakinConvertValidation<TargetT, ::anakin::Precision::FP32> validator(
+      parameters, &scope, context, use_gpu);
 
   validator.DeclInputVar("softmax-X", {1, 10, 2});
   validator.DeclOutputVar("softmax-Out", {1, 10, 2});
@@ -41,9 +41,27 @@ TEST(softmax, test) {
   validator.Execute(1);
 }
 
+#ifdef PADDLE_WITH_CUDA
+TEST(softmax_op, gpu) {
+  platform::CUDAPlace gpu_place(0);
+  platform::CUDADeviceContext ctx(gpu_place);
+  test_softmax_op<::anakin::saber::NV>(ctx, true);
+}
+#endif
+
+TEST(softmax_op, cpu) {
+  platform::CPUPlace cpu_place;
+  platform::CPUDeviceContext ctx(cpu_place);
+  test_softmax_op<::anakin::saber::X86>(ctx, false);
+}
+
 }  // namespace anakin
 }  // namespace inference
 }  // namespace paddle
 
 USE_OP(softmax);
+USE_CPU_ANAKIN_CONVERTER(softmax);
+
+#ifdef PADDLE_WITH_CUDA
 USE_ANAKIN_CONVERTER(softmax);
+#endif

diff --git a/paddle/fluid/inference/anakin/convert/test_split_op.cc b/paddle/fluid/inference/anakin/convert/test_split_op.cc
index aa61c01a511c2337944aadbbc3d47893487de683..9a42ffd853bb071cfa1a21b17450124dc46f8211 100644
--- a/paddle/fluid/inference/anakin/convert/test_split_op.cc
+++ b/paddle/fluid/inference/anakin/convert/test_split_op.cc
@@ -21,12 +21,14 @@ namespace paddle {
 namespace inference {
 namespace anakin {
 
-template <int Axis>
-void AnakinSliceTest(const std::vector<int> &in_shape,
+template <typename TargetT, int Axis>
+void AnakinSliceTest(const platform::DeviceContext &context, bool use_gpu,
+                     const std::vector<int> &in_shape,
                      const std::vector<int> &sections) {
   std::unordered_set<std::string> parameters({""});
   framework::Scope scope;
-  AnakinConvertValidation validator(parameters, &scope);
+  AnakinConvertValidation<TargetT, ::anakin::Precision::FP32> validator(
+      parameters, &scope, context, use_gpu);
 
   validator.DeclInputVar("split_input", in_shape);
   std::vector<std::string> output_vars;
@@ -55,51 +57,58 @@ void AnakinSliceTest(const std::vector<int> &in_shape,
 
 // batch = 0, axis = 1, same shape
 TEST(split_op, test_same_shape_axis1_batch1) {
-  AnakinSliceTest<1>({1, 4, 2, 2}, {2, 2});
+  platform::CUDAPlace gpu_place(0);
+  platform::CUDADeviceContext ctx(gpu_place);
+  AnakinSliceTest<::anakin::saber::NV, 1>(ctx, true, {1, 4, 2, 2}, {2, 2});
 }
 // batch = 0, axis = 1, different
shape TEST(split_op, test_different_shape_axis1_batch1) { - AnakinSliceTest<1>({1, 3, 2, 2}, {2, 1}); -} -// batch = 10, axis = 1, same shape -TEST(split_op, test_same_shape_axis1_batch10) { - AnakinSliceTest<1>({1, 4, 2, 2}, {2, 2}); -} -// batch = 10, axis = 1, different shape -TEST(split_op, test_different_shape_axis1_batch10) { - AnakinSliceTest<1>({1, 3, 2, 2}, {2, 1}); + platform::CUDAPlace gpu_place(0); + platform::CUDADeviceContext ctx(gpu_place); + AnakinSliceTest<::anakin::saber::NV, 1>(ctx, true, {1, 3, 2, 2}, {2, 1}); } // batch = 0, axis = 2, same shape TEST(split_op, test_same_shape_axis2_batch1) { - AnakinSliceTest<2>({1, 3, 4, 2}, {2, 2}); + platform::CUDAPlace gpu_place(0); + platform::CUDADeviceContext ctx(gpu_place); + AnakinSliceTest<::anakin::saber::NV, 2>(ctx, true, {1, 3, 4, 2}, {2, 2}); } // batch = 0, axis = 2, different shape TEST(split_op, test_different_shape_axis2_batch1) { - AnakinSliceTest<2>({1, 3, 3, 2}, {2, 1}); -} -// batch = 10, axis = 2, same shape -TEST(split_op, test_same_shape_axis2_batch10) { - AnakinSliceTest<2>({1, 3, 4, 2}, {2, 2}); -} -// batch = 10, axis = 2, different shape -TEST(split_op, test_different_shape_axis2_batch10) { - AnakinSliceTest<2>({1, 3, 3, 2}, {2, 1}); + platform::CUDAPlace gpu_place(0); + platform::CUDADeviceContext ctx(gpu_place); + AnakinSliceTest<::anakin::saber::NV, 2>(ctx, true, {1, 3, 3, 2}, {2, 1}); } + // batch = 0, axis = 3, same shape TEST(split_op, test_same_shape_axis3_batch1) { - AnakinSliceTest<3>({1, 3, 2, 4}, {2, 2}); + platform::CUDAPlace gpu_place(0); + platform::CUDADeviceContext ctx(gpu_place); + AnakinSliceTest<::anakin::saber::NV, 3>(ctx, true, {1, 3, 2, 4}, {2, 2}); } // batch = 0, axis = 3, different shape TEST(split_op, test_different_shape_axis3_batch1) { - AnakinSliceTest<3>({1, 3, 2, 3}, {2, 1}); + platform::CUDAPlace gpu_place(0); + platform::CUDADeviceContext ctx(gpu_place); + AnakinSliceTest<::anakin::saber::NV, 3>(ctx, true, {1, 3, 2, 3}, {2, 1}); } -// batch = 10, axis = 3, same shape -TEST(split_op, test_same_shape_axis3_batch10) { - AnakinSliceTest<3>({1, 3, 2, 4}, {2, 2}); + +TEST(split_op, test_different_shape_axis1_batch1_cpu) { + platform::CPUPlace cpu_place; + platform::CPUDeviceContext ctx(cpu_place); + AnakinSliceTest<::anakin::saber::X86, 1>(ctx, false, {1, 3, 2, 3}, {2, 1}); +} + +TEST(split_op, test_different_shape_axis2_batch1_cpu) { + platform::CPUPlace cpu_place; + platform::CPUDeviceContext ctx(cpu_place); + AnakinSliceTest<::anakin::saber::X86, 2>(ctx, false, {1, 3, 4, 2}, {2, 2}); } -// batch = 10, axis = 3, different shape -TEST(split_op, test_different_shape_axis3_batch10) { - AnakinSliceTest<3>({1, 3, 2, 3}, {2, 1}); + +TEST(split_op, test_different_shape_axis3_batch1_cpu) { + platform::CPUPlace cpu_place; + platform::CPUDeviceContext ctx(cpu_place); + AnakinSliceTest<::anakin::saber::X86, 3>(ctx, false, {1, 3, 2, 4}, {2, 2}); } } // namespace anakin @@ -107,4 +116,7 @@ TEST(split_op, test_different_shape_axis3_batch10) { } // namespace paddle USE_OP(split); +USE_CPU_ANAKIN_CONVERTER(split); +#ifdef PADDLE_WITH_CUDA USE_ANAKIN_CONVERTER(split); +#endif diff --git a/paddle/fluid/inference/anakin/convert/test_sum_op.cc b/paddle/fluid/inference/anakin/convert/test_sum_op.cc index d6a59a0166be9239b480221cc076069239403429..65f67ebd129893f553dc5b1663dcea377653b463 100644 --- a/paddle/fluid/inference/anakin/convert/test_sum_op.cc +++ b/paddle/fluid/inference/anakin/convert/test_sum_op.cc @@ -22,10 +22,12 @@ namespace paddle { namespace inference { namespace anakin { -TEST(sum, 
native) { +template +static void test_sum_op(const platform::DeviceContext& context, bool use_gpu) { std::unordered_set parameters; framework::Scope scope; - AnakinConvertValidation validator(parameters, &scope); + AnakinConvertValidation validator( + parameters, &scope, context, use_gpu); validator.DeclInputVar("sum_x1", {1, 2, 1, 2}); validator.DeclInputVar("sum_x2", {1, 2, 1, 2}); validator.DeclOutputVar("sum_out", {1, 2, 1, 2}); @@ -40,9 +42,26 @@ TEST(sum, native) { validator.Execute(1); } +#ifdef PADDLE_WITH_CUDA +TEST(sum_op, gpu) { + platform::CUDAPlace gpu_place(0); + platform::CUDADeviceContext ctx(gpu_place); + test_sum_op<::anakin::saber::NV>(ctx, true); +} +#endif + +TEST(sum_op, cpu) { + platform::CPUPlace cpu_place; + platform::CPUDeviceContext ctx(cpu_place); + test_sum_op<::anakin::saber::X86>(ctx, false); +} + } // namespace anakin } // namespace inference } // namespace paddle USE_OP(sum); +USE_CPU_ANAKIN_CONVERTER(sum); +#ifdef PADDLE_WITH_CUDA USE_ANAKIN_CONVERTER(sum); +#endif diff --git a/paddle/fluid/inference/anakin/convert/test_transpose_op.cc b/paddle/fluid/inference/anakin/convert/test_transpose_op.cc index 016ed26f02f782fe5032d8368f7767a5c94dfe9f..51b69dfbb08b7335590407fec2068baff1a8d046 100644 --- a/paddle/fluid/inference/anakin/convert/test_transpose_op.cc +++ b/paddle/fluid/inference/anakin/convert/test_transpose_op.cc @@ -20,12 +20,12 @@ namespace paddle { namespace inference { namespace anakin { -TEST(transpose_op, test) { - auto* converter = Registry::Global().Lookup("transpose"); - ASSERT_TRUE(converter != nullptr); +template +void test_transpose1_op(const platform::DeviceContext& context, bool use_gpu) { std::unordered_set parameters; framework::Scope scope; - AnakinConvertValidation validator(parameters, &scope); + AnakinConvertValidation validator( + parameters, &scope, context, use_gpu); validator.DeclInputVar("transpose-X", {2, 3, 4, 5}); validator.DeclOutputVar("transpose-Out", {4, 2, 5, 3}); @@ -43,11 +43,12 @@ TEST(transpose_op, test) { validator.Execute(3); } -// test input shape's dims < 4 -TEST(transpose_op, test2) { +template +void test_transpose2_op(const platform::DeviceContext& context, bool use_gpu) { std::unordered_set parameters; framework::Scope scope; - AnakinConvertValidation validator(parameters, &scope); + AnakinConvertValidation validator( + parameters, &scope, context, use_gpu); validator.DeclInputVar("transpose-X", {3, 4, 5}); validator.DeclOutputVar("transpose-Out", {3, 5, 4}); @@ -65,9 +66,38 @@ TEST(transpose_op, test2) { validator.Execute(1); } +#ifdef PADDLE_WITH_CUDA +TEST(transpose1_op, gpu) { + platform::CUDAPlace gpu_place(0); + platform::CUDADeviceContext ctx(gpu_place); + test_transpose1_op<::anakin::saber::NV>(ctx, true); +} + +TEST(transpose2_op, gpu) { + platform::CUDAPlace gpu_place(0); + platform::CUDADeviceContext ctx(gpu_place); + test_transpose2_op<::anakin::saber::NV>(ctx, true); +} +#endif + +TEST(transpose1_op, cpu) { + platform::CPUPlace cpu_place; + platform::CPUDeviceContext ctx(cpu_place); + test_transpose2_op<::anakin::saber::X86>(ctx, false); +} + +TEST(transpose2_op, cpu) { + platform::CPUPlace cpu_place; + platform::CPUDeviceContext ctx(cpu_place); + test_transpose2_op<::anakin::saber::X86>(ctx, false); +} + } // namespace anakin } // namespace inference } // namespace paddle USE_OP(transpose); +USE_CPU_ANAKIN_CONVERTER(transpose); +#ifdef PADDLE_WITH_CUDA USE_ANAKIN_CONVERTER(transpose); +#endif diff --git a/paddle/fluid/inference/anakin/convert/transpose.cc 
b/paddle/fluid/inference/anakin/convert/transpose.cc index f35372fe5c315ec68bc80a6d03c5931899ff7555..28071ca8449cdd61799011308a992eacd51dfd38 100644 --- a/paddle/fluid/inference/anakin/convert/transpose.cc +++ b/paddle/fluid/inference/anakin/convert/transpose.cc @@ -17,20 +17,16 @@ #include #include -using anakin::graph::GraphGlobalMem; -using anakin::AK_FLOAT; -using anakin::saber::NV; -using anakin::saber::Shape; using anakin::PTuple; namespace paddle { namespace inference { namespace anakin { -void TransposeOpConverter::operator()(const framework::proto::OpDesc &op, - const framework::BlockDesc &block_desc, - const framework::Scope &scope, - bool test_mode) { +template +void TransposeOpConverter::operator()( + const framework::proto::OpDesc &op, const framework::BlockDesc &block_desc, + const framework::Scope &scope, bool test_mode) { framework::OpDesc op_desc(op, nullptr); PADDLE_ENFORCE_EQ(op_desc.Input("X").size(), 1); PADDLE_ENFORCE_EQ(op_desc.Output("Out").size(), 1); @@ -38,7 +34,7 @@ void TransposeOpConverter::operator()(const framework::proto::OpDesc &op, auto input = op_desc.Input("X").front(); auto output = op_desc.Output("Out").front(); auto op_name = op_desc.Type() + ":" + op_desc.Output("Out").front(); - engine_->AddOp(op_name, "Permute", {input}, {output}); + this->engine_->AddOp(op_name, "Permute", {input}, {output}); auto axis = boost::get>(op_desc.GetAttr("axis")); size_t axis_size = axis.size(); @@ -46,7 +42,7 @@ void TransposeOpConverter::operator()(const framework::proto::OpDesc &op, axis.push_back(axis_size); axis_size += 1; } - engine_->AddOpAttr>(op_name, "dims", axis); + this->engine_->template AddOpAttr>(op_name, "dims", axis); } } // namespace anakin diff --git a/paddle/fluid/inference/anakin/convert/transpose.h b/paddle/fluid/inference/anakin/convert/transpose.h index bacbf152bc12319e6296677500b17d55d9772412..b7b0a0f209e7d6402ad7d5e30d23cf0523d3cf90 100644 --- a/paddle/fluid/inference/anakin/convert/transpose.h +++ b/paddle/fluid/inference/anakin/convert/transpose.h @@ -20,7 +20,8 @@ namespace paddle { namespace inference { namespace anakin { -class TransposeOpConverter : public AnakinOpConverter { +template +class TransposeOpConverter : public AnakinOpConverter { public: TransposeOpConverter() = default; diff --git a/paddle/fluid/inference/anakin/convert/ut_helper.h b/paddle/fluid/inference/anakin/convert/ut_helper.h index 029aff6704ff1015e5c2378a2202c94043df990d..2f8f953892c390c4790869d17981be6b9ae05d0f 100644 --- a/paddle/fluid/inference/anakin/convert/ut_helper.h +++ b/paddle/fluid/inference/anakin/convert/ut_helper.h @@ -32,14 +32,8 @@ limitations under the License. */ #include "paddle/fluid/inference/utils/singleton.h" #include "paddle/fluid/platform/enforce.h" -using anakin::graph::GraphGlobalMem; -using anakin::AK_FLOAT; using anakin::Precision; -using anakin::saber::NV; using anakin::saber::X86; -using anakin::saber::Shape; -using anakin::PBlock; -using anakin::PTuple; namespace paddle { namespace inference { @@ -55,8 +49,8 @@ float random(float low, float high) { return dist(mt); } -void RandomizeTensor(framework::LoDTensor* tensor, const platform::Place& place, - const platform::DeviceContext& ctx) { +void RandomizeTensor(framework::LoDTensor* tensor, + const platform::Place& place) { auto dims = tensor->dims(); size_t num_elements = analysis::AccuDims(dims, dims.size()); PADDLE_ENFORCE_GT(num_elements, 0); @@ -78,17 +72,19 @@ void RandomizeTensor(framework::LoDTensor* tensor, const platform::Place& place, * anakin * layer. 
*/ +template class AnakinConvertValidation { - using AnakinNvEngineT = AnakinEngine; + using AnakinNvEngineT = AnakinEngine; public: AnakinConvertValidation() = delete; AnakinConvertValidation(const std::unordered_set& parameters, - framework::Scope* scope) - : parameters_(parameters), scope_(scope), place_(0) { - PADDLE_ENFORCE_EQ(cudaStreamCreate(&stream_), 0); - engine_.reset(new AnakinEngine(true)); + framework::Scope* scope, + const platform::DeviceContext& ctx, + bool use_gpu = true) + : parameters_(parameters), scope_(scope), ctx_(ctx), use_gpu_(use_gpu) { + engine_.reset(new AnakinEngine(true)); } // Declare a Variable as input with random initialization. @@ -108,11 +104,10 @@ class AnakinConvertValidation { } void DeclVar(const std::string& name, const std::vector dim_vec) { - platform::CUDADeviceContext ctx(place_); auto* x = scope_->Var(name); auto* x_tensor = x->GetMutable(); x_tensor->Resize(framework::make_ddim(dim_vec)); - RandomizeTensor(x_tensor, place_, ctx); + RandomizeTensor(x_tensor, ctx_.GetPlace()); std::vector dim_vec_int64; for (auto& ele : dim_vec) { @@ -132,7 +127,7 @@ class AnakinConvertValidation { // should init anakin engine here. auto& block_desc = program_desc_.Block(framework::kRootBlockIndex); - Singleton::Global().ConvertOp( + Singleton>::Global().ConvertOp( desc, block_desc, parameters_, *scope_, engine_.get(), true /*test_mode*/); engine_->Freeze(); @@ -151,7 +146,7 @@ class AnakinConvertValidation { } engine_->SetMaxInputShape(temp_max_input_shape); engine_->Optimize(); - engine_->InitGraph(); + engine_->InitNet(); } // We use the set 'neglected_output' here, because some Ops like batch norm, @@ -160,11 +155,8 @@ class AnakinConvertValidation { void Execute(int batch_size, std::unordered_set neglected_output = {}) { // Execute Fluid Op - platform::CUDADeviceContext ctx(place_); - op_->Run(*scope_, place_); + op_->Run(*scope_, ctx_.GetPlace()); - // std::vector input_vector; - // std::vector output_vector; std::map inputs; for (const auto& input : op_desc_->InputArgumentNames()) { if (parameters_.count(input)) continue; @@ -180,20 +172,27 @@ class AnakinConvertValidation { std::vector fluid_out; auto* var = scope_->FindVar(output); auto tensor = var->GetMutable(); - framework::TensorToVector(*tensor, ctx, &fluid_out); + framework::TensorToVector(*tensor, ctx_, &fluid_out); fluid_outputs.push_back(fluid_out); outputs.insert({output, tensor}); } - engine_->Execute(inputs, outputs, stream_); + if (!use_gpu_) { + engine_->Execute(inputs, outputs); + } else { + cudaStream_t stream; + PADDLE_ENFORCE_EQ(cudaStreamCreate(&stream), 0); + engine_->Execute(inputs, outputs, stream); + } + int i_output = 0; for (const auto& output : op_desc_->OutputArgumentNames()) { if (neglected_output.count(output)) continue; std::vector anakin_out; auto* var = scope_->FindVar(output); auto tensor = var->GetMutable(); - framework::TensorToVector(*tensor, ctx, &anakin_out); + framework::TensorToVector(*tensor, ctx_, &anakin_out); size_t anakin_out_size = anakin_out.size(); auto fluid_out = fluid_outputs[i_output++]; @@ -205,15 +204,24 @@ class AnakinConvertValidation { private: std::unique_ptr engine_{nullptr}; - cudaStream_t stream_; std::unique_ptr op_; std::unique_ptr op_desc_; framework::ProgramDesc program_desc_; const std::unordered_set& parameters_; framework::Scope* scope_; - platform::CUDAPlace place_; + const platform::DeviceContext& ctx_; + bool use_gpu_{true}; }; +template class AnakinConvertValidation<::anakin::saber::NV, + ::anakin::Precision::FP32>; +template class 
AnakinConvertValidation<::anakin::saber::X86, + ::anakin::Precision::FP32>; + +template class AnakinConvertValidation<::anakin::saber::NV, + ::anakin::Precision::INT8>; +template class AnakinConvertValidation<::anakin::saber::X86, + ::anakin::Precision::INT8>; } // namespace anakin } // namespace inference } // namespace paddle diff --git a/paddle/fluid/inference/anakin/engine.cc b/paddle/fluid/inference/anakin/engine.cc index ba044c9401a5f0fb5a839c1766fdd9d412d42212..529a859458a9884a53778e7133ab121ed582a3fb 100644 --- a/paddle/fluid/inference/anakin/engine.cc +++ b/paddle/fluid/inference/anakin/engine.cc @@ -35,12 +35,15 @@ namespace anakin { template AnakinEngine::AnakinEngine( bool need_summary, int device, int max_batch_size, - std::map> max_input_shape) + std::map> max_input_shape, + std::vector program_inputs, bool auto_config_layout) : graph_(new AnakinGraphT()), net_(new AnakinNetT(need_summary)) { device_ = device; max_batch_size_ = max_batch_size; max_input_shape_ = max_input_shape; + program_inputs_ = program_inputs; + auto_config_layout_ = auto_config_layout; } template @@ -54,8 +57,8 @@ void AnakinEngine::SetInputShape( } template -void AnakinEngine::InitGraph() { - net_->init(*graph_); +void AnakinEngine::InitNet() { + net_->init(*graph_, auto_config_layout_); } template @@ -67,11 +70,11 @@ void AnakinEngine::AddOp( } template -void AnakinEngine::Execute( - const std::map &inputs, - const std::map &outputs, - cudaStream_t stream) { +void AnakinEngine::BindInput( + const std::map &inputs) { +#ifdef PADDLE_WITH_CUDA cudaDeviceSynchronize(); +#endif for (const auto &input : inputs) { auto *tensor = input.second; auto *data = tensor->data(); @@ -85,16 +88,53 @@ void AnakinEngine::Execute( int max_shape_sum = std::accumulate(max_input_shape.begin(), max_input_shape.end(), 1, std::multiplies()); - - PADDLE_ENFORCE(max_shape_sum >= tensor->numel(), - "The anakin input max shape should be greater than" - " or equal to the real input shape, Please set the max " - "input shape using EnableAnakinEngine"); + if (tensor->numel() > max_shape_sum) { + PADDLE_ENFORCE(std::find(program_inputs_.begin(), program_inputs_.end(), + input.first) == program_inputs_.end(), + "The anakin input max shape should be greater than" + " or equal to the real input shape, Please set the max " + "input shape using EnableAnakinEngine"); + VLOG(3) << "Anakin Net will be reset because of the inputs out of range: " + << input.first; + graph_->Reshape(input.first, fluid_input_shape); + net_.reset(new AnakinNetT(true)); + net_->init(*graph_); + anakin_input = net_->get_in(input.first); + } anakin_input->reshape(fluid_input_shape); ::anakin::saber::Tensor tmp_anakin_tensor(data, TargetT(), 0, fluid_input_shape); anakin_input->copy_from(tmp_anakin_tensor); } +} + +template +void AnakinEngine::Execute( + const std::map &inputs, + const std::map &outputs) { + BindInput(inputs); + net_->prediction(); + for (const auto &output : outputs) { + platform::CPUPlace cpu_place; + auto *tensor = output.second; + auto *anakin_output = net_->get_out(output.first); + auto *anakin_data = anakin_output->data(); + auto anakin_output_shape = anakin_output->valid_shape(); + tensor->Resize(framework::make_ddim(anakin_output_shape)); + auto *fluid_data = tensor->mutable_data(cpu_place); + memory::Copy(cpu_place, static_cast(fluid_data), cpu_place, + static_cast(anakin_data), + tensor->numel() * sizeof(float)); + } +} + +#ifdef PADDLE_WITH_CUDA +template +void AnakinEngine::Execute( + const std::map &inputs, + const std::map &outputs, + 
cudaStream_t stream) { + BindInput(inputs); net_->prediction(); cudaDeviceSynchronize(); for (const auto &output : outputs) { @@ -111,10 +151,11 @@ void AnakinEngine::Execute( } cudaDeviceSynchronize(); } +#endif template void AnakinEngine::Freeze() { - PADDLE_ENFORCE(graph_->Freeze_v3(), "Freeze anakin subgraph."); + PADDLE_ENFORCE(graph_->Freeze(), "Freeze anakin subgraph."); } template @@ -122,6 +163,12 @@ void AnakinEngine::Optimize() { PADDLE_ENFORCE(graph_->Optimize(), "Graph optimization."); } +template +void AnakinEngine::RegistBlock( + ::anakin::PBlock *block_p) { + PADDLE_ENFORCE(graph_->RegistBlock(block_p), "Block register."); +} + template std::unique_ptr> AnakinEngine::Clone() { @@ -130,7 +177,24 @@ AnakinEngine::Clone() { return std::unique_ptr(engine); } +#ifdef PADDLE_WITH_CUDA template class AnakinEngine<::anakin::saber::NV, ::anakin::Precision::FP32>; +template class AnakinEngineManager<::anakin::saber::NV, + ::anakin::Precision::FP32>; + +template class AnakinEngine<::anakin::saber::NV, ::anakin::Precision::INT8>; +template class AnakinEngineManager<::anakin::saber::NV, + ::anakin::Precision::INT8>; +#endif + +template class AnakinEngine<::anakin::saber::X86, ::anakin::Precision::FP32>; +template class AnakinEngineManager<::anakin::saber::X86, + ::anakin::Precision::FP32>; +template class AnakinEngine<::anakin::saber::X86, ::anakin::Precision::INT8>; +template class AnakinEngineManager<::anakin::saber::X86, + ::anakin::Precision::INT8>; + +// template class AnakinEngine<::anakin::saber::X86, ::anakin::Precision::FP32>; } // namespace anakin } // namespace inference } // namespace paddle diff --git a/paddle/fluid/inference/anakin/engine.h b/paddle/fluid/inference/anakin/engine.h index 4845ffdf5b9dcfa99d1f421d47328beb4b196298..fb40f56511ba255413d422f156f4265102616d03 100644 --- a/paddle/fluid/inference/anakin/engine.h +++ b/paddle/fluid/inference/anakin/engine.h @@ -32,7 +32,6 @@ #include "saber/saber_types.h" using anakin::Precision; -using anakin::saber::NV; namespace anakin { @@ -58,9 +57,11 @@ class AnakinEngine { public: explicit AnakinEngine( bool need_summary = false, int device = 0, int max_batch_size = 1, - std::map> max_input_shape = {}); + std::map> max_input_shape = {}, + std::vector program_inputs = {}, + bool auto_config_layout = false); ~AnakinEngine(); - void InitGraph(); + void InitNet(); void SetInputShape(const std::string &name, std::vector shape); void AddOp(const std::string &name, const std::string &type, const std::vector &inputs, @@ -81,20 +82,35 @@ class AnakinEngine { void SetMaxInputShape(std::map> shape) { max_input_shape_ = shape; } + const std::vector &GetScalableInputs() { + return program_inputs_; + } + void SetScalableInputs(std::vector program_inputs) { + program_inputs_ = program_inputs; + } int GetMaxBatchSize() { return max_batch_size_; } void Freeze(); void Optimize(); - void AllocTmpMem() { - PADDLE_ENFORCE(net_->alloc_memory_first(*graph_), - "anakin alloc temp memory first failed"); - } + void RegistBlock(::anakin::PBlock *block_p); void Save(std::string path) { graph_->save(path); } - bool IsInit() { return initialized_; } int GetDevice() { return device_; } + void AddTensorScale(const std::string &tensor_name, float scale) { + tensor_scales_[tensor_name] = scale; + } + std::unordered_map GetTensorScales() { + return tensor_scales_; + } + void Execute(const std::map &inputs, + const std::map &outputs); +#ifdef PADDLE_WITH_CUDA void Execute(const std::map &inputs, const std::map &outputs, cudaStream_t stream); +#endif + + private: 
+ void BindInput(const std::map &inputs); private: bool initialized_{false}; @@ -103,27 +119,33 @@ class AnakinEngine { int device_; std::unique_ptr graph_; std::unique_ptr net_; + std::vector program_inputs_; + std::unordered_map tensor_scales_; + // Always be false in gpu mode but true in most cpu cases. + bool auto_config_layout_; }; +template class AnakinEngineManager { - using AnakinNvEngineT = AnakinEngine; + using AnakinEngineT = AnakinEngine; public: bool HasEngine(const std::string &name) const { if (engines_.count(name) == 0) return false; return engines_.at(name).get() != nullptr; } - AnakinNvEngineT *Get(const std::string &name) const { + AnakinEngineT *Get(const std::string &name) const { return engines_.at(name).get(); } - AnakinNvEngineT *Create( - bool need_summary, int device, int max_batch_size, - std::map> max_input_shape, - std::string engine_name) { + AnakinEngineT *Create(bool need_summary, int device, int max_batch_size, + std::map> max_input_shape, + std::vector program_inputs, + bool auto_config_layout, std::string engine_name) { std::unique_lock lk(mut_); - auto *p = new AnakinEngine( - need_summary, device, max_batch_size, max_input_shape); + auto *p = new AnakinEngine( + need_summary, device, max_batch_size, max_input_shape, program_inputs, + auto_config_layout); engines_[engine_name].reset(p); return p; } @@ -135,7 +157,7 @@ class AnakinEngineManager { } private: - std::unordered_map> engines_; + std::unordered_map> engines_; std::mutex mut_; }; } // namespace anakin diff --git a/paddle/fluid/inference/anakin/op_teller.cc b/paddle/fluid/inference/anakin/op_teller.cc index 2042fb18ea41f8b41fc35543c7e1b642c4f2fa7c..67b771226c4999a361a818e32e8caedd81723c03 100644 --- a/paddle/fluid/inference/anakin/op_teller.cc +++ b/paddle/fluid/inference/anakin/op_teller.cc @@ -44,6 +44,11 @@ struct SimpleOpTypeSetTeller : public Teller { teller_set.insert("sum"); teller_set.insert("depthwise_conv2d"); teller_set.insert("prior_box"); + teller_set.insert("leaky_relu"); + teller_set.insert("affine_channel"); + teller_set.insert("relu6"); + teller_set.insert("swish"); + teller_set.insert("shuffle_channel"); } bool operator()(const std::string& op_type, diff --git a/paddle/fluid/inference/anakin/test_anakin_engine.cc b/paddle/fluid/inference/anakin/test_anakin_engine.cc index 8fd6b8bec9ada6dec67fd24a2457713203431ebf..422f415a5db62d9408834f600f875d7825d44952 100644 --- a/paddle/fluid/inference/anakin/test_anakin_engine.cc +++ b/paddle/fluid/inference/anakin/test_anakin_engine.cc @@ -19,7 +19,6 @@ limitations under the License. 
*/ #include "paddle/fluid/inference/anakin/engine.h" -using anakin::graph::GraphGlobalMem; using anakin::AK_FLOAT; using anakin::Precision; using anakin::saber::NV; @@ -52,11 +51,9 @@ TEST_F(TestAnakinEngine, Execute) { engine_->AddOpAttr("op1", "axis", 1); std::vector<int> shape = {1, 1, 1, 2}; Shape tmp_shape(shape); - // PBlock<NV> weight1(tmp_shape); - auto *weight1 = - GraphGlobalMem<NV>::Global().template new_block<AK_FLOAT>(tmp_shape); - // auto *weight1 = new PBlock<NV>(tmp_shape, AK_FLOAT); + PBlock<NV> *weight1 = new PBlock<NV>(tmp_shape, AK_FLOAT); + engine_->RegistBlock(weight1); float *cpu_data = static_cast<float *>(weight1->h_tensor().mutable_data()); cpu_data[0] = 2.; weight1->d_tensor().set_shape(tmp_shape); @@ -68,7 +65,7 @@ TEST_F(TestAnakinEngine, Execute) { // engine_->AddOpAttr("x", "input_shape", input_shape); engine_->SetInputShape("x", {1, 1, 1, 1}); engine_->Optimize(); - engine_->InitGraph(); + engine_->InitNet(); framework::LoDTensor x; framework::LoDTensor y; x.Resize({1, 1, 1, 1}); diff --git a/paddle/fluid/inference/analysis/argument.h b/paddle/fluid/inference/analysis/argument.h index a736ca393ccb7168a9faf650a6bce13f35fffca8..66e8d8b5287178fd00dba963a2f4011ce8d8e51e 100644 --- a/paddle/fluid/inference/analysis/argument.h +++ b/paddle/fluid/inference/analysis/argument.h @@ -64,20 +64,20 @@ struct Argument { bool Has(const std::string& key) const { return valid_fields_.count(key); } -#define DECL_ARGUMENT_FIELD(field__, Field, type__) \ - public: \ - type__& field__() { \ - PADDLE_ENFORCE(Has(#field__)); \ - return field__##_; \ - } \ - void Set##Field(const type__& x) { \ - field__##_ = x; \ - valid_fields_.insert(#field__); \ - } \ - DECL_ARGUMENT_FIELD_VALID(field__); \ - type__* field__##_ptr() { return &field__##_; } \ - \ - private: \ +#define DECL_ARGUMENT_FIELD(field__, Field, type__) \ + public: \ + type__& field__() { \ + PADDLE_ENFORCE(Has(#field__), "There is no such field"); \ + return field__##_; \ + } \ + void Set##Field(const type__& x) { \ + field__##_ = x; \ + valid_fields_.insert(#field__); \ + } \ + DECL_ARGUMENT_FIELD_VALID(field__); \ + type__* field__##_ptr() { return &field__##_; } \ + \ + private: \ type__ field__##_; #define DECL_ARGUMENT_FIELD_VALID(field__) \ @@ -169,7 +169,14 @@ struct Argument { anakin_max_shape_t); DECL_ARGUMENT_FIELD(anakin_max_batch_size, AnakinMaxBatchSize, int); DECL_ARGUMENT_FIELD(anakin_min_subgraph_size, AnakinMinSubgraphSize, int); + DECL_ARGUMENT_FIELD(anakin_precision_mode, AnakinPrecisionMode, + AnalysisConfig::Precision); + DECL_ARGUMENT_FIELD(anakin_auto_config_layout, AnakinAutoConfigLayout, bool); DECL_ARGUMENT_FIELD(use_anakin, UseAnakin, bool); + DECL_ARGUMENT_FIELD(anakin_passes_filter, AnakinPassesFilter, + std::vector<std::string>); + DECL_ARGUMENT_FIELD(anakin_ops_filter, AnakinOpsFilter, + std::vector<std::string>);
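
For reference, a field declared through the macro above, e.g. DECL_ARGUMENT_FIELD(use_anakin, UseAnakin, bool), expands to roughly the following members. This is a reading sketch derived from the macro body in this hunk, with the DECL_ARGUMENT_FIELD_VALID helper omitted:

 public:
  bool& use_anakin() {
    PADDLE_ENFORCE(Has("use_anakin"), "There is no such field");
    return use_anakin_;
  }
  void SetUseAnakin(const bool& x) {
    use_anakin_ = x;                      // store the value
    valid_fields_.insert("use_anakin");   // mark the field as set
  }
  bool* use_anakin_ptr() { return &use_anakin_; }

 private:
  bool use_anakin_;

Every accessor enforces that the field was set before it is read, which is why the new "There is no such field" message improves the failure diagnostics.
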
// Memory optimized related. DECL_ARGUMENT_FIELD(enable_memory_optim, EnableMemoryOptim, bool); diff --git a/paddle/fluid/inference/analysis/ir_pass_manager.cc b/paddle/fluid/inference/analysis/ir_pass_manager.cc index 78e502c670f0eb2480b560964cf31e247990a367..4714c30507c4c3f8978ec10f3b19fd3f8a3b3b3d 100644 --- a/paddle/fluid/inference/analysis/ir_pass_manager.cc +++ b/paddle/fluid/inference/analysis/ir_pass_manager.cc @@ -114,6 +114,7 @@ void IRPassManager::CreatePasses(Argument *argument, if (pass_name == "anakin_subgraph_pass") { pass->Set("program", new framework::ProgramDesc *(&argument->main_program())); + pass->Set("use_gpu", new bool(argument->use_gpu())); pass->Set("gpu_device_id", new int(argument->gpu_device_id())); pass->Set("model_from_memory", new bool(argument->model_from_memory())); pass->Set("engine_opt_info", new std::map<std::string, std::string>( argument->engine_opt_info())); @@ -122,6 +123,13 @@ void IRPassManager::CreatePasses(Argument *argument, pass->Set("max_input_shape", new std::map<std::string, std::vector<int>>( argument->anakin_max_input_shape())); pass->Set("max_batch_size", new int(argument->anakin_max_batch_size())); + bool enable_int8 = + argument->anakin_precision_mode() == AnalysisConfig::Precision::kInt8; + pass->Set("enable_int8", new bool(enable_int8)); + pass->Set("anakin_ops_filter", + new std::vector<std::string>(argument->anakin_ops_filter())); + pass->Set("auto_config_layout", + new bool(argument->anakin_auto_config_layout())); } pre_pass = pass_name; diff --git a/paddle/fluid/inference/analysis/ir_passes/anakin_subgraph_pass.cc b/paddle/fluid/inference/analysis/ir_passes/anakin_subgraph_pass.cc index b8d8b6fed8ca237e87cfc67979ec6ddd340b8916..9586ce3e6b01422db1616060946cf5b11c5a1c29 100644 --- a/paddle/fluid/inference/analysis/ir_passes/anakin_subgraph_pass.cc +++ b/paddle/fluid/inference/analysis/ir_passes/anakin_subgraph_pass.cc @@ -39,8 +39,14 @@ void analysis::AnakinSubgraphPass::ApplyImpl( framework::ir::Graph *graph) const { framework::ir::FusePassBase::Init("anakin_subgraph_pass", graph); - auto teller = [](const framework::ir::Node *node) { - if (!node->IsOp() || !node->Op()) return false; + auto &anakin_ops_filter = Get<std::vector<std::string>>("anakin_ops_filter"); + + auto teller = [&anakin_ops_filter](const framework::ir::Node *node) { + if (!node->IsOp() || !node->Op()) + return false; + else if (std::find(anakin_ops_filter.begin(), anakin_ops_filter.end(), + node->Op()->Type()) != anakin_ops_filter.end()) + return false; return anakin::OpTeller::Global().Tell(node->Op()->Type(), *node->Op()); }; @@ -191,22 +197,78 @@ void AnakinSubgraphPass::CreateAnakinOp( SetAttr(op_desc->Proto(), "engine_key", engine_key); auto max_input_shape = Get<std::map<std::string, std::vector<int>>>("max_input_shape"); - auto max_batch_size = Get<int>("max_batch_size"); + auto program_inputs = program_desc->GetFeedTargetNames(); - auto *anakin_engine = - inference::Singleton<anakin::AnakinEngineManager>::Global().Create( - true, Get<int>("gpu_device_id"), max_batch_size, max_input_shape, - engine_key); + bool use_gpu = Get<bool>("use_gpu"); + SetAttr(op_desc->Proto(), "use_gpu", use_gpu); + bool enable_int8 = Get<bool>("enable_int8"); + SetAttr(op_desc->Proto(), "enable_int8", enable_int8); + if (enable_int8) { + CreateAnakinEngine<::anakin::Precision::INT8>(&block_desc, params, + input_names, output_mapping, + program_inputs, engine_key); + } else { + CreateAnakinEngine<::anakin::Precision::FP32>(&block_desc, params, + input_names, output_mapping, + program_inputs, engine_key); + } +} + +template <::anakin::Precision PrecisionT> +void AnakinSubgraphPass::CreateAnakinEngine( + framework::BlockDesc *block_desc, const std::vector<std::string> &params, + const std::set<std::string> &input_names, + const std::vector<std::string> 
&output_mapping, + const std::vector &program_inputs, + const std::string &engine_key) const { + framework::BlockDesc block_desc_temp(nullptr, block_desc->Proto()); + bool use_gpu = Get("use_gpu"); + auto max_batch_size = Get("max_batch_size"); + auto max_input_shape = + Get>>("max_input_shape"); + bool auto_config_layout = Get("auto_config_layout"); + if (use_gpu) { +#ifdef PADDLE_WITH_CUDA + inference::Singleton< + anakin::AnakinEngineManager<::anakin::saber::NV, PrecisionT>>::Global() + .Create(true, Get("gpu_device_id"), max_batch_size, + max_input_shape, program_inputs, false, engine_key); +#endif + } else { + inference::Singleton< + anakin::AnakinEngineManager<::anakin::saber::X86, PrecisionT>>::Global() + .Create(true, Get("gpu_device_id"), max_batch_size, + max_input_shape, program_inputs, auto_config_layout, + engine_key); + } auto *scope = param_scope(); std::unordered_set param_set(params.begin(), params.end()); - framework::BlockDesc block_desc_temp(nullptr, block_desc.Proto()); - - inference::Singleton::Global() - .ConvertBlockToAnakinEngine( - &block_desc_temp, scope, - std::vector(input_names.begin(), input_names.end()), - param_set, output_mapping, anakin_engine); + if (use_gpu) { +#ifdef PADDLE_WITH_CUDA + auto *anakin_engine = + inference::Singleton>::Global() + .Get(engine_key); + inference::Singleton>::Global() + .ConvertBlockToAnakinEngine( + &block_desc_temp, scope, + std::vector(input_names.begin(), input_names.end()), + param_set, output_mapping, anakin_engine); +#endif + } else { + auto *anakin_engine = + inference::Singleton>::Global() + .Get(engine_key); + inference::Singleton>::Global() + .ConvertBlockToAnakinEngine( + &block_desc_temp, scope, + std::vector(input_names.begin(), input_names.end()), + param_set, output_mapping, anakin_engine); + } } } // namespace analysis diff --git a/paddle/fluid/inference/analysis/ir_passes/anakin_subgraph_pass.h b/paddle/fluid/inference/analysis/ir_passes/anakin_subgraph_pass.h index e80b8bb612096a1da7cd5835c948085d51fdfe7a..4ab2297b2d48876a95f41deb715188b2476b6b38 100644 --- a/paddle/fluid/inference/analysis/ir_passes/anakin_subgraph_pass.h +++ b/paddle/fluid/inference/analysis/ir_passes/anakin_subgraph_pass.h @@ -15,6 +15,7 @@ #pragma once #include #include +#include #include #include #include "paddle/fluid/framework/ir/pass.h" @@ -36,6 +37,13 @@ class AnakinSubgraphPass : public framework::ir::FusePassBase { const std::vector &graph_params, std::vector *repetitive_params) const; void CleanIntermediateOutputs(framework::ir::Node *node); + template <::anakin::Precision PrecisionT> + void CreateAnakinEngine(framework::BlockDesc *block_desc, + const std::vector ¶ms, + const std::set &input_names, + const std::vector &output_mapping, + const std::vector &program_inputs, + const std::string &engine_key) const; }; } // namespace analysis diff --git a/paddle/fluid/inference/analysis/ir_passes/subgraph_util.cc b/paddle/fluid/inference/analysis/ir_passes/subgraph_util.cc index 7c4aab06a1d2b3fadc76b46c7e95cea7818c56e2..8f7c6ac7553676b1fb81fea023e50e56ec1d132f 100644 --- a/paddle/fluid/inference/analysis/ir_passes/subgraph_util.cc +++ b/paddle/fluid/inference/analysis/ir_passes/subgraph_util.cc @@ -100,7 +100,6 @@ void RenameAndGetOutputs( const std::string arg_value = in_var->arguments(k); const std::string arg_value_with_id = arg_value + std::to_string(var2id[arg_value]); - if (input_names_with_id.count(arg_value_with_id)) { replaced_names.push_back(arg_value); if (graph_var_map.count(arg_value)) { @@ -149,7 +148,6 @@ void 
RenameAndGetOutputs( const std::string arg_value = out_var->arguments(k); const std::string arg_value_with_id = arg_value + std::to_string(var2id[arg_value]); - if (graph_var_map.count(arg_value)) { add_block_var(arg_value, arg_value_with_id); } diff --git a/paddle/fluid/inference/analysis/passes/CMakeLists.txt b/paddle/fluid/inference/analysis/passes/CMakeLists.txt index 9d74dc6c211e4fcb6d1e7de5369eee847f49fc78..a8d0c69a54ab39781613d26474098450398d4c1b 100644 --- a/paddle/fluid/inference/analysis/passes/CMakeLists.txt +++ b/paddle/fluid/inference/analysis/passes/CMakeLists.txt @@ -3,11 +3,13 @@ cc_library(ir_analysis_pass SRCS ir_analysis_pass.cc DEPS analysis_pass argument cc_library(memory_optim_pass SRCS memory_optimize_pass.cc DEPS analysis_pass zero_copy_tensor) cc_library(ir_params_sync_among_devices_pass SRCS ir_params_sync_among_devices_pass.cc DEPS analysis_pass argument ir_pass_manager) cc_library(ir_graph_to_program_pass SRCS ir_graph_to_program_pass.cc DEPS analysis_pass graph_to_program_pass) +cc_library(adjust_cudnn_workspace_size_pass SRCS adjust_cudnn_workspace_size_pass.cc DEPS analysis_pass graph_to_program_pass) cc_library(analysis_passes SRCS passes.cc DEPS ir_graph_build_pass ir_analysis_pass ir_params_sync_among_devices_pass + adjust_cudnn_workspace_size_pass memory_optim_pass ir_graph_to_program_pass ) diff --git a/paddle/fluid/inference/analysis/passes/adjust_cudnn_workspace_size_pass.cc b/paddle/fluid/inference/analysis/passes/adjust_cudnn_workspace_size_pass.cc new file mode 100644 index 0000000000000000000000000000000000000000..0470e0d5a247163ecd7e7dd1e8f88e6b71ae93d7 --- /dev/null +++ b/paddle/fluid/inference/analysis/passes/adjust_cudnn_workspace_size_pass.cc @@ -0,0 +1,43 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#include "paddle/fluid/inference/analysis/passes/adjust_cudnn_workspace_size_pass.h" + +namespace paddle { +namespace inference { +namespace analysis { + +void AdjustCudnnWorkSpacePass::RunImpl(Argument* argument) { + if (!argument->use_gpu()) return; + auto& graph = argument->main_graph(); + auto nodes = graph.Nodes(); + const int cudnn_workspace_size_MB = 64; + const std::string attr_name = "workspace_size_MB"; + + for (auto& node : nodes) { + if (!node->IsOp()) continue; + auto* op_desc = node->Op(); + if (!op_desc->HasAttr(attr_name)) continue; + op_desc->SetAttr(attr_name, cudnn_workspace_size_MB); + op_desc->Flush(); + } +} + +std::string AdjustCudnnWorkSpacePass::repr() const { + return "adjust-cudnn-work-space-pass"; +} + +} // namespace analysis +} // namespace inference +} // namespace paddle diff --git a/paddle/fluid/inference/analysis/passes/adjust_cudnn_workspace_size_pass.h b/paddle/fluid/inference/analysis/passes/adjust_cudnn_workspace_size_pass.h new file mode 100644 index 0000000000000000000000000000000000000000..65d1c545313e110028a92776e73a070d32010420 --- /dev/null +++ b/paddle/fluid/inference/analysis/passes/adjust_cudnn_workspace_size_pass.h @@ -0,0 +1,41 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once + +#include +#include + +#include "paddle/fluid/framework/ir/fuse_pass_base.h" +#include "paddle/fluid/framework/scope.h" +#include "paddle/fluid/inference/analysis/analysis_pass.h" +#include "paddle/fluid/platform/place.h" + +namespace paddle { +namespace inference { +namespace analysis { + +/* + * The default cudnn workspace is 4G, we set it to 64M in this pass, which + * is applicable for most inference tasks. + */ +class AdjustCudnnWorkSpacePass : public AnalysisPass { + public: + void RunImpl(Argument *argument) override; + std::string repr() const override; +}; + +} // namespace analysis +} // namespace inference +} // namespace paddle diff --git a/paddle/fluid/inference/analysis/passes/passes.cc b/paddle/fluid/inference/analysis/passes/passes.cc index 161b127d6d5ceb3e8b9c1cf98c69eb0387bfb905..a55904ed536bad31c82888ede2db3178f3fd5e47 100644 --- a/paddle/fluid/inference/analysis/passes/passes.cc +++ b/paddle/fluid/inference/analysis/passes/passes.cc @@ -13,6 +13,7 @@ // limitations under the License. 
#include "paddle/fluid/inference/analysis/passes/passes.h" +#include "paddle/fluid/inference/analysis/passes/adjust_cudnn_workspace_size_pass.h" #include "paddle/fluid/inference/analysis/passes/ir_analysis_pass.h" #include "paddle/fluid/inference/analysis/passes/ir_graph_build_pass.h" #include "paddle/fluid/inference/analysis/passes/ir_graph_to_program_pass.h" @@ -35,6 +36,8 @@ PassRegistry::PassRegistry() { passes_.emplace( "ir_params_sync_among_devices_pass", std::unique_ptr(new IrParamsSyncAmongDevicesPass)); + passes_.emplace("adjust_cudnn_workspace_size_pass", + std::unique_ptr(new AdjustCudnnWorkSpacePass)); passes_.emplace( "ir_graph_to_program_pass", std::unique_ptr(new IrGraphToProgramPass)); diff --git a/paddle/fluid/inference/analysis/passes/passes.h b/paddle/fluid/inference/analysis/passes/passes.h index ea07e0dcbd992c9d10c6662909798ef79a01e3a7..8a13091d083e51ecc84e6790f973ffa39ba5a6b9 100644 --- a/paddle/fluid/inference/analysis/passes/passes.h +++ b/paddle/fluid/inference/analysis/passes/passes.h @@ -14,7 +14,9 @@ #pragma once +#include #include +#include #include "paddle/fluid/inference/analysis/analysis_pass.h" namespace paddle { diff --git a/paddle/fluid/inference/api/analysis_config.cc b/paddle/fluid/inference/api/analysis_config.cc index b54ea269ff250f02b6331807237e10ee65b0b0b4..8b940b67e3f9c3e11bb8e15df1a8674bda0c06d0 100644 --- a/paddle/fluid/inference/api/analysis_config.cc +++ b/paddle/fluid/inference/api/analysis_config.cc @@ -116,6 +116,10 @@ AnalysisConfig::AnalysisConfig(const AnalysisConfig &other) { CP_MEMBER(anakin_max_batchsize_); CP_MEMBER(anakin_max_input_shape_); CP_MEMBER(anakin_min_subgraph_size_); + CP_MEMBER(anakin_precision_mode_); + CP_MEMBER(anakin_auto_config_layout_); + CP_MEMBER(anakin_passes_filter_); + CP_MEMBER(anakin_ops_filter_); // Ir related. 
CP_MEMBER(enable_ir_optim_); @@ -269,13 +273,18 @@ void AnalysisConfig::Update() { PADDLE_ENFORCE(!use_tensorrt_, "Anakin sub-graph and TensorRT sub-graph are not allowed to " "run at the same time!"); - PADDLE_ENFORCE( - use_gpu_, - "Anakin sub-graph engine need gpu, please use the EnableGpu API."); + if (use_gpu_) { + LOG(INFO) << "Run Anakin GPU mode"; + } else { + LOG(INFO) << "Run Anakin CPU mode"; + } pass_builder()->ClearPasses(); for (const auto &pass : kAnakinSubgraphPasses) { - pass_builder()->AppendPass(pass); + if (std::find(anakin_passes_filter_.begin(), anakin_passes_filter_.end(), + pass) == anakin_passes_filter_.end()) { + pass_builder()->AppendPass(pass); + } } } @@ -390,11 +399,17 @@ void AnalysisConfig::SwitchIrDebug(int x) { } void AnalysisConfig::EnableAnakinEngine( int max_batch_size, std::map<std::string, std::vector<int>> max_input_shape, - int min_subgraph_size) { + int min_subgraph_size, AnalysisConfig::Precision precision_mode, + bool auto_config_layout, std::vector<std::string> passes_filter, + std::vector<std::string> ops_filter) { anakin_max_batchsize_ = max_batch_size; anakin_max_input_shape_ = max_input_shape; anakin_min_subgraph_size_ = min_subgraph_size; + anakin_passes_filter_ = passes_filter; + anakin_ops_filter_ = ops_filter; use_anakin_ = true; + anakin_precision_mode_ = precision_mode; + anakin_auto_config_layout_ = auto_config_layout; Update(); } } // namespace paddle diff --git a/paddle/fluid/inference/api/analysis_predictor.cc b/paddle/fluid/inference/api/analysis_predictor.cc index a84c909b3b7287ddc56dce8df6db3c91c338ecfa..e57d3a80456767848143412b2524f94fa09c7c13 100644 --- a/paddle/fluid/inference/api/analysis_predictor.cc +++ b/paddle/fluid/inference/api/analysis_predictor.cc @@ -120,7 +120,11 @@ bool AnalysisPredictor::PrepareScope( scope_ = parent_scope; status_is_cloned_ = true; } else { - paddle::framework::InitDevices(false); + if (config_.use_gpu_) { + paddle::framework::InitDevices(false, {config_.device_id_}); + } else { + paddle::framework::InitDevices(false, {}); + } scope_.reset(new paddle::framework::Scope()); status_is_cloned_ = false; } @@ -383,10 +387,14 @@ void AnalysisPredictor::PrepareArgument() { argument_.SetTensorRtUseStaticEngine(config_.trt_use_static_engine_); } - if (config_.use_gpu() && config_.anakin_engine_enabled()) { + if (config_.anakin_engine_enabled()) { argument_.SetAnakinMaxBatchSize(config_.anakin_max_batchsize_); argument_.SetAnakinMaxInputShape(config_.anakin_max_input_shape_); argument_.SetAnakinMinSubgraphSize(config_.anakin_min_subgraph_size_); + argument_.SetAnakinPrecisionMode(config_.anakin_precision_mode_); + argument_.SetAnakinAutoConfigLayout(config_.anakin_auto_config_layout_); + argument_.SetAnakinPassesFilter(config_.anakin_passes_filter_); + argument_.SetAnakinOpsFilter(config_.anakin_ops_filter_); LOG(INFO) << "Anakin subgraph engine is enabled"; } @@ -455,6 +463,8 @@ std::unique_ptr<PaddlePredictor> CreatePaddlePredictor< std::string flag = "--fraction_of_gpu_memory_to_use=" + std::to_string(fraction_of_gpu_memory); flags.push_back(flag); + flags.push_back("--selected_gpus=" + + std::to_string(config.gpu_device_id())); VLOG(3) << "set flag: " << flag; framework::InitGflags(flags); } @@ -929,4 +939,9 @@ USE_ANAKIN_CONVERTER(density_prior_box); USE_ANAKIN_CONVERTER(dropout); USE_ANAKIN_CONVERTER(sum); USE_ANAKIN_CONVERTER(prior_box); +USE_ANAKIN_CONVERTER(leaky_relu); +USE_ANAKIN_CONVERTER(affine_channel); +USE_ANAKIN_CONVERTER(relu6); +USE_ANAKIN_CONVERTER(swish); +USE_ANAKIN_CONVERTER(shuffle_channel); #endif
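
With the extended signature, opting in looks like the following minimal sketch. The model path and the input name "image" with its shape are illustrative placeholders, not values taken from this patch:

  AnalysisConfig config;
  config.SetModel("/path/to/model");       // hypothetical model directory
  config.EnableUseGpu(100 /*MB pool*/, 0); // omit to exercise the new Anakin CPU mode
  config.EnableAnakinEngine(
      /*max_batch_size=*/1,
      /*max_input_shape=*/{{"image", {1, 3, 224, 224}}},
      /*min_subgraph_size=*/6,
      AnalysisConfig::Precision::kFloat32,
      /*auto_config_layout=*/false);
  auto predictor = CreatePaddlePredictor(config);  // ConfigT deduced as AnalysisConfig

Note that auto_config_layout only takes effect on the X86 path; the subgraph pass shown earlier hard-codes it to false whenever use_gpu is set.
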
diff --git a/paddle/fluid/inference/api/helper.h b/paddle/fluid/inference/api/helper.h index c89dd41e0a6283e0723e2925f28c0372cda6a2b2..ab7f55337488f9e4c953210124e47c12e26ed6b1 100644 --- a/paddle/fluid/inference/api/helper.h +++ b/paddle/fluid/inference/api/helper.h @@ -26,14 +26,20 @@ #include #include #include +#include "paddle/fluid/framework/data_type.h" #include "paddle/fluid/inference/api/paddle_inference_api.h" #include "paddle/fluid/platform/enforce.h" #include "paddle/fluid/platform/port.h" #include "paddle/fluid/string/printf.h" +extern std::string paddle::framework::DataTypeToString( + const framework::proto::VarType::Type type); + namespace paddle { namespace inference { +using paddle::framework::DataTypeToString; + // Timer for timer class Timer { public: @@ -267,17 +273,20 @@ static std::string DescribeZeroCopyTensor(const ZeroCopyTensor &tensor) { } static void PrintTime(int batch_size, int repeat, int num_threads, int tid, - double batch_latency, int epoch = 1) { + double batch_latency, int epoch = 1, + const framework::proto::VarType::Type data_type = + framework::proto::VarType::FP32) { PADDLE_ENFORCE(batch_size > 0, "Non-positive batch size."); double sample_latency = batch_latency / batch_size; LOG(INFO) << "====== threads: " << num_threads << ", thread id: " << tid << " ======"; - LOG(INFO) << "====== batch_size: " << batch_size << ", iterations: " << epoch + LOG(INFO) << "====== batch size: " << batch_size << ", iterations: " << epoch << ", repetitions: " << repeat << " ======"; LOG(INFO) << "====== batch latency: " << batch_latency << "ms, number of samples: " << batch_size * epoch << ", sample latency: " << sample_latency - << "ms, fps: " << 1000.f / sample_latency << " ======"; + << "ms, fps: " << 1000.f / sample_latency + << ", data type: " << DataTypeToString(data_type) << " ======"; } static bool IsFileExists(const std::string &path) { diff --git a/paddle/fluid/inference/api/paddle_analysis_config.h b/paddle/fluid/inference/api/paddle_analysis_config.h index c67c4b5bd0bfeea6d022f9e821f6d0b877c71d7a..ebe289322bdd32294885ce933b960773733f62f0 100644 --- a/paddle/fluid/inference/api/paddle_analysis_config.h +++ b/paddle/fluid/inference/api/paddle_analysis_config.h @@ -152,7 +152,10 @@ struct AnalysisConfig { void EnableAnakinEngine( int max_batch_size = 1, std::map<std::string, std::vector<int>> max_input_shape = {}, - int min_subgraph_size = 6); + int min_subgraph_size = 6, Precision precision = Precision::kFloat32, + bool auto_config_layout = false, + std::vector<std::string> passes_filter = {}, + std::vector<std::string> ops_filter = {}); /** A boolean state indicating whether the Anakin sub-graph engine is used. */ @@ -291,6 +294,10 @@ struct AnalysisConfig { int anakin_max_batchsize_; int anakin_min_subgraph_size_{6}; std::map<std::string, std::vector<int>> anakin_max_input_shape_; + Precision anakin_precision_mode_; + bool anakin_auto_config_layout_{false}; + std::vector<std::string> anakin_passes_filter_; + std::vector<std::string> anakin_ops_filter_; std::map<std::string, std::string> engine_opt_info_; bool use_mkldnn_quantizer_{false}; diff --git a/paddle/fluid/inference/api/paddle_pass_builder.cc b/paddle/fluid/inference/api/paddle_pass_builder.cc index 2fba560ac2e29fd685c6afaee6055fc11ecd75fa..2a7bd55a76e31a9e4f67ddc49f5dbc2c4eaa2be9 100644 --- a/paddle/fluid/inference/api/paddle_pass_builder.cc +++ b/paddle/fluid/inference/api/paddle_pass_builder.cc @@ -73,15 +73,15 @@ void PaddlePassBuilder::ClearPasses() { passes_.clear(); } // The following passes works for Anakin sub-graph engine. 
const std::vector kAnakinSubgraphPasses({ "infer_clean_graph_pass", // + "quant_conv2d_dequant_fuse_pass", // "simplify_anakin_priorbox_detection_out_pass", // "fillconstant_elementwisemul_fuse", // "fc_fuse_pass", // "conv_elementwise_add_fuse_pass", // - "conv_bn_fuse_pass", // - "conv_elementwise_add_fuse_pass", // "fc_gru_fuse_pass", // - "quant_conv2d_dequant_fuse_pass", // - "anakin_subgraph_pass", + "shuffle_channel_detect_pass", // + "anakin_subgraph_pass", // + "fc_gru_fuse_pass", // }); GpuPassStrategy::GpuPassStrategy() : PassStrategy({}) { diff --git a/paddle/fluid/inference/api/paddle_pass_builder.h b/paddle/fluid/inference/api/paddle_pass_builder.h index 09ef195d5e66aff0cef17f1594de34c656187a35..057e7dc65d5fd41212cbee77a2a4f4431b011182 100644 --- a/paddle/fluid/inference/api/paddle_pass_builder.h +++ b/paddle/fluid/inference/api/paddle_pass_builder.h @@ -73,7 +73,8 @@ class PaddlePassBuilder { protected: std::vector analysis_passes_{ {"ir_graph_build_pass", "ir_analysis_pass", - "ir_params_sync_among_devices_pass"}}; + "ir_params_sync_among_devices_pass", + "adjust_cudnn_workspace_size_pass"}}; std::vector passes_; }; diff --git a/paddle/fluid/inference/check_symbol.sh b/paddle/fluid/inference/check_symbol.sh index 12b7b3e7e5982f193e48596b867953fc93841b61..b6b7d1f20baf77c89ecbc149668da2ff2d2e3b5e 100755 --- a/paddle/fluid/inference/check_symbol.sh +++ b/paddle/fluid/inference/check_symbol.sh @@ -4,7 +4,7 @@ lib=$1 if [ $# -ne 1 ]; then echo "No input library"; exit -1 ; fi num_paddle_syms=$(nm -D ${lib} | grep paddle | wc -l) -num_google_syms=$(nm -D ${lib} | grep google | grep -v paddle | grep T | wc -l) +num_google_syms=$(nm -D ${lib} | grep google | grep -v paddle | grep "T " | wc -l) if [ $num_paddle_syms -le 0 ]; then echo "Have no paddle symbols"; exit -1 ; fi if [ $num_google_syms -ge 1 ]; then echo "Have some google symbols"; exit -1 ; fi diff --git a/paddle/fluid/inference/tests/api/tester_helper.h b/paddle/fluid/inference/tests/api/tester_helper.h index 10fc7556994b93776ed15184ba17820cebae07a0..a50810948ff8cb9e0bb92c287a7ab3945d39e089 100644 --- a/paddle/fluid/inference/tests/api/tester_helper.h +++ b/paddle/fluid/inference/tests/api/tester_helper.h @@ -65,6 +65,8 @@ DECLARE_int32(paddle_num_threads); namespace paddle { namespace inference { +using paddle::framework::proto::VarType; + template constexpr paddle::PaddleDType GetPaddleDType(); @@ -293,7 +295,8 @@ void ConvertPaddleTensorToZeroCopyTensor( void PredictionWarmUp(PaddlePredictor *predictor, const std::vector> &inputs, std::vector> *outputs, - int num_threads, int tid) { + int num_threads, int tid, + const VarType::Type data_type = VarType::FP32) { int batch_size = FLAGS_batch_size; LOG(INFO) << "Running thread " << tid << ", warm up run..."; if (FLAGS_zero_copy) { @@ -307,7 +310,7 @@ void PredictionWarmUp(PaddlePredictor *predictor, } else { predictor->ZeroCopyRun(); } - PrintTime(batch_size, 1, num_threads, tid, warmup_timer.toc(), 1); + PrintTime(batch_size, 1, num_threads, tid, warmup_timer.toc(), 1, data_type); if (FLAGS_profile) { paddle::platform::ResetProfiler(); } @@ -316,7 +319,8 @@ void PredictionWarmUp(PaddlePredictor *predictor, void PredictionRun(PaddlePredictor *predictor, const std::vector> &inputs, std::vector> *outputs, - int num_threads, int tid) { + int num_threads, int tid, + const VarType::Type data_type = VarType::FP32) { int num_times = FLAGS_repeat; int iterations = inputs.size(); // process the whole dataset ... 
if (FLAGS_iterations > 0 && @@ -355,7 +359,7 @@ void PredictionRun(PaddlePredictor *predictor, auto batch_latency = elapsed_time / (iterations * num_times); PrintTime(FLAGS_batch_size, num_times, num_threads, tid, batch_latency, - iterations); + iterations, data_type); if (FLAGS_record_benchmark) { Benchmark benchmark; benchmark.SetName(FLAGS_model_name); @@ -368,12 +372,13 @@ void PredictionRun(PaddlePredictor *predictor, void TestOneThreadPrediction( const PaddlePredictor::Config *config, const std::vector> &inputs, - std::vector> *outputs, bool use_analysis = true) { + std::vector> *outputs, bool use_analysis = true, + const VarType::Type data_type = VarType::FP32) { auto predictor = CreateTestPredictor(config, use_analysis); if (FLAGS_warmup) { - PredictionWarmUp(predictor.get(), inputs, outputs, 1, 0); + PredictionWarmUp(predictor.get(), inputs, outputs, 1, 0, data_type); } - PredictionRun(predictor.get(), inputs, outputs, 1, 0); + PredictionRun(predictor.get(), inputs, outputs, 1, 0, data_type); } void TestMultiThreadPrediction( @@ -505,13 +510,14 @@ void CompareQuantizedAndAnalysis( auto *cfg = reinterpret_cast(config); PrintConfig(cfg, true); std::vector> analysis_outputs; - TestOneThreadPrediction(cfg, inputs, &analysis_outputs, true); + TestOneThreadPrediction(cfg, inputs, &analysis_outputs, true, VarType::FP32); LOG(INFO) << "--- INT8 prediction start ---"; auto *qcfg = reinterpret_cast(qconfig); PrintConfig(qcfg, true); std::vector> quantized_outputs; - TestOneThreadPrediction(qcfg, inputs, &quantized_outputs, true); + TestOneThreadPrediction(qcfg, inputs, &quantized_outputs, true, + VarType::INT8); LOG(INFO) << "--- comparing outputs --- "; CompareTopAccuracy(quantized_outputs, analysis_outputs); @@ -640,7 +646,7 @@ static bool CompareTensorData(const framework::LoDTensor &a, } for (size_t i = 0; i < a_size; i++) { - if (a.type() == framework::proto::VarType::FP32) { + if (a.type() == VarType::FP32) { const auto *a_data = a.data(); const auto *b_data = b.data(); if (std::abs(a_data[i] - b_data[i]) > 1e-3) { @@ -649,7 +655,7 @@ static bool CompareTensorData(const framework::LoDTensor &a, b_data[i]); return false; } - } else if (a.type() == framework::proto::VarType::INT64) { + } else if (a.type() == VarType::INT64) { const auto *a_data = a.data(); const auto *b_data = b.data(); if (std::abs(a_data[i] - b_data[i]) > 1e-3) { diff --git a/paddle/fluid/memory/allocation/CMakeLists.txt b/paddle/fluid/memory/allocation/CMakeLists.txt index ac77c3d2a500816a4eb41ed13f23ee628290f287..3dbbea3dd0beb78997f6e8f6b7451ea806bce9b3 100644 --- a/paddle/fluid/memory/allocation/CMakeLists.txt +++ b/paddle/fluid/memory/allocation/CMakeLists.txt @@ -61,6 +61,10 @@ nv_test(allocation_and_eigen_test SRCS allocation_and_eigen_test.cu DEPS allocat cc_test(retry_allocator_test SRCS retry_allocator_test.cc DEPS retry_allocator best_fit_allocator locked_allocator cpu_allocator) +if (WITH_TESTING) + set_tests_properties(retry_allocator_test PROPERTIES LABELS "RUN_TYPE=EXCLUSIVE") +endif() + cc_test(allocator_facade_abs_flags_test SRCS allocator_facade_abs_flags_test.cc DEPS allocator_facade) cc_test(allocator_facade_frac_flags_test SRCS allocator_facade_frac_flags_test.cc DEPS allocator_facade) diff --git a/paddle/fluid/operators/activation_op.cc b/paddle/fluid/operators/activation_op.cc index 2100264823bb6b9e20b15389e044c6c6c9cd6fb9..f93474a122f8f9f812750b94cf20c5c94b5b0823 100644 --- a/paddle/fluid/operators/activation_op.cc +++ b/paddle/fluid/operators/activation_op.cc @@ -644,6 +644,7 @@ class 
LeakyReluDoubleGrad : public framework::OperatorWithKernel { // // ReluGrad: dx = dy if y >= 0 else 0 // ReluGradGrad: ddy = ddx if y >= 0 else 0 +// dy = 0 // class ReluDoubleGradMaker : public ::paddle::framework::SingleGradOpDescMaker { public: @@ -655,11 +656,12 @@ class ReluDoubleGradMaker : public ::paddle::framework::SingleGradOpDescMaker { op->SetType("relu_grad_grad"); // input1: Out op->SetInput("Out", Input("Out")); - // X@GRAD@GRAD: ddx + // input2: ddx op->SetInput("DDX", OutputGrad(framework::GradVarName("X"))); op->SetAttrMap(Attrs()); - // Out@GRAD@GRAD: ddy + // output1: dy op->SetOutput("DOut", InputGrad("Out")); + // output2: ddy op->SetOutput("DDOut", InputGrad(framework::GradVarName("Out"))); return std::unique_ptr<::paddle::framework::OpDesc>(op); } diff --git a/paddle/fluid/operators/anakin/anakin_engine_op.h b/paddle/fluid/operators/anakin/anakin_engine_op.h index e4feb14b2271a50c8e8fb7ce4c81dd6c99042e21..11c394c76cd9828d4ff84712a23236dfc8f919e0 100644 --- a/paddle/fluid/operators/anakin/anakin_engine_op.h +++ b/paddle/fluid/operators/anakin/anakin_engine_op.h @@ -34,28 +34,17 @@ limitations under the License. */ namespace paddle { namespace operators { -using FluidDT = framework::proto::VarType_Type; using inference::Singleton; - -using anakin::graph::GraphGlobalMem; -using anakin::AK_FLOAT; -using anakin::Precision; -using anakin::saber::NV; -using anakin::saber::X86; -using anakin::saber::Shape; -using anakin::PBlock; -using anakin::PTuple; using inference::anakin::AnakinEngine; class AnakinEngineOp : public framework::OperatorBase { - using AnakinNvEngineT = AnakinEngine<NV, ::anakin::Precision::FP32>; - private: std::vector<std::string> input_names_; std::unordered_set<std::string> param_names_; - mutable AnakinNvEngineT *anakin_engine_; std::string engine_key_; std::string engine_serialized_data_; + bool use_gpu_; + bool enable_int8_; public: AnakinEngineOp(const std::string &type, @@ -66,10 +55,11 @@ class AnakinEngineOp : public framework::OperatorBase { input_names_ = Inputs("Xs"); engine_key_ = Attr<std::string>("engine_key"); auto params = Attr<std::vector<std::string>>("parameters"); + use_gpu_ = Attr<bool>("use_gpu"); + enable_int8_ = Attr<bool>("enable_int8"); for (const auto &param : params) { param_names_.insert(param); } - anakin_engine_ = nullptr; } protected: @@ -80,19 +70,12 @@ class AnakinEngineOp : public framework::OperatorBase { void RunAnakin(const framework::Scope &scope, const platform::Place &dev_place) const { - auto *engine = GetEngine(scope, dev_place); - platform::DeviceContextPool &pool = platform::DeviceContextPool::Instance(); - auto &dev_ctx = *pool.Get(dev_place); - auto stream = - reinterpret_cast<const platform::CUDADeviceContext &>(dev_ctx).stream(); - PADDLE_ENFORCE(!input_names_.empty(), "should pass more than one inputs"); std::vector<std::string> output_maps = Attr<std::vector<std::string>>("output_name_mapping"); std::map<std::string, framework::LoDTensor *> inputs; - // Convert input tensor from fluid to engine. 
for (const auto &x : Inputs("Xs")) { if (param_names_.count(x)) continue; auto &t = @@ -110,17 +93,38 @@ class AnakinEngineOp : public framework::OperatorBase { outputs.insert({output_maps[output_index], fluid_t}); output_index += 1; } - engine->Execute(inputs, outputs, stream); + if (enable_int8_) { + Execute<::anakin::Precision::INT8>(inputs, outputs, dev_place); + } else { + Execute<::anakin::Precision::FP32>(inputs, outputs, dev_place); + } } - AnakinNvEngineT *GetEngine(const framework::Scope &scope, - const platform::Place &dev_place) const { - if (anakin_engine_ == nullptr) { - anakin_engine_ = - inference::Singleton::Global() + template <::anakin::Precision PrecisionT> + void Execute(const std::map &inputs, + const std::map &outputs, + const platform::Place &dev_place) const { + if (use_gpu_) { +#ifdef PADDLE_WITH_CUDA + platform::DeviceContextPool &pool = + platform::DeviceContextPool::Instance(); + auto &dev_ctx = *pool.Get(dev_place); + auto stream = + reinterpret_cast(dev_ctx) + .stream(); + auto *engine = + inference::Singleton>::Global() + .Get(engine_key_); + engine->Execute(inputs, outputs, stream); +#endif + } else { + auto *engine = + inference::Singleton>::Global() .Get(engine_key_); + engine->Execute(inputs, outputs); } - return anakin_engine_; } }; diff --git a/paddle/fluid/operators/concat_op.cc b/paddle/fluid/operators/concat_op.cc index b1a6d66b80efdae3e78d7c3321a6107d2dd607aa..029b05bb662440bcf94521376b56d234a828ddf5 100644 --- a/paddle/fluid/operators/concat_op.cc +++ b/paddle/fluid/operators/concat_op.cc @@ -59,18 +59,13 @@ class ConcatOp : public framework::OperatorWithKernel { } } } else { - if (ctx->IsRuntime()) { + bool check_shape = + ctx->IsRuntime() || (out_dims[j] > 0 && ins[i][j] > 0); + if (check_shape) { // check all shape in run time PADDLE_ENFORCE_EQ(out_dims[j], ins[i][j], "Input tensors should have the same " "elements except the specify axis."); - } else { - // not check -1 with other in compile time - if (out_dims[j] > 0 && ins[i][j] > 0) { - PADDLE_ENFORCE_EQ(out_dims[j], ins[i][j], - "Input tensors should have the same " - "elements except the specify axis."); - } } } } diff --git a/paddle/fluid/operators/controlflow/fetch_op.cc b/paddle/fluid/operators/controlflow/fetch_op.cc index c197b45e8196a47def6465128e8ca39d8daefed6..85d36c5c3af966c813e03a0de1a3f191d1ecde3a 100644 --- a/paddle/fluid/operators/controlflow/fetch_op.cc +++ b/paddle/fluid/operators/controlflow/fetch_op.cc @@ -54,7 +54,13 @@ class FetchOp : public framework::OperatorBase { // FIXME(yuyang18): Should we assume the fetch operator always generate // CPU outputs? - TensorCopySync(src_item, platform::CPUPlace(), &dst_item); + if (src_item.IsInitialized() && src_item.numel() > 0) { + TensorCopySync(src_item, platform::CPUPlace(), &dst_item); + } else { + // Not copy, if the src tensor is empty. + dst_item.clear(); + dst_item.Resize({0}); + } dst_item.set_lod(src_item.lod()); VLOG(3) << "Fetch variable " << fetch_var_name << " to " << out_name; diff --git a/paddle/fluid/operators/conv_cudnn_helper.h b/paddle/fluid/operators/conv_cudnn_helper.h new file mode 100644 index 0000000000000000000000000000000000000000..c2ad468fa6029158e6f5aaaafda1b6125fec954f --- /dev/null +++ b/paddle/fluid/operators/conv_cudnn_helper.h @@ -0,0 +1,271 @@ +/* Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#pragma once + +#include +#include "paddle/fluid/framework/operator_kernel_configs.h" +#include "paddle/fluid/operators/conv_cudnn_op_cache.h" +#include "paddle/fluid/platform/cudnn_desc.h" + +namespace paddle { +namespace operators { + +using framework::AlgorithmsCache; + +struct ConvArgs { + cudnnHandle_t handle; + platform::TensorDescriptor idesc, odesc; + platform::FilterDescriptor wdesc; + platform::ConvolutionDescriptor cdesc; + const framework::Tensor *x, *w, *o; + + // strides + std::vector s; + // paddings + std::vector p; + // dilations + std::vector d; + + ConvArgs(const framework::Tensor* x, const framework::Tensor* w, + const framework::Tensor* o, const std::vector s, + const std::vector p, const std::vector d) + : x(x), w(w), o(o), s(s), p(p), d(d) {} +}; + +template +struct SearchAlgorithm {}; + +template <> +struct SearchAlgorithm { + using perf_t = cudnnConvolutionFwdAlgoPerf_t; + using algo_t = cudnnConvolutionFwdAlgo_t; + + template + static algo_t Find(const ConvArgs& args, bool exhaustive_search, + bool deterministic, int algo_cache_id, + const framework::ExecutionContext& ctx) { + auto dtype = platform::CudnnDataType::type; + bool exhaustive = (exhaustive_search) & (dtype != CUDNN_DATA_HALF); + + size_t workspace_size_limit = FLAGS_conv_workspace_size_limit * 1024 * 1024; + + algo_t algo; + if (!exhaustive) { + CUDNN_ENFORCE(platform::dynload::cudnnGetConvolutionForwardAlgorithm( + args.handle, args.idesc.desc(), args.wdesc.desc(), args.cdesc.desc(), + args.odesc.desc(), CUDNN_CONVOLUTION_FWD_SPECIFY_WORKSPACE_LIMIT, + workspace_size_limit, &algo)); + VLOG(3) << "choose algo " << algo; + } else { + AlgorithmsCache& algo_cache = + ctx.GetKernelConfig>(algo_cache_id); + auto& dev_ctx = + ctx.template device_context(); + auto workspace_handle = dev_ctx.cudnn_workspace_handle(); + + auto x_dims = framework::vectorize(args.x->dims()); + auto w_dims = framework::vectorize(args.w->dims()); + + algo = algo_cache.GetAlgorithm( + x_dims, w_dims, args.s, args.p, args.d, 0, [&]() { + int returned_algo_count; + std::array perf_stat; + + auto cudnn_find_func = [&](void* cudnn_workspace_ptr) { + CUDNN_ENFORCE( + platform::dynload::cudnnFindConvolutionForwardAlgorithmEx( + args.handle, args.idesc.desc(), args.x->data(), + args.wdesc.desc(), args.w->data(), args.cdesc.desc(), + args.odesc.desc(), const_cast(args.o->data()), + kNUM_CUDNN_FWD_ALGS, &returned_algo_count, + perf_stat.data(), cudnn_workspace_ptr, + workspace_size_limit)); + }; + workspace_handle.RunFuncSync(cudnn_find_func, workspace_size_limit); + + VLOG(3) << "FwdAlgo Perf result: (algo: stat, time, memory)"; + for (int i = 0; i < returned_algo_count; ++i) { + const auto& stat = perf_stat[i]; + VLOG(3) << stat.algo << ": " << stat.status << " " << stat.time + << " " << stat.memory; + } + return perf_stat[0].algo; + }); + } + VLOG(3) << "choose algo " << algo; + return algo; + } + + static size_t GetWorkspaceSize(const ConvArgs& args, algo_t algo) { + size_t workspace_size = 0; + CUDNN_ENFORCE(platform::dynload::cudnnGetConvolutionForwardWorkspaceSize( + args.handle, args.idesc.desc(), args.wdesc.desc(), args.cdesc.desc(), 
+ args.odesc.desc(), algo, &workspace_size)); + return workspace_size; + } +}; + +template <> +struct SearchAlgorithm { + using perf_t = cudnnConvolutionBwdDataAlgoPerf_t; + using algo_t = cudnnConvolutionBwdDataAlgo_t; + + template + static algo_t Find(const ConvArgs& args, bool exhaustive_search, + bool deterministic, int algo_cache_id, + const framework::ExecutionContext& ctx) { + auto dtype = platform::CudnnDataType::type; + bool exhaustive = (exhaustive_search) & (dtype != CUDNN_DATA_HALF); + + size_t workspace_size_limit = FLAGS_conv_workspace_size_limit * 1024 * 1024; + + algo_t algo; + if (!exhaustive && !deterministic) { + CUDNN_ENFORCE(platform::dynload::cudnnGetConvolutionBackwardDataAlgorithm( + args.handle, args.wdesc.desc(), args.idesc.desc(), args.cdesc.desc(), + args.odesc.desc(), CUDNN_CONVOLUTION_BWD_DATA_SPECIFY_WORKSPACE_LIMIT, + workspace_size_limit, &algo)); + } else if (deterministic) { + return CUDNN_CONVOLUTION_BWD_DATA_ALGO_1; + } else { + AlgorithmsCache& algo_cache = + ctx.GetKernelConfig>(algo_cache_id); + auto& dev_ctx = + ctx.template device_context(); + auto workspace_handle = dev_ctx.cudnn_workspace_handle(); + + auto x_dims = framework::vectorize(args.x->dims()); + auto w_dims = framework::vectorize(args.w->dims()); + + algo = algo_cache.GetAlgorithm( + x_dims, w_dims, args.s, args.p, args.d, 0, [&]() { + int returned_algo_count; + std::array perf_stat; + + auto cudnn_find_func = [&](void* cudnn_workspace_ptr) { + CUDNN_ENFORCE( + platform::dynload:: + cudnnFindConvolutionBackwardDataAlgorithmEx( + args.handle, args.wdesc.desc(), args.w->data(), + args.odesc.desc(), args.o->data(), + args.cdesc.desc(), args.idesc.desc(), + const_cast(args.x->data()), + kNUM_CUDNN_BWD_DATA_ALGS, &returned_algo_count, + perf_stat.data(), cudnn_workspace_ptr, + workspace_size_limit)); + }; + workspace_handle.RunFuncSync(cudnn_find_func, workspace_size_limit); + + VLOG(3) << "BwdDataAlgo Perf result: (algo: stat, time, memory)"; + for (int i = 0; i < returned_algo_count; ++i) { + const auto& stat = perf_stat[i]; + VLOG(3) << stat.algo << ": " << stat.status << " " << stat.time + << " " << stat.memory; + } + + return perf_stat[0].algo; + }); + } + VLOG(3) << "choose algo " << algo; + return algo; + } + + static size_t GetWorkspaceSize(const ConvArgs& args, algo_t algo) { + size_t workspace_size = 0; + CUDNN_ENFORCE( + platform::dynload::cudnnGetConvolutionBackwardDataWorkspaceSize( + args.handle, args.wdesc.desc(), args.idesc.desc(), + args.cdesc.desc(), args.odesc.desc(), algo, &workspace_size)); + return workspace_size; + } +}; + +template <> +struct SearchAlgorithm { + using perf_t = cudnnConvolutionBwdFilterAlgoPerf_t; + using algo_t = cudnnConvolutionBwdFilterAlgo_t; + + template + static algo_t Find(const ConvArgs& args, bool exhaustive_search, + bool deterministic, int algo_cache_id, + const framework::ExecutionContext& ctx) { + auto dtype = platform::CudnnDataType::type; + bool exhaustive = (exhaustive_search) & (dtype != CUDNN_DATA_HALF); + + size_t workspace_size_limit = FLAGS_conv_workspace_size_limit * 1024 * 1024; + + algo_t algo; + if (!exhaustive && !deterministic) { + CUDNN_ENFORCE( + platform::dynload::cudnnGetConvolutionBackwardFilterAlgorithm( + args.handle, args.idesc.desc(), args.odesc.desc(), + args.cdesc.desc(), args.wdesc.desc(), + CUDNN_CONVOLUTION_BWD_FILTER_SPECIFY_WORKSPACE_LIMIT, + workspace_size_limit, &algo)); + } else if (deterministic) { + return CUDNN_CONVOLUTION_BWD_FILTER_ALGO_1; + } else { + AlgorithmsCache& algo_cache = + 
ctx.GetKernelConfig<AlgorithmsCache<algo_t>>(algo_cache_id); + auto& dev_ctx = + ctx.template device_context<platform::CUDADeviceContext>(); + auto workspace_handle = dev_ctx.cudnn_workspace_handle(); + + auto x_dims = framework::vectorize(args.x->dims()); + auto w_dims = framework::vectorize(args.w->dims()); + + algo = algo_cache.GetAlgorithm( + x_dims, w_dims, args.s, args.p, args.d, 0, [&]() { + int returned_algo_count; + std::array<perf_t, kNUM_CUDNN_BWD_FILTER_ALGS> perf_stat; + auto cudnn_find_func = [&](void* cudnn_workspace_ptr) { + CUDNN_ENFORCE( + platform::dynload:: + cudnnFindConvolutionBackwardFilterAlgorithmEx( + args.handle, args.idesc.desc(), args.x->data<T>(), + args.odesc.desc(), args.o->data<T>(), + args.cdesc.desc(), args.wdesc.desc(), + const_cast<T*>(args.w->data<T>()), + kNUM_CUDNN_BWD_FILTER_ALGS, &returned_algo_count, + perf_stat.data(), cudnn_workspace_ptr, + workspace_size_limit)); + }; + workspace_handle.RunFuncSync(cudnn_find_func, workspace_size_limit); + + VLOG(3) << "BwdFilterAlgo Perf result: (algo: stat, time, memory)"; + for (int i = 0; i < returned_algo_count; ++i) { + const auto& stat = perf_stat[i]; + VLOG(3) << stat.algo << ": " << stat.status << " " << stat.time + << " " << stat.memory; + } + return perf_stat[0].algo; + }); + } + VLOG(3) << "choose algo " << algo; + return algo; + } + + static size_t GetWorkspaceSize(const ConvArgs& args, algo_t algo) { + size_t workspace_size = 0; + CUDNN_ENFORCE( + platform::dynload::cudnnGetConvolutionBackwardFilterWorkspaceSize( + args.handle, args.idesc.desc(), args.odesc.desc(), + args.cdesc.desc(), args.wdesc.desc(), algo, &workspace_size)); + return workspace_size; + } +}; + +} // namespace operators +} // namespace paddle
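
Taken together, the three SearchAlgorithm specializations in this new header give conv kernels a uniform two-step protocol: pick an algorithm, then size its scratch space. A condensed sketch of the intended call pattern, mirroring the double-grad kernel further below (the names input, filter, output, dev_ctx, ctx and exhaustive_search are assumed from the surrounding kernels):

  ConvArgs args{input, filter, output, strides, paddings, dilations};
  args.handle = dev_ctx.cudnn_handle();
  // ... fill in args.idesc / args.wdesc / args.odesc / args.cdesc ...
  using search = SearchAlgorithm<cudnnConvolutionFwdAlgoPerf_t>;
  auto algo = search::Find<T>(args, exhaustive_search,
                              /*deterministic=*/false,
                              /*algo_cache_id=*/0, ctx);
  size_t workspace_size = search::GetWorkspaceSize(args, algo);

The algo_cache_id argument keeps the cached exhaustive searches apart; the double-grad kernel below uses ids 0, 1 and 2 for its forward, backward-filter and backward-data lookups.
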
diff --git a/paddle/fluid/operators/conv_cudnn_op.cu.cc b/paddle/fluid/operators/conv_cudnn_op.cu.cc index 9a545160a10d4396802e04de0535de053dca6af0..158d6ced274dd33b1378403b325e736037fc042d 100644 --- a/paddle/fluid/operators/conv_cudnn_op.cu.cc +++ b/paddle/fluid/operators/conv_cudnn_op.cu.cc @@ -15,6 +15,7 @@ limitations under the License. */ #include "paddle/fluid/framework/eigen.h" #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/memory/memory.h" +#include "paddle/fluid/operators/conv_cudnn_helper.h" #include "paddle/fluid/operators/conv_cudnn_op_cache.h" #include "paddle/fluid/operators/conv_op.h" #include "paddle/fluid/platform/assert.h" @@ -46,6 +47,23 @@ template <typename T> using ScalingParamType = typename platform::CudnnDataType<T>::ScalingParamType; using framework::AlgorithmsCache; +static inline void GetNCDHW(const framework::DDim& dims, + const DataLayout& layout, int* N, int* C, int* D, + int* H, int* W) { + *N = dims[0]; + *C = layout == DataLayout::kNCHW ? dims[1] : dims[dims.size() - 1]; + int i = layout == DataLayout::kNCHW ? 0 : 1; + if (dims.size() == 5) { + *D = dims[2 - i]; + *H = dims[3 - i]; + *W = dims[4 - i]; + } else { + *D = 1; + *H = dims[2 - i]; + *W = dims[3 - i]; + } +} + template <typename T> class CUDNNConvOpKernel : public framework::OpKernel<T> { public: @@ -99,33 +117,13 @@ class CUDNNConvOpKernel : public framework::OpKernel<T> { cudnnFilterDescriptor_t cudnn_filter_desc = filter_desc.descriptor<T>( layout, framework::vectorize2int(filter->dims()), groups); - int input_channels = input->dims()[1]; - int input_height, input_width, input_depth; - if (input->dims().size() == 5) { - input_depth = input->dims()[2]; - input_height = input->dims()[3]; - input_width = input->dims()[4]; - } else { // dim size is enforced in InferShape - input_depth = 1; - input_height = input->dims()[2]; - input_width = input->dims()[3]; - } + int i_n, i_c, i_d, i_h, i_w; + GetNCDHW(input->dims(), DataLayout::kNCHW, &i_n, &i_c, &i_d, &i_h, &i_w); + int o_n, o_c, o_d, o_h, o_w; + GetNCDHW(output->dims(), DataLayout::kNCHW, &o_n, &o_c, &o_d, &o_h, &o_w); - int output_channels = filter->dims()[0]; - int output_height, output_width, output_depth; - if (output->dims().size() == 5) { - output_depth = output->dims()[2]; - output_height = output->dims()[3]; - output_width = output->dims()[4]; - } else { - output_depth = 1; - output_height = output->dims()[2]; - output_width = output->dims()[3]; - } - int group_offset_in = - input_channels / groups * input_height * input_width * input_depth; - int group_offset_out = - output_channels / groups * output_height * output_width * output_depth; + int group_offset_in = i_c / groups * i_h * i_w * i_d; + int group_offset_out = o_c / groups * o_h * o_w * o_d; int group_offset_filter = filter->numel() / groups; // ------------------- cudnn conv workspace --------------------- size_t workspace_size_in_bytes; // final workspace to allocate. 
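
GetNCDHW collapses the per-rank branching that the deleted lines used to spell out in every kernel. For a hypothetical 4-D NCHW input of shape {8, 3, 224, 224} (chosen only for illustration):

  int n, c, d, h, w;
  GetNCDHW(framework::make_ddim({8, 3, 224, 224}), DataLayout::kNCHW,
           &n, &c, &d, &h, &w);
  // n == 8, c == 3, d == 1 (depth defaults to 1 for 4-D tensors),
  // h == 224, w == 224

so the group offsets reduce to the single expression i_c / groups * i_h * i_w * i_d.
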
@@ -164,6 +162,9 @@ class CUDNNConvOpKernel : public framework::OpKernel<T> { auto workspace_handle = dev_ctx.cudnn_workspace_handle(); auto x_dims = framework::vectorize(input->dims()); auto f_dims = framework::vectorize(filter->dims()); + + // TODO(dangqingqing) simplify the following code by SearchAlgorithm in + // conv_cudnn_helper.h if ((!exhaustive_search) && (!half_float)) { CUDNN_ENFORCE(platform::dynload::cudnnGetConvolutionForwardAlgorithm( handle, cudnn_input_desc, cudnn_filter_desc, cudnn_conv_desc, @@ -315,34 +316,14 @@ class CUDNNConvGradOpKernel : public framework::OpKernel<T> { } #endif - int input_channels = input->dims()[1]; - int input_height, input_width, input_depth; - if (input->dims().size() == 5) { - input_depth = input->dims()[2]; - input_height = input->dims()[3]; - input_width = input->dims()[4]; - } else { // dim size is enforced in InferShape - input_depth = 1; - input_height = input->dims()[2]; - input_width = input->dims()[3]; - } + int i_n, i_c, i_d, i_h, i_w; + GetNCDHW(input->dims(), DataLayout::kNCHW, &i_n, &i_c, &i_d, &i_h, &i_w); + int o_n, o_c, o_d, o_h, o_w; + GetNCDHW(output_grad->dims(), DataLayout::kNCHW, &o_n, &o_c, &o_d, &o_h, + &o_w); - int output_grad_channels = filter->dims()[0]; - int output_grad_height, output_grad_width, output_grad_depth; - if (input->dims().size() == 5) { - output_grad_depth = output_grad->dims()[2]; - output_grad_height = output_grad->dims()[3]; - output_grad_width = output_grad->dims()[4]; - } else { - output_grad_depth = 1; - output_grad_height = output_grad->dims()[2]; - output_grad_width = output_grad->dims()[3]; - } - - int group_offset_in = - input_channels / groups * input_height * input_width * input_depth; - int group_offset_out = output_grad_channels / groups * output_grad_height * - output_grad_width * output_grad_depth; + int group_offset_in = i_c / groups * i_h * i_w * i_d; + int group_offset_out = o_c / groups * o_h * o_w * o_d; int group_offset_filter = filter->numel() / groups; // ------------------- cudnn backward algorithm --------------------- cudnnConvolutionBwdDataAlgo_t data_algo; @@ -367,6 +348,8 @@ class CUDNNConvGradOpKernel : public framework::OpKernel<T> { cudnn_workspace_ptr = static_cast<void *>(cudnn_workspace.data()); } + // TODO(dangqingqing) simplify the following code by SearchAlgorithm in + // conv_cudnn_helper.h auto x_dims = framework::vectorize(input->dims()); auto f_dims = framework::vectorize(filter->dims()); auto handle = dev_ctx.cudnn_handle(); @@ -512,6 +495,212 @@ class CUDNNConvGradOpKernel : public framework::OpKernel<T> { } }; +/* + * Inputs: I, W, dO, ddI, ddW + * Outputs: ddO, dW, dI + * ddO = conv(ddI, W) + conv(I, ddW) + * dW = conv_bp_filter(ddI, dO) + * dI = conv_bp_data(ddW, dO) + */
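
A sketch of the calculus behind this comment: convolution O = conv(I, W) is linear in each of its two arguments, so perturbing the inputs by (ddI, ddW) perturbs the output by

  ddO = conv(ddI, W) + conv(I, ddW),

which is exactly the pair of cudnnConvolutionForward calls below. The remaining outputs reuse the ordinary first-order backward kernels with the perturbed operands substituted for the originals:

  dW = conv_bp_filter(ddI, dO),    dI = conv_bp_data(ddW, dO).
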
std::vector<int>& paddings = ctx.Attr<std::vector<int>>("paddings");
+    const std::vector<int>& dilations = ctx.Attr<std::vector<int>>("dilations");
+    int groups = ctx.Attr<int>("groups");
+    bool exhaustive_search =
+        FLAGS_cudnn_exhaustive_search || ctx.Attr<bool>("exhaustive_search");
+    bool deterministic = FLAGS_cudnn_deterministic;
+    if (exhaustive_search && deterministic) {
+      PADDLE_THROW(
+          "Can't set exhaustive_search True and "
+          "FLAGS_cudnn_deterministic True at the same time.");
+    }
+
+    int iwo_group = groups;
+    int c_group = 1;
+#if CUDNN_VERSION_MIN(7, 0, 1)
+    iwo_group = 1;
+    c_group = groups;
+#endif
+    auto dtype = platform::CudnnDataType<T>::type;
+
+    auto handle = dev_ctx.cudnn_handle();
+
+    ConvArgs args1{ddX, W, ddO, strides, paddings, dilations};
+    ConvArgs args2{X, ddW, ddO, strides, paddings, dilations};
+    ConvArgs args3{ddX, dW, dO, strides, paddings, dilations};
+    ConvArgs args4{dX, ddW, dO, strides, paddings, dilations};
+
+    cudnnConvolutionFwdAlgo_t fwd_algo1 =
+        static_cast<cudnnConvolutionFwdAlgo_t>(0);
+    cudnnConvolutionFwdAlgo_t fwd_algo2 =
+        static_cast<cudnnConvolutionFwdAlgo_t>(0);
+    cudnnConvolutionBwdDataAlgo_t data_algo =
+        static_cast<cudnnConvolutionBwdDataAlgo_t>(0);
+    cudnnConvolutionBwdFilterAlgo_t filter_algo =
+        static_cast<cudnnConvolutionBwdFilterAlgo_t>(0);
+
+    auto layout = GetCudnnTensorFormat(DataLayout::kNCHW);
+
+    // ddo = conv(ddI, W) + conv(I, ddW)
+    size_t workspace_size = 0;
+    if (ddO) {
+      ddy = ddO->mutable_data<T>(ctx.GetPlace());
+      args1.handle = handle;
+      args1.idesc.set(*ddX, iwo_group);
+      args1.wdesc.set(*W, layout, iwo_group);
+      args1.odesc.set(*ddO, iwo_group);
+      args1.cdesc.set(dtype, paddings, strides, dilations, c_group);
+
+      using search1 = SearchAlgorithm<cudnnConvolutionFwdAlgoPerf_t>;
+      fwd_algo1 = search1::Find<T>(args1, exhaustive_search, false, 0, ctx);
+      workspace_size = search1::GetWorkspaceSize(args1, fwd_algo1);
+
+      if (ddW) {
+        ddw = ddW->data<T>();
+        args2.handle = handle;
+        args2.idesc.set(*X, iwo_group);
+        args2.wdesc.set(*ddW, layout, iwo_group);
+        args2.odesc.set(*ddO, iwo_group);
+        args2.cdesc.set(dtype, paddings, strides, dilations, c_group);
+
+        using search2 = SearchAlgorithm<cudnnConvolutionFwdAlgoPerf_t>;
+        fwd_algo2 = search2::Find<T>(args2, exhaustive_search, false, 0, ctx);
+        workspace_size = std::max(workspace_size,
+                                  search2::GetWorkspaceSize(args2, fwd_algo2));
+      }
+    }
+
+    if (dW) {
+      dw = dW->mutable_data<T>(ctx.GetPlace());
+      args3.handle = handle;
+      args3.idesc.set(*ddX, iwo_group);
+      args3.wdesc.set(*dW, layout, iwo_group);
+      args3.odesc.set(*dO, iwo_group);
+      args3.cdesc.set(dtype, paddings, strides, dilations, c_group);
+
+      using search3 = SearchAlgorithm<cudnnConvolutionBwdFilterAlgoPerf_t>;
+      filter_algo =
+          search3::Find<T>(args3, exhaustive_search, deterministic, 1, ctx);
+      workspace_size = std::max(workspace_size,
+                                search3::GetWorkspaceSize(args3, filter_algo));
+    }
+
+    if (ddW && dX) {
+      dx = dX->mutable_data<T>(ctx.GetPlace());
+      args4.handle = handle;
+      args4.idesc.set(*dX, iwo_group);
+      args4.wdesc.set(*ddW, layout, iwo_group);
+      args4.odesc.set(*dO, iwo_group);
+      args4.cdesc.set(dtype, paddings, strides, dilations, c_group);
+
+      using search4 = SearchAlgorithm<cudnnConvolutionBwdDataAlgoPerf_t>;
+      data_algo =
+          search4::Find<T>(args4, exhaustive_search, deterministic, 2, ctx);
+      workspace_size =
+          std::max(workspace_size, search4::GetWorkspaceSize(args4, data_algo));
+    }
+
+    int i_n, i_c, i_d, i_h, i_w;
+    GetNCDHW(X->dims(), DataLayout::kNCHW, &i_n, &i_c, &i_d, &i_h, &i_w);
+    int o_n, o_c, o_d, o_h, o_w;
+    GetNCDHW(dO->dims(), DataLayout::kNCHW, &o_n, &o_c, &o_d, &o_h, &o_w);
+
+    int group_offset_in = i_c / groups * i_h * i_w * i_d;
+    int group_offset_out = o_c / groups * o_h * o_w * o_d;
+    int group_offset_filter = W->numel() / groups;
+
+    ScalingParamType<T> alpha = 1.0f, beta = 0.0f;
+    auto wkspace_handle =
dev_ctx.cudnn_workspace_handle(); + + if (ddO) { + ddx = ddX->data(); + for (int i = 0; i < groups; i++) { + wkspace_handle.RunFunc( + [&](void* workspace_ptr) { + CUDNN_ENFORCE(platform::dynload::cudnnConvolutionForward( + handle, &alpha, args1.idesc.desc(), ddx + i * group_offset_in, + args1.wdesc.desc(), w + i * group_offset_filter, + args1.cdesc.desc(), fwd_algo1, workspace_ptr, workspace_size, + &beta, args1.odesc.desc(), ddy + i * group_offset_out)); + }, + workspace_size); + } + if (ddW) { + for (int i = 0; i < groups; i++) { + wkspace_handle.RunFunc( + [&](void* workspace_ptr) { + CUDNN_ENFORCE(platform::dynload::cudnnConvolutionForward( + handle, &alpha, args2.idesc.desc(), x + i * group_offset_in, + args2.wdesc.desc(), ddw + i * group_offset_filter, + args2.cdesc.desc(), fwd_algo2, workspace_ptr, + workspace_size, &alpha, args2.odesc.desc(), + ddy + i * group_offset_out)); + }, + workspace_size); + } + } + } + + if (dW) { + ddx = ddX->data(); + for (int i = 0; i < groups; i++) { + wkspace_handle.RunFunc( + [&](void* workspace_ptr) { + CUDNN_ENFORCE(platform::dynload::cudnnConvolutionBackwardFilter( + handle, &alpha, args3.idesc.desc(), ddx + i * group_offset_in, + args3.odesc.desc(), dy + i * group_offset_out, + args3.cdesc.desc(), filter_algo, workspace_ptr, + workspace_size, &beta, args3.wdesc.desc(), + dw + i * group_offset_filter)); + }, + workspace_size); + } + } + + if (dX && ddW) { + ddw = ddW->data(); + for (int i = 0; i < groups; i++) { + wkspace_handle.RunFunc( + [&](void* workspace_ptr) { + CUDNN_ENFORCE(platform::dynload::cudnnConvolutionBackwardData( + handle, &alpha, args4.wdesc.desc(), + ddw + i * group_offset_filter, args4.odesc.desc(), + dy + i * group_offset_out, args4.cdesc.desc(), data_algo, + workspace_ptr, workspace_size, &beta, args4.idesc.desc(), + dx + i * group_offset_in)); + }, + workspace_size); + } + } + } +}; + } // namespace operators } // namespace paddle @@ -524,6 +713,11 @@ REGISTER_OP_KERNEL(conv2d_grad, CUDNN, plat::CUDAPlace, paddle::operators::CUDNNConvGradOpKernel, paddle::operators::CUDNNConvGradOpKernel, paddle::operators::CUDNNConvGradOpKernel); +REGISTER_OP_KERNEL( + conv2d_grad_grad, CUDNN, plat::CUDAPlace, + paddle::operators::CUDNNConvDoubleGradOpKernel, + paddle::operators::CUDNNConvDoubleGradOpKernel, + paddle::operators::CUDNNConvDoubleGradOpKernel); REGISTER_OP_KERNEL(conv3d, CUDNN, plat::CUDAPlace, paddle::operators::CUDNNConvOpKernel, diff --git a/paddle/fluid/operators/conv_op.cc b/paddle/fluid/operators/conv_op.cc index 1bacc54b61d7f7d1f6e62a317a97cd96cf15669e..5b923f8a5eb58cfdf5809c677dfc915a68c64aae 100644 --- a/paddle/fluid/operators/conv_op.cc +++ b/paddle/fluid/operators/conv_op.cc @@ -506,13 +506,100 @@ class Conv3DGradMaker : public framework::SingleGradOpDescMaker { } }; +/* + * Inputs: I, W, dO, ddI, ddW + * Outputs: ddO, dW, dI + */ +class Conv2DDoubleGradMaker : public framework::SingleGradOpDescMaker { + public: + using framework::SingleGradOpDescMaker::SingleGradOpDescMaker; + + std::unique_ptr Apply() const override { + auto* op = new framework::OpDesc(); + op->SetType(this->ForwardOpType() + "_grad"); + // I, W, dO, ddI, ddW + op->SetInput("Input", Input("Input")); + op->SetInput("Filter", Input("Filter")); + op->SetInput("DOutput", Input(framework::GradVarName("Output"))); + op->SetInput("DDInput", OutputGrad(framework::GradVarName("Input"))); + op->SetInput("DDFilter", OutputGrad(framework::GradVarName("Filter"))); + + // ddO, dI, dW + // Unlike grad op, double grad op does not use name@GRAD@GRAD + // as key 
of ops' inputs and outputs. + op->SetOutput("DDOutput", InputGrad(framework::GradVarName("Output"))); + op->SetOutput("DFilter", InputGrad("Filter")); + op->SetOutput("DInput", InputGrad("Input")); + op->SetAttrMap(Attrs()); + + return std::unique_ptr(op); + } +}; + +void ConvOpDoubleGrad::InferShape(framework::InferShapeContext* ctx) const { + auto x_dims = ctx->GetInputDim("Input"); + auto w_dims = ctx->GetInputDim("Filter"); + auto do_dims = ctx->GetInputDim("DOutput"); + + if (ctx->HasOutput("DDOutput")) { + ctx->SetOutputDim("DDOutput", do_dims); + } + if (ctx->HasOutput("DFilter")) { + ctx->SetOutputDim("DFilter", w_dims); + } + if (ctx->HasOutput("DInput")) { + ctx->SetOutputDim("DInput", x_dims); + } +} + +framework::OpKernelType ConvOpDoubleGrad::GetExpectedKernelType( + const framework::ExecutionContext& ctx) const { + int customized_type_value = + framework::OpKernelType::kDefaultCustomizedTypeValue; + framework::LibraryType library_{framework::LibraryType::kPlain}; + std::string data_format = ctx.Attr("data_format"); + framework::DataLayout layout_ = framework::StringToDataLayout(data_format); + +#ifdef PADDLE_WITH_CUDA + if (platform::CanCUDNNBeUsed(ctx)) { + library_ = framework::LibraryType::kCUDNN; + } else { + PADDLE_THROW("Now ConvDoubleGrad only supports cuDNN."); + } +#endif + auto type = framework::OpKernelType(ctx.Input("Input")->type(), + ctx.GetPlace(), layout_, library_, + customized_type_value); +#ifdef PADDLE_WITH_CUDA + if (library_ == framework::LibraryType::kCUDNN) { + std::vector& configs = kernel_configs_map_[type]; + if (configs.empty()) { + std::shared_ptr> p0( + new framework::AlgorithmsCache()); + configs.push_back(p0); + + std::shared_ptr< + framework::AlgorithmsCache> + p1(new framework::AlgorithmsCache()); + configs.push_back(p1); + + std::shared_ptr> + p2(new framework::AlgorithmsCache()); + configs.push_back(p2); + } + } +#endif + return type; +} + } // namespace operators } // namespace paddle namespace ops = paddle::operators; REGISTER_OPERATOR(conv2d, ops::ConvOp, ops::Conv2DOpMaker, ops::ConvOpInferVarType, ops::Conv2DGradMaker); -REGISTER_OPERATOR(conv2d_grad, ops::ConvOpGrad); +REGISTER_OPERATOR(conv2d_grad, ops::ConvOpGrad, ops::Conv2DDoubleGradMaker); +REGISTER_OPERATOR(conv2d_grad_grad, ops::ConvOpDoubleGrad); // depthwise convolution op REGISTER_OPERATOR(depthwise_conv2d, ops::ConvOp, ops::Conv2DOpMaker, diff --git a/paddle/fluid/operators/conv_op.h b/paddle/fluid/operators/conv_op.h index 797c665165975b4230c0edda460e3eebba8e400c..4df47ef261ef51101d1c4df31e6aff1a9801b329 100644 --- a/paddle/fluid/operators/conv_op.h +++ b/paddle/fluid/operators/conv_op.h @@ -15,6 +15,7 @@ limitations under the License. 
*/
 #pragma once
 #include
+#include
 #include
 #include "paddle/fluid/framework/eigen.h"
 #include "paddle/fluid/framework/op_registry.h"
@@ -107,6 +108,16 @@ class ConvOpGrad : public framework::OperatorWithKernel {
       const framework::ExecutionContext& ctx) const override;
 };
 
+class ConvOpDoubleGrad : public framework::OperatorWithKernel {
+ public:
+  using framework::OperatorWithKernel::OperatorWithKernel;
+  void InferShape(framework::InferShapeContext* ctx) const override;
+
+ protected:
+  framework::OpKernelType GetExpectedKernelType(
+      const framework::ExecutionContext& ctx) const override;
+};
+
 template <typename DeviceContext, typename T>
 class GemmConvKernel : public framework::OpKernel<T> {
  public:
diff --git a/paddle/fluid/operators/cross_entropy_op.cc b/paddle/fluid/operators/cross_entropy_op.cc
index ad32de53e7019b438b7106ddd031a8f00bd79b5d..da2c74b0c8a8b0fbeee13c4a3d490d7761abb93c 100644
--- a/paddle/fluid/operators/cross_entropy_op.cc
+++ b/paddle/fluid/operators/cross_entropy_op.cc
@@ -35,11 +35,9 @@ class CrossEntropyOpBase : public framework::OperatorWithKernel {
     int rank = x_dims.size();
     PADDLE_ENFORCE_EQ(rank, label_dims.size(),
                       "Input(X) and Input(Label) shall have the same rank.");
-    bool check = true;
-    if ((!ctx->IsRuntime()) && (framework::product(x_dims) <= 0 ||
-                                framework::product(label_dims) <= 0)) {
-      check = false;
-    }
+    bool contain_unknown_dim = framework::contain_unknown_dim(x_dims) ||
+                               framework::contain_unknown_dim(label_dims);
+    bool check = ctx->IsRuntime() || !contain_unknown_dim;
     if (check) {
       PADDLE_ENFORCE_EQ(framework::slice_ddim(x_dims, 0, rank - 1),
                         framework::slice_ddim(label_dims, 0, rank - 1),
diff --git a/paddle/fluid/operators/cross_entropy_op.h b/paddle/fluid/operators/cross_entropy_op.h
index 7eb663773ed072760c47a2914377b5306ceeb7af..89bacfc33edceb77017aad599c081710f4d4db33 100644
--- a/paddle/fluid/operators/cross_entropy_op.h
+++ b/paddle/fluid/operators/cross_entropy_op.h
@@ -39,9 +39,10 @@ class CrossEntropyOpKernel : public framework::OpKernel<T> {
     Tensor labels_2d = framework::ReshapeToMatrix(*labels, rank - 1);
     Tensor y_2d = framework::ReshapeToMatrix(*y, rank - 1);
+    int axis_dim = x->dims()[rank - 1];
     math::CrossEntropyFunctor<DeviceContext, T>()(
         ctx.template device_context<DeviceContext>(), &y_2d, &x_2d, &labels_2d,
-        ctx.Attr<bool>("soft_label"), ctx.Attr<int>("ignore_index"));
+        ctx.Attr<bool>("soft_label"), ctx.Attr<int>("ignore_index"), axis_dim);
   }
 };
 
@@ -153,6 +154,8 @@ struct HardLabelCrossEntropyForwardFunctor {
   HOSTDEVICE void operator()(int64_t idx) const {
     auto label = label_[idx];
+    PADDLE_ASSERT_MSG(label >= 0 && label < feature_size_,
+                      "The label is out of the range.", label);
     if (label != ignore_index_) {
       auto match_x = x_[idx * feature_size_ + label];
       y_[idx] = -math::TolerableValue<T>()(real_log(match_x));
diff --git a/paddle/fluid/operators/detection/roi_perspective_transform_op.cu b/paddle/fluid/operators/detection/roi_perspective_transform_op.cu
index 74c8384e1e7cbb94492763ba08effff49663cd5b..85eb0c45e06df39309d4a10218f2c1649a369d1a 100644
--- a/paddle/fluid/operators/detection/roi_perspective_transform_op.cu
+++ b/paddle/fluid/operators/detection/roi_perspective_transform_op.cu
@@ -466,6 +466,10 @@ class CUDAROIPerspectiveTransformGradOpKernel : public framework::OpKernel<T> {
     auto* in_grad = ctx.Output<framework::Tensor>(framework::GradVarName("X"));
     T* in_grad_data = in_grad->mutable_data<T>(ctx.GetPlace());
+
+    math::SetConstant<platform::CUDADeviceContext, T> set_zero;
+    set_zero(ctx.cuda_device_context(), in_grad, static_cast<T>(0));
+
     const T* out_grad_data = out_grad->data<T>();
     const int* out2in_idx_data = out2in_idx->data<int>();
     const T* out2in_w_data = out2in_w->data<T>();
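A plausible reading of the SetConstant fix above: the backward kernel scatter-accumulates into in_grad through out2in_idx/out2in_w, and mutable_data() returns uninitialized memory, so positions that receive no contribution would otherwise hold garbage. A CPU analogue of the pattern, with hypothetical names mirroring the tensors in that kernel:

    #include <algorithm>
    #include <cstddef>
    #include <vector>

    // Scatter-accumulate backward pass: each output-gradient element is added
    // into the input-gradient slot it maps to, so the buffer must start at zero.
    void ScatterAddGrad(const std::vector<float>& out_grad,
                        const std::vector<int>& out2in_idx,
                        const std::vector<float>& out2in_w,
                        std::vector<float>* in_grad) {
      std::fill(in_grad->begin(), in_grad->end(), 0.f);  // the SetConstant step
      for (std::size_t i = 0; i < out_grad.size(); ++i) {
        (*in_grad)[out2in_idx[i]] += out2in_w[i] * out_grad[i];
      }
    }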
diff --git a/paddle/fluid/operators/diag_op.cc b/paddle/fluid/operators/diag_op.cc
new file mode 100644
index 0000000000000000000000000000000000000000..5fb18a1d695381d14203b19653eb3cbc2508ee4d
--- /dev/null
+++ b/paddle/fluid/operators/diag_op.cc
@@ -0,0 +1,60 @@
+/* Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. */
+
+#include "paddle/fluid/operators/diag_op.h"
+
+namespace paddle {
+namespace operators {
+
+class DiagOp : public framework::OperatorWithKernel {
+ public:
+  using framework::OperatorWithKernel::OperatorWithKernel;
+
+  void InferShape(framework::InferShapeContext *ctx) const override {
+    PADDLE_ENFORCE(ctx->HasInput("Diagonal"),
+                   "Input(Diagonal) of DiagOp should not be null.");
+
+    PADDLE_ENFORCE(ctx->HasOutput("Out"),
+                   "Output(Out) of DiagOp should not be null.");
+
+    auto s_dims = ctx->GetInputDim("Diagonal");
+    PADDLE_ENFORCE(s_dims.size() == 1,
+                   "The rank of Input(Diagonal) should only be 1.");
+
+    ctx->SetOutputDim("Out", {s_dims[0], s_dims[0]});
+  }
+};
+
+class DiagOpMaker : public framework::OpProtoAndCheckerMaker {
+ public:
+  void Make() override {
+    AddInput("Diagonal",
+             "Diagonal values of square matrix. It is a tensor with rank 1.");
+    AddOutput("Out", "A square matrix.");
+    AddComment(R"DOC(
+  Return a square matrix with specified diagonal values.
+)DOC");
+  }
+};
+}  // namespace operators
+}  // namespace paddle
+
+namespace ops = paddle::operators;
+REGISTER_OPERATOR(diag, ops::DiagOp, ops::DiagOpMaker,
+                  paddle::framework::EmptyGradOpMaker);
+REGISTER_OP_CPU_KERNEL(
+    diag, ops::DiagKernel<paddle::platform::CPUDeviceContext, int>,
+    ops::DiagKernel<paddle::platform::CPUDeviceContext, float>,
+    ops::DiagKernel<paddle::platform::CPUDeviceContext, double>,
+    ops::DiagKernel<paddle::platform::CPUDeviceContext, int64_t>);
diff --git a/paddle/fluid/operators/diag_op.cu b/paddle/fluid/operators/diag_op.cu
new file mode 100644
index 0000000000000000000000000000000000000000..9fe1b83b66d54a03e8d812589c1e9a3bf995f69c
--- /dev/null
+++ b/paddle/fluid/operators/diag_op.cu
@@ -0,0 +1,23 @@
+/* Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
*/ + +#include "paddle/fluid/framework/op_registry.h" +#include "paddle/fluid/operators/diag_op.h" + +namespace ops = paddle::operators; +REGISTER_OP_CUDA_KERNEL( + diag, ops::DiagKernel, + ops::DiagKernel, + ops::DiagKernel, + ops::DiagKernel); diff --git a/paddle/fluid/operators/diag_op.h b/paddle/fluid/operators/diag_op.h new file mode 100644 index 0000000000000000000000000000000000000000..f89415ae08974293fa27bbd398d01df165eb901c --- /dev/null +++ b/paddle/fluid/operators/diag_op.h @@ -0,0 +1,59 @@ +/* Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#pragma once + +#include "paddle/fluid/framework/op_registry.h" +#include "paddle/fluid/operators/math/math_function.h" +#include "paddle/fluid/platform/for_range.h" + +namespace paddle { +namespace operators { + +template +struct DiagFunctor { + DiagFunctor(const T* diagonal, int64_t numel, T* output) + : diagonal_(diagonal), numel_(numel), output_(output) {} + + HOSTDEVICE void operator()(size_t idx) const { + output_[idx * numel_ + idx] = diagonal_[idx]; + } + + const T* diagonal_; + int64_t numel_; + T* output_; +}; + +template +class DiagKernel : public framework::OpKernel { + public: + void Compute(const framework::ExecutionContext& context) const override { + auto* diagonal = context.Input("Diagonal"); + auto* diag_data = diagonal->data(); + auto numel = diagonal->numel(); + auto* out = context.Output("Out"); + T* out_data = out->mutable_data(context.GetPlace()); + + math::SetConstant set_zero; + auto& dev_ctx = context.template device_context(); + set_zero(dev_ctx, out, static_cast(0)); + + platform::ForRange for_range(dev_ctx, numel); + DiagFunctor functor(diag_data, numel, out_data); + for_range(functor); + } +}; + +} // namespace operators +} // namespace paddle diff --git a/paddle/fluid/operators/elementwise/elementwise_mod_op.cc b/paddle/fluid/operators/elementwise/elementwise_mod_op.cc index d63a7df03d0de7489a507825b066ab365e1ef8b9..fadebc00cf451736f3dbc3a6c4d9d63397582f6f 100644 --- a/paddle/fluid/operators/elementwise/elementwise_mod_op.cc +++ b/paddle/fluid/operators/elementwise/elementwise_mod_op.cc @@ -21,7 +21,7 @@ namespace operators { class ElementwiseModOpMaker : public ElementwiseOpMaker { protected: std::string GetName() const override { return "Mod"; } - std::string GetEquation() const override { return "Out = X % Y"; } + std::string GetEquation() const override { return "Out = X \\\\% Y"; } }; } // namespace operators } // namespace paddle diff --git a/paddle/fluid/operators/elementwise/elementwise_mul_op.cu b/paddle/fluid/operators/elementwise/elementwise_mul_op.cu index 50b2322b17bdba44f8c5c1dd4a9f0b2160f6a7d8..e36cc8f9f28d0ed3d3693e0a38d8bb17fa4ba25d 100644 --- a/paddle/fluid/operators/elementwise/elementwise_mul_op.cu +++ b/paddle/fluid/operators/elementwise/elementwise_mul_op.cu @@ -14,9 +14,67 @@ limitations under the License. 
*/
 #include "paddle/fluid/operators/elementwise/elementwise_mul_op.h"
 #include "paddle/fluid/platform/float16.h"
+#define TILE_SIZE 512
 namespace ops = paddle::operators;
 namespace plat = paddle::platform;
 
+namespace paddle {
+namespace operators {
+
+template <typename T>
+static __global__ void SimpleElemwiseMulGradCUDAKernel(const T* x, const T* y,
+                                                       const T* out,
+                                                       const T* dout,
+                                                       int64_t size, T* dx,
+                                                       T* dy) {
+  int col = blockIdx.x * blockDim.x + threadIdx.x;
+
+  while (col < size) {
+    T o = dout[col];
+    dx[col] = y[col] * o;
+    dy[col] = x[col] * o;
+    col += blockDim.x * gridDim.x;
+  }
+}
+
+template <typename T>
+class ElementwiseMulGradKernel<plat::CUDADeviceContext, T>
+    : public ElemwiseGradKernel<T> {
+ public:
+  void Compute(const framework::ExecutionContext& ctx) const override {
+    ElemwiseGradKernel<T>::Compute(ctx);
+    using Tensor = framework::Tensor;
+
+    auto* x = ctx.Input<Tensor>("X");
+    auto* y = ctx.Input<Tensor>("Y");
+    auto* dout = ctx.Input<Tensor>(framework::GradVarName("Out"));
+    auto* out = dout;  // out is not necessary
+    auto* dx = ctx.Output<Tensor>(framework::GradVarName("X"));
+    auto* dy = ctx.Output<Tensor>(framework::GradVarName("Y"));
+    int axis = ctx.Attr<int>("axis");
+
+    if (x->dims() == y->dims() && dx && dy) {
+      dim3 block_size = dim3(TILE_SIZE, 1);
+      auto size = x->numel();
+      dim3 grid_size = dim3((size + TILE_SIZE - 1) / TILE_SIZE, 1);
+      SimpleElemwiseMulGradCUDAKernel<T><<<
+          grid_size, block_size, 0,
+          ctx.template device_context<plat::CUDADeviceContext>().stream()>>>(
+          x->data<T>(), y->data<T>(), out->data<T>(), dout->data<T>(), size,
+          dx->mutable_data<T>(ctx.GetPlace()),
+          dy->mutable_data<T>(ctx.GetPlace()));
+      return;
+    } else {
+      ElemwiseGradCompute<plat::CUDADeviceContext, T, MulGradDX<T>,
+                          MulGradDY<T>>(ctx, *x, *y, *out, *dout, axis, dx, dy,
+                                        MulGradDX<T>(), MulGradDY<T>());
+    }
+  }
+};
+
+}  // namespace operators
+}  // namespace paddle
+
 REGISTER_OP_CUDA_KERNEL(
     elementwise_mul, ops::ElementwiseMulKernel<plat::CUDADeviceContext, float>,
     ops::ElementwiseMulKernel<plat::CUDADeviceContext, double>,
diff --git a/paddle/fluid/operators/elementwise/elementwise_op.h b/paddle/fluid/operators/elementwise/elementwise_op.h
index 22d1d0dfbe47b1585998748c29ddb0baa407256f..5ec335972a02a3a6911274ba7609f50665f3d0e0 100644
--- a/paddle/fluid/operators/elementwise/elementwise_op.h
+++ b/paddle/fluid/operators/elementwise/elementwise_op.h
@@ -145,7 +145,7 @@ For case 2:
 
 For example:
 
-  .. code-block:: python
+  .. code-block:: text
 
     shape(X) = (2, 3, 4, 5), shape(Y) = (,)
     shape(X) = (2, 3, 4, 5), shape(Y) = (5,)
diff --git a/paddle/fluid/operators/elementwise/elementwise_op_function.h b/paddle/fluid/operators/elementwise/elementwise_op_function.h
index cb8a4e7e1502e7e6ceb48e51452c2c7ab8313972..2e91ec84848b0f491dca0a271d9326e3c37632ea 100644
--- a/paddle/fluid/operators/elementwise/elementwise_op_function.h
+++ b/paddle/fluid/operators/elementwise/elementwise_op_function.h
@@ -351,14 +351,65 @@ static __global__ void ElemwiseGradBroadcast1CUDAKernel(
   }
 }
 
+#define BLOCK_X 32
+#define BLOCK_Y 32
+
+// A 2D block is expected to be faster because it exposes more parallelism
+// and coalesces memory accesses.
+template <typename T, typename DX_OP, typename DY_OP>
+static __global__ void FastElemwiseGradBroadcast1CUDAKernel(
+    const T *x, const T *y, const T *out, const T *dout, int h, int w,
+    DX_OP dx_op, DY_OP dy_op, T *dx, T *dy) {
+  __shared__ T sdata[BLOCK_Y][BLOCK_X + 1];
+
+  T val(0);
+  size_t width_stride = gridDim.x * blockDim.x;
+  size_t idx = threadIdx.x + blockDim.x * blockIdx.x;
+  size_t full_width =
+      (w & (~((uint64_t)(BLOCK_X - 1)))) + ((w & (BLOCK_X - 1)) ? BLOCK_X : 0);
+  size_t full_height =
+      (h & (~((uint64_t)(BLOCK_Y - 1)))) + ((h & (BLOCK_Y - 1)) ? BLOCK_Y : 0);
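The full_width/full_height expressions round w and h up to the next multiple of BLOCK_X/BLOCK_Y, which is valid because both are powers of two. Every thread then executes the same number of loop iterations, so the __syncthreads() inside the dy branch is reached uniformly, while the m < w && n < h guards mask the padded iterations. The bit trick in isolation, checked as a plain C++ program:

    #include <cstdint>
    #include <cstdio>

    // Rounds v up to a multiple of block (block must be a power of two).
    // Same form as full_width/full_height above; also equivalent to the
    // more familiar (v + block - 1) & ~(block - 1).
    static uint64_t RoundUpPow2(uint64_t v, uint64_t block) {
      return (v & ~(block - 1)) + ((v & (block - 1)) ? block : 0);
    }

    int main() {
      std::printf("%llu\n", (unsigned long long)RoundUpPow2(33, 32));  // 64
      std::printf("%llu\n", (unsigned long long)RoundUpPow2(64, 32));  // 64
      return 0;
    }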
+
+  for (int m = idx; m < full_width; m += width_stride) {
+    sdata[threadIdx.y][threadIdx.x] = 0;
+    for (int n = threadIdx.y; n < full_height; n += BLOCK_Y) {
+      int x_offset = n * w + m;
+      if (dx && m < w && n < h) {
+        dx[x_offset] = dx_op(x[x_offset], y[m], out[x_offset], dout[x_offset]);
+      }
+      if (dy) {
+        if (m < w && n < h) {
+          T val = dy_op(x[x_offset], y[m], out[x_offset], dout[x_offset]);
+          sdata[threadIdx.y][threadIdx.x] += val;
+        }
+        __syncthreads();
+      }
+    }
+    if (dy) {
+      T my_val = sdata[threadIdx.x][threadIdx.y];
+      for (int i = warpSize >> 1; i > 0; i >>= 1)
+        my_val += platform::CudaShuffleXorSync(0xFFFFFFFF, my_val, i);
+      __syncthreads();
+      if ((threadIdx.x == 0)) {
+        sdata[0][threadIdx.y] = my_val;
+      }
+      __syncthreads();
+      if (threadIdx.y == 0 && m < w) {
+        dy[m] = sdata[0][threadIdx.x];
+      }
+    }
+  }
+}
+
 template <typename T, typename DX_OP, typename DY_OP>
 static void ElemwiseGradBroadcast1CUDA(cudaStream_t stream, const T *x,
                                        const T *y, const T *out, const T *dout,
                                        int h, int w, DX_OP dx_op, DY_OP dy_op,
                                        T *dx, T *dy) {
-  int block_size = std::min(ELEMWISE_MAX_BLOCK_DIM, h);
-  int gird_size = w;
-  ElemwiseGradBroadcast1CUDAKernel<<<gird_size, block_size, 0, stream>>>(
+  // Performance is expected to improve as h increases.
+  dim3 block_size = dim3(BLOCK_X, BLOCK_Y);
+  int grid_size = (w + BLOCK_X - 1) / BLOCK_X;
+  FastElemwiseGradBroadcast1CUDAKernel<<<grid_size, block_size, 0, stream>>>(
       x, y, out, dout, h, w, dx_op, dy_op, dx, dy);
 }
 
@@ -619,7 +670,6 @@ void ElementwiseComputeEx(const framework::ExecutionContext &ctx,
   auto y_dims_untrimed = y->dims();
   PADDLE_ENFORCE_GE(x_dims.size(), y_dims_untrimed.size(),
                     "Rank of first input must >= rank of second input.");
-
   if (x_dims == y_dims_untrimed) {
     functor.Run();
     return;
@@ -1559,7 +1609,8 @@ void FusedElemwiseAndActComputeEx(const framework::ExecutionContext &ctx,
   // z = f1(f2(x, y))
   if (bcast_y) {  // Y should be broadcast.
     // In this case,
-    // for 'f2(y)', the shape of intermediate_out should be equal to the shape
+    // for 'f2(y)', the shape of intermediate_out should be equal to the
+    // shape
     // of Y.
     // for 'f2(x, y)', the shape of intermediate_out should be equal to the
     // shape of Out.
@@ -1571,7 +1622,8 @@ void FusedElemwiseAndActComputeEx(const framework::ExecutionContext &ctx,
       intermediate_out);
   } else {
     // In this case,
-    // for 'f2(y)', the shape of intermediate_out should be equal to the shape
+    // for 'f2(y)', the shape of intermediate_out should be equal to the
+    // shape
     // of Out.
     // for 'f2(x, y)', the shape of intermediate_out should be equal to the
     // shape of Out.
diff --git a/paddle/fluid/operators/fake_quantize_op.cc b/paddle/fluid/operators/fake_quantize_op.cc
index 054ef4658cc0c4448d49870849017d3191d57db9..25ca1f7e0a0086b803d48aa892b0888e0d5635b1 100644
--- a/paddle/fluid/operators/fake_quantize_op.cc
+++ b/paddle/fluid/operators/fake_quantize_op.cc
@@ -388,14 +388,76 @@ class FakeQuantizeMovingAverageAbsMaxOpMaker
     AddComment(R"DOC(
 FakeQuantize operator is used in static quantization.
-$$scale = (0.9*max(abs(x))+accum)/(0.9*state+1)$$
-$$range = 2^{bit_length - 1} - 1$$
+$$scale = (moving\_rate*accum+max(abs(x)))/(moving\_rate*state+1)$$
+$$range = 2^{bit\_length - 1} - 1$$
 $$Out = round(X/scale * range)$$
 )DOC");
   }
 };
 
+class MovingAverageAbsMaxScaleOp : public framework::OperatorWithKernel {
+ public:
+  using framework::OperatorWithKernel::OperatorWithKernel;
+
+  void InferShape(framework::InferShapeContext* ctx) const override {
+    PADDLE_ENFORCE(
+        ctx->HasInput("X"),
+        "Input(X) of MovingAverageAbsMaxScaleOp should not be null.");
+    PADDLE_ENFORCE(
+        ctx->HasOutput("Out"),
+        "Output(Out) of MovingAverageAbsMaxScaleOp should not be null.");
+    PADDLE_ENFORCE(ctx->HasOutput("OutScale"),
+                   "Output(OutScale) of MovingAverageAbsMaxScaleOp "
+                   "should not be null.");
+    if (ctx->HasOutput("OutState")) {
+      ctx->SetOutputDim("OutState", {1});
+    }
+    if (ctx->HasOutput("OutAccum")) {
+      ctx->SetOutputDim("OutAccum", {1});
+    }
+    ctx->SetOutputDim("Out", ctx->GetInputDim("X"));
+    ctx->SetOutputDim("OutScale", {1});
+    ctx->ShareLoD("X", /*->*/ "Out");
+  }
+
+ protected:
+  framework::OpKernelType GetExpectedKernelType(
+      const framework::ExecutionContext& ctx) const override {
+    return framework::OpKernelType(
+        ctx.Input<framework::LoDTensor>("X")->type(), ctx.GetPlace());
+  }
+};
+
+class MovingAverageAbsMaxScaleOpMaker
+    : public framework::OpProtoAndCheckerMaker {
+ public:
+  void Make() override {
+    AddInput("X", "(Tensor) Input is float data type.");
+    AddInput("InAccum", "Last accum.").AsDispensable();
+    AddInput("InState", "Last state.").AsDispensable();
+    AddOutput("Out",
+              "(Tensor) Output tensor is just equivalent to the input tensor.");
+    AddOutput("OutScale", "Current scale");
+    AddOutput("OutState", "(Tensor) state buffer.").AsDispensable();
+    AddOutput("OutAccum", "(Tensor) accum buffer.").AsDispensable();
+    AddAttr<float>("moving_rate", "(float, default 0.9) moving rate.")
+        .SetDefault(0.9);
+    AddAttr<bool>("is_test",
+                  "(bool, default false) Set true for inference only and false "
+                  "for training. Some layers may run faster when this is true.")
+        .SetDefault(false);
+    AddComment(R"DOC(
+MovingAverageAbsMaxScale operator is only used to calculate the quantization scale.
+It does not quantize the input tensor.
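The scale formula in these DOC blocks is one step of a decayed-average recurrence: state is a decayed batch count and accum a decayed sum of per-batch abs-max values (in-tree, the work is split between FindAbsMaxFunctor and FindMovingAverageAbsMaxFunctor, used by MovingAverageAbsMaxScaleKernel further below). A host-side sketch of that recurrence, with illustrative names:

    #include <algorithm>
    #include <cmath>
    #include <vector>

    // One moving-average abs-max update; returns the new quantization scale.
    // state/accum persist across batches, like the OutState/OutAccum buffers.
    float MovingAverageAbsMaxScale(const std::vector<float>& x,
                                   float moving_rate, float* state,
                                   float* accum) {
      float cur_max = 0.f;
      for (float v : x) cur_max = std::max(cur_max, std::fabs(v));
      *state = moving_rate * (*state) + 1.f;      // decayed batch count
      *accum = moving_rate * (*accum) + cur_max;  // decayed sum of abs-max
      // == (moving_rate*accum + max|x|) / (moving_rate*state + 1)
      return *accum / *state;
    }

Starting from state = accum = 0, the first batch yields scale = max|x| exactly; later batches blend history in with weight moving_rate.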
+ +$$scale = (moving\_rate*accum+max(abs(x)))/(moving\_rate*state+1)$$ +$$Out = X$$ + +)DOC"); + } +}; + } // namespace operators } // namespace paddle @@ -426,3 +488,9 @@ REGISTER_OPERATOR(fake_channel_wise_quantize_abs_max, paddle::framework::EmptyGradOpMaker); REGISTER_OP_CPU_KERNEL(fake_channel_wise_quantize_abs_max, ops::FakeChannelWiseQuantizeAbsMaxKernel); + +REGISTER_OPERATOR(moving_average_abs_max_scale, ops::MovingAverageAbsMaxScaleOp, + ops::MovingAverageAbsMaxScaleOpMaker, + paddle::framework::EmptyGradOpMaker); +REGISTER_OP_CPU_KERNEL(moving_average_abs_max_scale, + ops::MovingAverageAbsMaxScaleKernel); diff --git a/paddle/fluid/operators/fake_quantize_op.cu b/paddle/fluid/operators/fake_quantize_op.cu index 7d551106756070a14f94f39f19b775d022d90777..6e1d40cac765eec93f6e3a0425ccf0329a246649 100644 --- a/paddle/fluid/operators/fake_quantize_op.cu +++ b/paddle/fluid/operators/fake_quantize_op.cu @@ -300,3 +300,5 @@ REGISTER_OP_CUDA_KERNEL(fake_quantize_range_abs_max, REGISTER_OP_CUDA_KERNEL( fake_quantize_moving_average_abs_max, ops::FakeQuantizeMovingAverageAbsMaxKernel); +REGISTER_OP_CUDA_KERNEL(moving_average_abs_max_scale, + ops::MovingAverageAbsMaxScaleKernel); diff --git a/paddle/fluid/operators/fake_quantize_op.h b/paddle/fluid/operators/fake_quantize_op.h index 5ab38b086df7f9df33996ec83b5ec07047c204ba..87bcece582442e7336049d65bcabc87eadd52342 100644 --- a/paddle/fluid/operators/fake_quantize_op.h +++ b/paddle/fluid/operators/fake_quantize_op.h @@ -17,6 +17,7 @@ limitations under the License. */ #include #include "paddle/fluid/framework/eigen.h" #include "paddle/fluid/framework/op_registry.h" +#include "paddle/fluid/framework/tensor_util.h" #include "paddle/fluid/operators/math/blas.h" namespace paddle { @@ -197,5 +198,46 @@ class FakeQuantizeMovingAverageAbsMaxKernel : public framework::OpKernel { } }; +template +class MovingAverageAbsMaxScaleKernel : public framework::OpKernel { + public: + void Compute(const framework::ExecutionContext& context) const override { + auto* in = context.Input("X"); + auto* out = context.Output("Out"); + out->mutable_data(context.GetPlace()); + auto& dev_ctx = context.template device_context(); + framework::TensorCopy(*in, context.GetPlace(), dev_ctx, out); + + bool is_test = context.Attr("is_test"); + // testing + if (is_test) { + return; + } + + // training + auto* in_accum = context.Input("InAccum"); + auto* in_state = context.Input("InState"); + auto& allocator = + platform::DeviceTemporaryAllocator::Instance().Get(dev_ctx); + auto cur_scale = allocator.Allocate(1 * sizeof(T)); + T* cur_scale_data = static_cast(cur_scale->ptr()); + + FindAbsMaxFunctor()(dev_ctx, in->data(), in->numel(), + cur_scale_data); + + auto* out_state = context.Output("OutState"); + auto* out_accum = context.Output("OutAccum"); + auto* out_scale = context.Output("OutScale"); + out_state->mutable_data(context.GetPlace()); + out_accum->mutable_data(context.GetPlace()); + out_scale->mutable_data(context.GetPlace()); + float moving_rate = context.Attr("moving_rate"); + + FindMovingAverageAbsMaxFunctor()( + dev_ctx, *in_accum, *in_state, cur_scale_data, moving_rate, out_state, + out_accum, out_scale); + } +}; + } // namespace operators } // namespace paddle diff --git a/paddle/fluid/operators/lookup_table_op.cu b/paddle/fluid/operators/lookup_table_op.cu index a863af4af914095a9ee2a7fcc986cc878fd808ea..8716662f158bd939755feda71e0ac8ea5748ac26 100644 --- a/paddle/fluid/operators/lookup_table_op.cu +++ b/paddle/fluid/operators/lookup_table_op.cu @@ -32,8 +32,8 @@ 
__global__ void LookupTable(T *output, const T *table, const int64_t *ids, while (idy < K) { int64_t id = ids[idy]; - PADDLE_ASSERT_MSG_CODE(id >= 0, "received id:", id); - PADDLE_ASSERT_MSG_CODE(id < N, "received id:", id); + PADDLE_ASSERT_MSG(id >= 0, "received id:", id); + PADDLE_ASSERT_MSG(id < N, "received id:", id); T *out = output + idy * D; const T *tab = table + id * D; for (int i = idx; i < D; i += BlockDimX) { @@ -59,8 +59,8 @@ __global__ void LookupTableGrad(T *table, const T *output, const int64_t *ids, while (idy < K) { int64_t id = ids[idy]; - PADDLE_ASSERT_MSG_CODE(id >= 0, "received id:", id); - PADDLE_ASSERT_MSG_CODE(id < N, "received id:", id); + PADDLE_ASSERT_MSG(id >= 0, "received id:", id); + PADDLE_ASSERT_MSG(id < N, "received id:", id); const T *out = output + idy * D; T *tab = table + id * D; for (int i = idx; i < D; i += BlockDimX) { diff --git a/paddle/fluid/operators/lrn_op.cc b/paddle/fluid/operators/lrn_op.cc index 06ac31b5f197d8cfc19fa14df4973e42e889305d..5ad94cfde901bedae4af28e5b2a43bad08e28cf9 100644 --- a/paddle/fluid/operators/lrn_op.cc +++ b/paddle/fluid/operators/lrn_op.cc @@ -242,7 +242,7 @@ The original formula is: $$ Output(i, x, y) = Input(i, x, y) / \left( -k + \alpha \sum\limits^{\min(C, c + n/2)}_{j = \max(0, c - n/2)} +k + \alpha \sum\limits^{\min(C-1, i + n/2)}_{j = \max(0, i - n/2)} (Input(j, x, y))^2 \right)^{\beta} $$ diff --git a/paddle/fluid/operators/math/cross_entropy.cc b/paddle/fluid/operators/math/cross_entropy.cc index 18bf1a66f6d9903f32048574dc93faf7e98953ac..9f7884fe05f2f446b1fb6eb7dfd53e293d8e19aa 100644 --- a/paddle/fluid/operators/math/cross_entropy.cc +++ b/paddle/fluid/operators/math/cross_entropy.cc @@ -29,8 +29,13 @@ class CrossEntropyFunctor { void operator()(const platform::CPUDeviceContext& ctx, framework::Tensor* out, const framework::Tensor* prob, const framework::Tensor* labels, const bool softLabel, - const int ignore_index) { + const int ignore_index, const int axis_dim) { const int batch_size = prob->dims()[0]; + const int num_classes = prob->dims()[1]; + const int num_remain = num_classes / axis_dim; + + Eigen::DSizes batch_axis_remain(batch_size, axis_dim, num_remain); + if (softLabel) { auto in = EigenMatrix::From(*prob); auto lbl = EigenMatrix::From(*labels); @@ -38,24 +43,24 @@ class CrossEntropyFunctor { loss.device(*ctx.eigen_device()) = -((lbl * in.log().unaryExpr(math::TolerableValue())) - .sum(Eigen::DSizes(1)) - .reshape(Eigen::DSizes(batch_size, 1))); + .reshape(batch_axis_remain) + .sum(Eigen::DSizes(1))); } else { - const int class_num = prob->dims()[1]; const T* prob_data = prob->data(); T* loss_data = out->data(); const int64_t* label_data = labels->data(); for (int i = 0; i < batch_size; ++i) { - int lbl = label_data[i]; - PADDLE_ENFORCE_GE(lbl, 0); - PADDLE_ENFORCE_LT(lbl, class_num); - PADDLE_ENFORCE((lbl >= 0 && lbl < class_num) || lbl == ignore_index); - int index = i * class_num + lbl; - loss_data[i] = - lbl == ignore_index - ? 0 - : -math::TolerableValue()(std::log(prob_data[index])); + for (int j = 0; j < num_remain; j++) { + int lbl = label_data[i * num_remain + j]; + PADDLE_ENFORCE((lbl >= 0 && lbl < axis_dim) || lbl == ignore_index); + int index = i * num_classes + lbl * num_remain + j; + int loss_idx = i * num_remain + j; + loss_data[loss_idx] = + lbl == ignore_index + ? 
0 + : -math::TolerableValue()(std::log(prob_data[index])); + } } } } diff --git a/paddle/fluid/operators/math/cross_entropy.cu b/paddle/fluid/operators/math/cross_entropy.cu index 44cbdf2e9882195819bc3ca047dbac6e2fa4e631..5bc05257aa9d3db7881330ca4547da439dab03bd 100644 --- a/paddle/fluid/operators/math/cross_entropy.cu +++ b/paddle/fluid/operators/math/cross_entropy.cu @@ -57,8 +57,8 @@ class CrossEntropyFunctor { public: void operator()(const platform::CUDADeviceContext& ctx, framework::Tensor* out, const framework::Tensor* prob, - const framework::Tensor* labels, bool softLabel, - const int ignore_index) { + const framework::Tensor* labels, const bool softLabel, + const int ignore_index, const int axis_dim) { const T* prob_data = prob->data(); T* loss_data = out->mutable_data(ctx.GetPlace()); diff --git a/paddle/fluid/operators/math/cross_entropy.h b/paddle/fluid/operators/math/cross_entropy.h index 99a4935186e1e6f9e3bf36eb029ce3d230510117..48082a7273dd7ad713fbc964ebbd1445ed887cdd 100644 --- a/paddle/fluid/operators/math/cross_entropy.h +++ b/paddle/fluid/operators/math/cross_entropy.h @@ -60,7 +60,7 @@ class CrossEntropyFunctor { void operator()(const DeviceContext& context, framework::Tensor* out, const framework::Tensor* prob, const framework::Tensor* labels, const bool softLabel, - const int ignore_index); + const int ignore_index, const int axis_dim); }; } // namespace math } // namespace operators diff --git a/paddle/fluid/operators/math/detail/gru_gpu_kernel.h b/paddle/fluid/operators/math/detail/gru_gpu_kernel.h index 6b57da1046a05b15b9c3302104d9f4d12c52227f..77d7ff57cda7416705bed7eb393366e1f87232a0 100644 --- a/paddle/fluid/operators/math/detail/gru_gpu_kernel.h +++ b/paddle/fluid/operators/math/detail/gru_gpu_kernel.h @@ -101,6 +101,122 @@ __global__ void KeGruForwardFinalOutput(OpFinalOutput op_final_output, output_value[frame_idx] = r_output; } +/* + * threads(tile_size, 1) + * grid(frame_blocks, 1) + */ +template +__global__ void KeFastCollectiveGruGate(T *gate_value, T *prev_output_value, + T *gate_weight, T *reset_output, + int frame_size, + ActivationType active_node) { + T xt_0 = 0.0f; + T a0 = 0.0f; + T c0 = 0.0f; + T b0[Tiled_size]; + + int COL = blockIdx.x * blockDim.x + threadIdx.x; + int Tiled_mask = ((1 << Tiled_size) - 1); + // Tiled matrix multiply using register shift, faster than sm. 
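In the comment above, "sm" means shared memory: rather than staging tiles in shared memory, each lane of the 16-wide tile keeps a column slice of the weight matrix in registers, and the other lanes' activation values arrive through warp shuffles, which is what the __shfl_sync/__shfl loops that follow implement. A stripped-down sketch of the broadcast pattern (standalone CUDA, one 16-lane tile matching the tiled_size of 16 used below; names are illustrative):

    #include <cstdio>

    // Lane j computes dot(a, b[:, j]) for a 16x16 tile without shared memory:
    // b's column stays in lane-private registers, and a[k] is broadcast from
    // lane k to the whole 16-lane segment via __shfl_sync.
    __global__ void TiledDotByShuffle(const float* a, const float* b,
                                      float* out) {
      int lane = threadIdx.x;  // 0..15
      float b_frag[16];
      for (int i = 0; i < 16; ++i) b_frag[i] = b[i * 16 + lane];  // column slice
      float a_frag = a[lane];  // lane k owns a[k]
      float acc = 0.f;
      for (int k = 0; k < 16; ++k) {
        acc += __shfl_sync(0xFFFF, a_frag, k, 16) * b_frag[k];  // broadcast a[k]
      }
      out[lane] = acc;
    }
    // Launch as TiledDotByShuffle<<<1, 16>>>(a, b, out) with device pointers.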
+ if (prev_output_value) { + for (int k = 0; k < (((frame_size - 1) / Tiled_size) + 1); ++k) { + a0 = 0; + if ((threadIdx.x + k * Tiled_size) < frame_size) { + a0 = prev_output_value[threadIdx.x + (k * Tiled_size)]; + } + for (int i = 0; i < Tiled_size; i++) { + if (COL < frame_size * 2 && (i + k * Tiled_size) < frame_size) { + b0[i] = gate_weight[(i + k * Tiled_size) * frame_size * 2 + COL]; + } + } + + for (int i = 0; i < Tiled_size; ++i) { +#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 700 + c0 = c0 + __shfl_sync(Tiled_mask, a0, i, Tiled_size) * b0[i]; +#else + c0 = c0 + __shfl(a0, i, Tiled_size) * b0[i]; +#endif + } + } + } + + __syncthreads(); + + if (COL < frame_size * 2) { + xt_0 = gate_value[COL]; + c0 += xt_0; + c0 = forward::activation(c0, active_node); + gate_value[COL] = c0; + if (frame_size <= COL && COL < frame_size * 2) { + T htp_0 = 0.0; + if (prev_output_value) { + htp_0 = prev_output_value[COL - frame_size]; + } + reset_output[COL - frame_size] = c0 * htp_0; + } else if (COL < frame_size) { + gate_value[COL] = c0; + } + } +} + +/* + * threads(tile_size, 1) + * grid(frame_blocks, 1) + */ +template +__global__ void KeFastCollectiveGruOut(T *gate_weight, T *prev_out_value, + T *output_value, T *gate_value, + T *reset_value, int frame_size, + ActivationType act_node, + bool origin_mode) { + int COL = blockIdx.x * blockDim.x + threadIdx.x; + + T a0 = 0.0f; + T b0[Tiled_size]; + T c0 = 0.0f; + + int Tiled_mask = ((1 << Tiled_size) - 1); + //- Tiled matrix multiply with register shift + if (prev_out_value) { + for (int k = 0; k < (((frame_size - 1) / Tiled_size) + 1); ++k) { + a0 = 0; + if ((threadIdx.x + k * Tiled_size) < frame_size) { + a0 = reset_value[threadIdx.x + (k * Tiled_size)]; + } + for (int i = 0; i < Tiled_size; i++) { + if (COL < frame_size && (i + k * Tiled_size) < frame_size) { + b0[i] = gate_weight[(i + k * Tiled_size) * frame_size + COL]; + } + } + + for (int i = 0; i < Tiled_size; ++i) { +#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 700 + c0 = c0 + __shfl_sync(Tiled_mask, a0, i, Tiled_size) * b0[i]; +#else + c0 = c0 + __shfl(a0, i, Tiled_size) * b0[i]; +#endif + } + } + } + + __syncthreads(); + + if (COL < frame_size) { + T xt_0 = gate_value[COL + 2 * frame_size]; + T gta_0 = gate_value[COL]; + T htp_0 = 0; + if (prev_out_value) htp_0 = prev_out_value[COL]; + c0 += xt_0; + c0 = forward::activation(c0, act_node); + gate_value[COL + 2 * frame_size] = c0; + if (origin_mode) { + output_value[COL] = htp_0 * gta_0 + (1 - gta_0) * c0; + } else { + output_value[COL] = c0 * gta_0 + (1 - gta_0) * htp_0; + } + } +} + /* * threads(frame_per_block, batch_per_block) * grid(frame_blocks, batch_blocks) diff --git a/paddle/fluid/operators/math/gru_compute.cu b/paddle/fluid/operators/math/gru_compute.cu index ec7e4d2228c38161bb1f3f97ec21b91db454adb4..75417cced237c48dda1f6e87c0647b10a66d0907 100644 --- a/paddle/fluid/operators/math/gru_compute.cu +++ b/paddle/fluid/operators/math/gru_compute.cu @@ -30,10 +30,25 @@ struct GRUUnitFunctor { dim3 threads; dim3 grid; if (batch_size == 1) { - int frame_per_block = frame_size <= 1024 ? 
frame_size : 1024; - int frame_blocks = (frame_size + 1024 - 1) / 1024; - threads = dim3(frame_per_block, 1); + constexpr int tiled_size = 16; + int frame_blocks = (frame_size * 2 + tiled_size - 1) / tiled_size; + threads = dim3(tiled_size, 1); grid = dim3(frame_blocks, 1); + + detail::KeFastCollectiveGruGate<<>>( + value.gate_value, value.prev_out_value, value.gate_weight, + value.reset_output_value, frame_size, active_gate); + + frame_blocks = (frame_size + tiled_size - 1) / tiled_size; + grid = dim3(frame_blocks, 1); + detail::KeFastCollectiveGruOut<<>>( + value.state_weight, value.prev_out_value, value.output_value, + value.gate_value, value.reset_output_value, frame_size, active_node, + origin_mode); + + return; } else { threads = dim3(32, 32); grid = dim3((frame_size + 32 - 1) / 32, (batch_size + 32 - 1) / 32); diff --git a/paddle/fluid/operators/ngraph/ops/lrn_op.h b/paddle/fluid/operators/ngraph/ops/lrn_op.h new file mode 100644 index 0000000000000000000000000000000000000000..68a0eea08928ae6bfdae50bef9a3f5c2fddde9c8 --- /dev/null +++ b/paddle/fluid/operators/ngraph/ops/lrn_op.h @@ -0,0 +1,54 @@ +// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once + +#include +#include +#include +#include + +#include "ngraph/ngraph.hpp" +#include "paddle/fluid/operators/ngraph/ops/op_bridge.h" +#include "paddle/fluid/platform/ngraph_helper.h" + +namespace paddle { +namespace operators { +namespace ngraphs { +static void BuildLrnNode( + const std::shared_ptr& op, + std::shared_ptr< + std::unordered_map>> + ngb_node_map) { + auto input = platform::GetInputNode(op, "X", ngb_node_map); + + auto op_attrs = framework::AttrReader(op->Attrs()); + const int n = op_attrs.Get("n"); + const float alpha = op_attrs.Get("alpha") * static_cast(n); + const float beta = op_attrs.Get("beta"); + const float k = op_attrs.Get("k"); + + auto lrn_out = std::make_shared(input, alpha, beta, k, n); + std::shared_ptr mid_out = paddle::platform::CreateConstant( + input->get_element_type(), input->get_shape(), {k}); + + platform::SetOutputNode(op, "MidOut", mid_out, ngb_node_map); + platform::SetOutputNode(op, "Out", lrn_out, ngb_node_map); +} + +} // namespace ngraphs +} // namespace operators +} // namespace paddle + +REGISTER_NG_OP(lrn, BuildLrnNode); diff --git a/paddle/fluid/operators/ngraph/ops/softmax_op.h b/paddle/fluid/operators/ngraph/ops/softmax_op.h index 6eb84703998c24ee7b9e0d4f6931c3fe0bd00e2e..e1f6e8d3cfdc56c00229bbe1c3b183c309d0394e 100644 --- a/paddle/fluid/operators/ngraph/ops/softmax_op.h +++ b/paddle/fluid/operators/ngraph/ops/softmax_op.h @@ -27,38 +27,38 @@ namespace paddle { namespace operators { namespace ngraphs { -std::shared_ptr GetSoftmax(std::shared_ptr x) { +std::shared_ptr GetSoftmax(std::shared_ptr x, + int axis = -1) { auto x_shape = x->get_shape(); - int rank = x_shape.size(); - auto x_2d_shape = paddle::platform::FlattenTo2d(x_shape, rank - 1); - x = paddle::platform::NgReshaper(x, x_2d_shape); + size_t rank = 
x_shape.size();
+  size_t softmax_axis = axis;
+  if (axis < 0) softmax_axis = rank + axis;
-  auto x_max = std::make_shared<ngraph::op::Max>(x, ngraph::AxisSet{1});
+  auto x_max =
+      std::make_shared<ngraph::op::Max>(x, ngraph::AxisSet{softmax_axis});
   auto x_max_bcast = std::make_shared<ngraph::op::Broadcast>(
-      x_max, x_2d_shape, ngraph::AxisSet{1});
+      x_max, x_shape, ngraph::AxisSet{softmax_axis});
   auto x_shifted = x - x_max_bcast;
   auto x_clipped =
       paddle::operators::ngraphs::ElementwiseScalar<ngraph::op::Maximum>(
           -64., x_shifted);
-  auto softmax =
-      std::make_shared<ngraph::op::Softmax>(x_clipped, ngraph::AxisSet{1});
+  auto softmax = std::make_shared<ngraph::op::Softmax>(
+      x_clipped, ngraph::AxisSet{softmax_axis});
   return softmax;
 }
 
-std::shared_ptr<ngraph::Node> GetSoftmaxGrad(
-    std::shared_ptr<ngraph::Node> out, std::shared_ptr<ngraph::Node> dout) {
+std::shared_ptr<ngraph::Node> GetSoftmaxGrad(std::shared_ptr<ngraph::Node> out,
+                                             std::shared_ptr<ngraph::Node> dout,
+                                             int axis = -1) {
   auto out_shape = out->get_shape();
-  int rank = out_shape.size();
-  auto out_2d_shape = paddle::platform::FlattenTo2d(out_shape, rank - 1);
-  auto dout_2d_shape =
-      paddle::platform::FlattenTo2d(dout->get_shape(), rank - 1);
-  out = paddle::platform::NgReshaper(out, out_2d_shape);
-  dout = paddle::platform::NgReshaper(dout, dout_2d_shape);
+  size_t rank = out_shape.size();
+  size_t softmax_axis = axis;
+  if (axis < 0) softmax_axis = rank + axis;
-  auto node_sum =
-      std::make_shared<ngraph::op::Sum>(out * dout, ngraph::AxisSet{1});
+  auto node_sum = std::make_shared<ngraph::op::Sum>(
+      out * dout, ngraph::AxisSet{softmax_axis});
   auto node_bcast = std::make_shared<ngraph::op::Broadcast>(
-      node_sum, out_2d_shape, ngraph::AxisSet{1});
+      node_sum, out_shape, ngraph::AxisSet{softmax_axis});
   auto dx = (dout - node_bcast) * out;
   return dx;
 }
@@ -68,8 +68,9 @@ void BuildSoftmaxNode(
     std::shared_ptr<
         std::unordered_map<std::string, std::shared_ptr<ngraph::Node>>>
         ngb_node_map) {
+  auto op_attrs = framework::AttrReader(op->Attrs());
   auto x = paddle::platform::GetInputNode(op, "X", ngb_node_map);
-  auto softmax = GetSoftmax(x);
+  auto softmax = GetSoftmax(x, op_attrs.Get<int>("axis"));
   paddle::platform::SetOutputNode(op, "Out", softmax, ngb_node_map);
 }
 
@@ -78,9 +79,10 @@ void BuildSoftmaxGradNode(
     std::shared_ptr<
         std::unordered_map<std::string, std::shared_ptr<ngraph::Node>>>
         ngb_node_map) {
+  auto op_attrs = framework::AttrReader(op->Attrs());
   auto out = paddle::platform::GetInputNode(op, "Out", ngb_node_map);
   auto dout = paddle::platform::GetInputNode(op, "Out@GRAD", ngb_node_map);
-  auto dx = GetSoftmaxGrad(out, dout);
+  auto dx = GetSoftmaxGrad(out, dout, op_attrs.Get<int>("axis"));
   paddle::platform::SetOutputNode(op, "X@GRAD", dx, ngb_node_map);
 }
 }  // namespace ngraphs
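Both the nGraph bridge above and the operator changes below accept a negative axis and fold it into [0, rank). The convention, in isolation (CanonicalAxis is the helper name the InferShape code below actually calls; this standalone version is just for illustration):

    #include <cassert>
    #include <cstdio>

    // Maps axis in [-rank, rank) onto [0, rank); e.g. axis = -1 -> rank - 1.
    static int CanonicalAxis(int axis, int rank) {
      assert(axis >= -rank && axis < rank);
      return axis < 0 ? axis + rank : axis;
    }

    int main() {
      std::printf("%d\n", CanonicalAxis(-1, 4));  // 3 (last dimension)
      std::printf("%d\n", CanonicalAxis(1, 4));   // 1
      return 0;
    }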
diff --git a/paddle/fluid/operators/softmax_with_cross_entropy_op.cc b/paddle/fluid/operators/softmax_with_cross_entropy_op.cc
index 371ab0384a3fa2ff22ac4e5c3d1e54aff237b47d..456f78d2022e471bf8d35be542b9cf8347a7a944 100644
--- a/paddle/fluid/operators/softmax_with_cross_entropy_op.cc
+++ b/paddle/fluid/operators/softmax_with_cross_entropy_op.cc
@@ -26,23 +26,28 @@ class SoftmaxWithCrossEntropyOpMaker
  public:
   void Make() override {
     AddInput("Logits",
-             "(Tensor, default: Tensor<float>), The unscaled log probabilities "
-             "which is a 2-D tensor with shape [N x K]. N is the batch_size, "
-             "and K is the class number.");
-    AddInput("Label",
-             "(Tensor) The ground truth which is a 2-D tensor. If soft_label "
-             "is set to false, Label is a Tensor<int64> with shape [N x 1]. If "
-             "soft_label is set to true, Label is a Tensor<float/double> with "
-             "shape [N x K].");
+             "(Tensor, default: Tensor<float>), The input tensor of unscaled "
+             "log probabilities, whose dimension :attr:`axis` should be scaled "
+             "by softmax.");
+    AddInput(
+        "Label",
+        "(Tensor) The input tensor of ground truth label. If :attr:`soft_label` "
+        "is set to false, Label is a Tensor<int64> in the same shape as "
+        "Input(Logits) except that dimension :attr:`axis` is 1. If "
+        "soft_label is set to true, Label is a Tensor<float/double> in the "
+        "same shape as Input(Logits).");
     AddOutput(
         "Softmax",
-        "(Tensor, default: Tensor<float>), A 2-D tensor with shape [N x K]. "
+        "(Tensor, default: Tensor<float>), A tensor in the same shape as "
+        "Input(Logits). "
        "The outputs value of softmax activation by given the input batch, "
        "which will be used in backward calculation.")
         .AsIntermediate();
     AddOutput("Loss",
-              "(Tensor, default: Tensor<float>), A 2-D tensor. The cross "
-              "entropy loss with shape [N x 1].");
+              "(Tensor, default: Tensor<float>), A tensor in the same shape as "
+              "Input(Logits) "
+              "except that dimension :attr:`axis` is 1. The cross "
+              "entropy loss.");
     AddAttr<bool>(
         "soft_label",
         "(bool, default: false), A flag to indicate whether to interpretate "
@@ -60,6 +65,10 @@
         "does not contribute to the input gradient. Only valid if soft_label"
         "is set to False")
         .SetDefault(-100);
+    AddAttr<int>("axis",
+                 "The dimension index of Input(Logits) to perform softmax, "
+                 "default -1 for last dimension")
+        .SetDefault(-1);
     AddComment(R"DOC(
 Softmax With Cross Entropy Operator.
@@ -107,38 +116,53 @@ class SoftmaxWithCrossEntropyOp : public framework::OperatorWithKernel {
                    "Output(Softmax) should be not null.");
     PADDLE_ENFORCE(ctx->HasOutput("Loss"), "Output(Loss) should be not null.");
+    auto axis = ctx->Attrs().Get<int>("axis");
     auto logits_dims = ctx->GetInputDim("Logits");
     auto labels_dims = ctx->GetInputDim("Label");
+    auto logits_rank = logits_dims.size();
+    PADDLE_ENFORCE(axis >= -logits_rank && axis < logits_rank,
+                   "Attr(axis) value should be in range [-R, R-1], "
+                   "R is the rank of Input(Logits).");
+
+    axis = CanonicalAxis(axis, logits_rank);
+    for (int i = 0; i < logits_rank; i++) {
+      if (i != axis) {
+        if (ctx->IsRuntime() || (logits_dims[i] > 0 && labels_dims[i] > 0)) {
+          PADDLE_ENFORCE_EQ(
+              logits_dims[i], labels_dims[i],
+              "Input(Logits) and Input(Label) should in same shape in "
+              "dimensions except axis.");
+        }
+      }
+    }
-    int rank = logits_dims.size();
-    PADDLE_ENFORCE_EQ(
-        rank, labels_dims.size(),
-        "Input(logits) and Input(Label) shall have the same rank.");
-    bool check = ctx->IsRuntime() || (framework::product(logits_dims) > 0 &&
-                                      framework::product(labels_dims) > 0);
-    if (check) {
-      PADDLE_ENFORCE_EQ(framework::slice_ddim(logits_dims, 0, rank - 1),
-                        framework::slice_ddim(labels_dims, 0, rank - 1),
-                        "Input(X) and Input(Label) shall have the same shape "
-                        "except the last dimension.");
+    auto numeric_stable_mode = ctx->Attrs().Get<bool>("numeric_stable_mode");
+    if (axis != logits_rank - 1) {
+      PADDLE_ENFORCE(
+          numeric_stable_mode,
+          "Attr(axis) can only be -1 when not in numeric_stable_mode.");
     }
-    if (ctx->Attrs().Get<bool>("soft_label")) {
-      if (check) {
-        PADDLE_ENFORCE_EQ(logits_dims[rank - 1], labels_dims[rank - 1],
-                          "If Attr(soft_label) == true, the last dimension of "
+    bool soft_label = ctx->Attrs().Get<bool>("soft_label");
+    if (soft_label) {
+      if (ctx->IsRuntime() ||
+          (logits_dims[axis] > 0 && labels_dims[axis] > 0)) {
+        PADDLE_ENFORCE_EQ(logits_dims[axis], labels_dims[axis],
+                          "If Attr(soft_label) == true, the axis dimension of "
                           "Input(X) and Input(Label) should be equal.");
       }
     } else {
-      PADDLE_ENFORCE_EQ(labels_dims[rank - 1], 1UL,
-                        "If Attr(softLabel) == false, the last dimension of "
-                        "Input(Label) should be 1.");
+      if (ctx->IsRuntime() || labels_dims[axis] > 0) {
+
PADDLE_ENFORCE_EQ(labels_dims[axis], 1UL, + "If Attr(soft_label) == false, the axis dimension of " + "Input(Label) should be 1."); + } } ctx->SetOutputDim("Softmax", logits_dims); - auto loss_dims = logits_dims; - loss_dims[rank - 1] = 1; - ctx->SetOutputDim("Loss", loss_dims); + + logits_dims[axis] = 1; + ctx->SetOutputDim("Loss", logits_dims); ctx->ShareLoD("Logits", /*->*/ "Softmax"); ctx->ShareLoD("Logits", /*->*/ "Loss"); @@ -165,36 +189,40 @@ class SoftmaxWithCrossEntropyOpGrad : public framework::OperatorWithKernel { PADDLE_ENFORCE(ctx->HasOutput(framework::GradVarName("Logits")), "Output(Logits@Grad) should be not null."); + auto axis = ctx->Attrs().Get("axis"); auto softmax_dims = ctx->GetInputDim("Softmax"); auto labels_dims = ctx->GetInputDim("Label"); - - int rank = softmax_dims.size(); - PADDLE_ENFORCE_EQ( - rank, labels_dims.size(), - "Input(logits) and Input(Label) shall have the same rank."); - bool check = true; - if ((!ctx->IsRuntime()) && (framework::product(softmax_dims) <= 0 || - framework::product(labels_dims) <= 0)) { - check = false; - } - if (check) { - PADDLE_ENFORCE_EQ( - framework::slice_ddim(softmax_dims, 0, rank - 1), - framework::slice_ddim(labels_dims, 0, rank - 1), - "Input(Softmax) and Input(Label) shall have the same shape " - "except the last dimension."); + auto softmax_rank = softmax_dims.size(); + PADDLE_ENFORCE(axis >= -softmax_rank && axis < softmax_rank, + "Attr(axis) value should be in range [-R, R-1], " + "R is the rank of Input(Logits)."); + + axis = CanonicalAxis(axis, softmax_rank); + for (int i = 0; i < softmax_rank; i++) { + if (i != axis) { + if (ctx->IsRuntime() || (softmax_dims[i] > 0 && labels_dims[i] > 0)) { + PADDLE_ENFORCE_EQ( + softmax_dims[i], labels_dims[i], + "Input(Logits) and Input(Label) should in same shape in " + "dimensions except axis."); + } + } } - if (ctx->Attrs().Get("soft_label")) { - if (check) { - PADDLE_ENFORCE_EQ(softmax_dims[rank - 1], labels_dims[rank - 1], - "If Attr(soft_label) == true, the last dimension of " - "Input( Softmax) and Input(Label) should be equal."); + bool soft_label = ctx->Attrs().Get("soft_label"); + if (soft_label) { + if (ctx->IsRuntime() || + (softmax_dims[axis] > 0 && labels_dims[axis] > 0)) { + PADDLE_ENFORCE_EQ(softmax_dims[axis], labels_dims[axis], + "If Attr(soft_label) == true, the axis dimension of " + "Input(X) and Input(Label) should be equal."); } } else { - PADDLE_ENFORCE_EQ(labels_dims[rank - 1], 1UL, - "If Attr(softLabel) == false, the last dimension of " - "Input(Label) should be 1."); + if (ctx->IsRuntime() || labels_dims[axis] > 0) { + PADDLE_ENFORCE_EQ(labels_dims[axis], 1UL, + "If Attr(soft_label) == false, the axis dimension of " + "Input(Label) should be 1."); + } } ctx->SetOutputDim(framework::GradVarName("Logits"), @@ -233,11 +261,7 @@ class SoftmaxWithCrossEntropyInplaceInference public: std::unordered_map operator()( const framework::OpDesc& op_desc, bool use_cuda) const { - if (use_cuda && !boost::get(op_desc.GetAttr("soft_label"))) { - return {{"Logits", "Softmax"}}; - } else { - return {}; - } + return {{"Logits", "Softmax"}}; } }; diff --git a/paddle/fluid/operators/softmax_with_cross_entropy_op.cu b/paddle/fluid/operators/softmax_with_cross_entropy_op.cu index dc5ec7bc38cb60d15f796f6523b920b6696510cd..12b64052a7cd63be5bcd6be7c313111fb0727b5f 100644 --- a/paddle/fluid/operators/softmax_with_cross_entropy_op.cu +++ b/paddle/fluid/operators/softmax_with_cross_entropy_op.cu @@ -10,6 +10,7 @@ See the License for the specific language governing permissions and 
limitations under the License. */ #include #include "paddle/fluid/operators/math/cross_entropy.h" +#include "paddle/fluid/operators/math/math_function.h" #include "paddle/fluid/operators/softmax_with_cross_entropy_op.h" #include "paddle/fluid/platform/for_range.h" @@ -21,11 +22,13 @@ using Tensor = framework::Tensor; namespace { template __global__ void CrossEntropyGrad(T* logit_grad, const int64_t* labels, - const int batch_size, const int class_num, + const int n, const int d, const int remain, const int ignore_index) { - for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < batch_size; + for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n * remain; i += blockDim.x * gridDim.x) { - int idx = i * class_num + labels[i]; + int idx_n = i / remain; + int idx_remain = i % remain; + int idx = idx_n * d + labels[i] * remain + idx_remain; logit_grad[idx] -= ignore_index == labels[i] ? static_cast(0.) : static_cast(1.); } @@ -33,23 +36,26 @@ __global__ void CrossEntropyGrad(T* logit_grad, const int64_t* labels, template __global__ void Scale(T* logit_grad, const T* loss_grad, const int num, - const int class_num) { + const int d, const int remain) { for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < num; i += blockDim.x * gridDim.x) { - logit_grad[i] *= loss_grad[i / class_num]; + int idx_n = i / d; + int idx_remain = i % remain; + logit_grad[i] *= loss_grad[idx_n * remain + idx_remain]; } } template __global__ void SoftCrossEntropyGradientKernel(T* logit_grad, const T* loss_grad, - const T* labels, - const int batch_size, - const int class_num) { + const T* labels, const int n, + const int d, const int remain) { int ids = blockIdx.x * blockDim.x + threadIdx.x; - if (ids < batch_size * class_num) { - int row_ids = ids / class_num; - logit_grad[ids] = loss_grad[row_ids] * (logit_grad[ids] - labels[ids]); + if (ids < n * d) { + int idx_n = ids / d; + int idx_remain = ids % remain; + int idx_loss = idx_n * remain + idx_remain; + logit_grad[ids] = loss_grad[idx_loss] * (logit_grad[ids] - labels[ids]); } } @@ -116,23 +122,30 @@ using BlockReduce = template using BlockReduceTempStorage = typename BlockReduce::TempStorage; -// Make sure that BlockDim <= feature_size +// Make sure that BlockDim <= axis_dim // This kernel is used to calculate the max element of each row template static __global__ void RowReductionForMax(const T* logits_data, T* max_data, - int feature_size) { + int d, int axis_dim) { __shared__ BlockReduceTempStorage temp_storage; - auto beg_idx = feature_size * blockIdx.x + threadIdx.x; - auto end_idx = feature_size * (blockIdx.x + 1); + // logits_data view as [n, axis_dim, remain] + // max_data view as [n, 1, remain] + // blockDim = n * remain, split blockIdx to idx_n and idx_remain + int remain = d / axis_dim; + int idx_n = blockIdx.x / remain; + int idx_remain = blockIdx.x % remain; + int beg_idx = idx_n * d + threadIdx.x * remain + idx_remain; + int end_idx = (idx_n + 1) * d; + int step = BlockDim * remain; T cur_max = logits_data[beg_idx]; - beg_idx += BlockDim; + beg_idx += step; while (beg_idx < end_idx) { if (cur_max < logits_data[beg_idx]) { cur_max = logits_data[beg_idx]; } - beg_idx += BlockDim; + beg_idx += step; } cur_max = BlockReduce(temp_storage).Reduce(cur_max, cub::Max()); @@ -143,25 +156,32 @@ static __global__ void RowReductionForMax(const T* logits_data, T* max_data, } } -// Make sure that BlockDim <= feature_size +// Make sure that BlockDim <= axis_dim template static __global__ void RowReductionForDiffMaxSum(const T* logits_data, - T* max_data, T* 
softmax, - int feature_size) { + T* max_data, T* softmax, int d, + int axis_dim) { __shared__ BlockReduceTempStorage temp_storage; - auto beg_idx = feature_size * blockIdx.x + threadIdx.x; - auto end_idx = feature_size * (blockIdx.x + 1); + // logits, softmax data view as [n, axis_dim, remain] + // max_data view as [n, 1, remain] + // blockDim = n * remain, split blockIdx to idx_n and idx_remain + int remain = d / axis_dim; + int idx_n = blockIdx.x / remain; + int idx_remain = blockIdx.x % remain; + int beg_idx = idx_n * d + threadIdx.x * remain + idx_remain; + int end_idx = (idx_n + 1) * d; auto block_max = max_data[blockIdx.x]; + int step = BlockDim * remain; softmax[beg_idx] = logits_data[beg_idx] - block_max; T diff_max_sum = exp_on_device(softmax[beg_idx]); - auto idx = beg_idx + BlockDim; + auto idx = beg_idx + step; while (idx < end_idx) { softmax[idx] = logits_data[idx] - block_max; diff_max_sum += exp_on_device(softmax[idx]); - idx += BlockDim; + idx += step; } diff_max_sum = @@ -172,34 +192,42 @@ static __global__ void RowReductionForDiffMaxSum(const T* logits_data, __syncthreads(); diff_max_sum = max_data[blockIdx.x]; softmax[beg_idx] -= diff_max_sum; - beg_idx += BlockDim; + beg_idx += step; while (beg_idx < end_idx) { softmax[beg_idx] -= diff_max_sum; - beg_idx += BlockDim; + beg_idx += step; } if (threadIdx.x == 0) max_data[blockIdx.x] = 0; } -// Make sure that BlockDim <= feature_size +// Make sure that BlockDim <= axis_dim template static __global__ void RowReductionForSoftmaxAndCrossEntropy( - const T* labels_data, T* loss_data, T* softmax, int feature_size) { + const T* logits_data, const T* labels_data, T* loss_data, T* softmax, int d, + int axis_dim) { __shared__ BlockReduceTempStorage temp_storage; - auto beg_idx = feature_size * blockIdx.x + threadIdx.x; - auto end_idx = feature_size * (blockIdx.x + 1); + // logits, softmax, labels data view as [n, axis_dim, remain] + // loss_data view as [n, 1, remain] + // blockDim = n * remain, split blockIdx to idx_n and idx_remain + int remain = d / axis_dim; + int idx_n = blockIdx.x / remain; + int idx_remain = blockIdx.x % remain; + int beg_idx = idx_n * d + threadIdx.x * remain + idx_remain; + int end_idx = (idx_n + 1) * d; // log_diff_max_sum shares memory with loss auto block_log_diff_max_sum = loss_data[blockIdx.x]; auto tmp = softmax[beg_idx] - block_log_diff_max_sum; softmax[beg_idx] = exp_on_device(tmp); auto loss = -labels_data[beg_idx] * tmp; - beg_idx += BlockDim; + int step = BlockDim * remain; + beg_idx += step; while (beg_idx < end_idx) { tmp = softmax[beg_idx] - block_log_diff_max_sum; softmax[beg_idx] = exp_on_device(tmp); loss -= (labels_data[beg_idx] * tmp); - beg_idx += BlockDim; + beg_idx += step; } loss = BlockReduce(temp_storage).Reduce(loss, cub::Sum()); @@ -210,21 +238,27 @@ template struct HardLabelSoftmaxWithCrossEntropyFunctor { public: HardLabelSoftmaxWithCrossEntropyFunctor(const int64_t* labels, T* loss, - T* log_softmax, int feature_size) + T* log_softmax, int d, int axis_dim) : labels_(labels), loss_(loss), log_softmax_(log_softmax), - feature_size_(feature_size) {} + d_(d), + axis_dim_(axis_dim) {} __device__ void operator()(int idx) const { - auto row_idx = idx / feature_size_; - auto col_idx = idx % feature_size_; - if (col_idx != labels_[row_idx]) { + // logits view as [n, axis_dim, remain], where d = axis_dim * remain + int remain = d_ / axis_dim_; + int idx_n = idx / d_; + int idx_axis = (idx % d_) / remain; + int idx_remain = idx % remain; + // labels, loss view as [n, remain] + int idx_lbl 
= idx_n * remain + idx_remain; + if (idx_axis != labels_[idx_lbl]) { log_softmax_[idx] = exp_on_device(log_softmax_[idx]); } else { auto softmax = log_softmax_[idx]; log_softmax_[idx] = exp_on_device(softmax); - loss_[row_idx] = -softmax; + loss_[idx_lbl] = -softmax; } } @@ -232,7 +266,8 @@ struct HardLabelSoftmaxWithCrossEntropyFunctor { const int64_t* labels_; T* loss_; T* log_softmax_; - int feature_size_; + int d_; + int axis_dim_; }; template @@ -240,23 +275,29 @@ struct HardLabelSoftmaxWithCrossEntropyFunctorWithIgnoreIdx { public: HardLabelSoftmaxWithCrossEntropyFunctorWithIgnoreIdx(const int64_t* labels, T* loss, T* log_softmax, - int feature_size, + int d, int axis_dim, int ignore_idx) : labels_(labels), loss_(loss), log_softmax_(log_softmax), - feature_size_(feature_size), + d_(d), + axis_dim_(axis_dim), ignore_idx_(ignore_idx) {} __device__ void operator()(int idx) const { - auto row_idx = idx / feature_size_; - auto col_idx = idx % feature_size_; - if (col_idx != labels_[row_idx] || col_idx == ignore_idx_) { + // logits view as [n, axis_dim, remain], where d = axis_dim * remain + int remain = d_ / axis_dim_; + int idx_n = idx / d_; + int idx_axis = (idx % d_) / remain; + int idx_remain = idx % remain; + // labels, loss view as [n, remain] + int idx_lbl = idx_n * remain + idx_remain; + if (idx_axis != labels_[idx_lbl] || idx_axis == ignore_idx_) { log_softmax_[idx] = exp_on_device(log_softmax_[idx]); } else { auto softmax = log_softmax_[idx]; log_softmax_[idx] = exp_on_device(softmax); - loss_[row_idx] = -softmax; + loss_[idx_lbl] = -softmax; } } @@ -264,44 +305,38 @@ struct HardLabelSoftmaxWithCrossEntropyFunctorWithIgnoreIdx { const int64_t* labels_; T* loss_; T* log_softmax_; - int feature_size_; + int d_; + int axis_dim_; int ignore_idx_; }; -template -static __global__ void SetSoftmaxToOneWhenFeatureSizeIsOne(T* out, - int batch_size) { - auto idx = threadIdx.x + blockIdx.x * blockDim.x; - if (idx < batch_size) out[idx] = static_cast(1); -} - template static void HardLabelSoftmaxWithCrossEntropy( const platform::CUDADeviceContext& ctx, const T* logits_data, - const int64_t* labels_data, T* loss_data, T* softmax_data, int batch_size, - int feature_size, int ignore_idx) { + const int64_t* labels_data, T* loss_data, T* softmax_data, int n, int d, + int axis_dim, int ignore_idx) { constexpr int kMaxBlockDim = 512; - int block_dim = feature_size >= kMaxBlockDim + int block_dim = axis_dim >= kMaxBlockDim ? 
kMaxBlockDim - : (1 << static_cast(std::log2(feature_size))); + : (1 << static_cast(std::log2(axis_dim))); + int grid_dim = n * d / axis_dim; auto stream = ctx.stream(); -#define CALL_HARD_LABEL_SOFTMAX_WITH_CROSS_ENTROPY_FUSED_KERNEL(BlockDim) \ - case BlockDim: { \ - RowReductionForMax<<>>( \ - logits_data, loss_data, feature_size); \ - RowReductionForDiffMaxSum<<>>( \ - logits_data, loss_data, softmax_data, feature_size); \ - platform::ForRange for_range( \ - ctx, batch_size* feature_size); \ - if (ignore_idx >= 0 && ignore_idx < feature_size) { \ - for_range(HardLabelSoftmaxWithCrossEntropyFunctorWithIgnoreIdx( \ - labels_data, loss_data, softmax_data, feature_size, ignore_idx)); \ - } else { \ - for_range(HardLabelSoftmaxWithCrossEntropyFunctor( \ - labels_data, loss_data, softmax_data, feature_size)); \ - } \ +#define CALL_HARD_LABEL_SOFTMAX_WITH_CROSS_ENTROPY_FUSED_KERNEL(BlockDim) \ + case BlockDim: { \ + RowReductionForMax<<>>( \ + logits_data, loss_data, d, axis_dim); \ + RowReductionForDiffMaxSum<<>>( \ + logits_data, loss_data, softmax_data, d, axis_dim); \ + platform::ForRange for_range(ctx, n* d); \ + if (ignore_idx >= 0 && ignore_idx < axis_dim) { \ + for_range(HardLabelSoftmaxWithCrossEntropyFunctorWithIgnoreIdx( \ + labels_data, loss_data, softmax_data, d, axis_dim, ignore_idx)); \ + } else { \ + for_range(HardLabelSoftmaxWithCrossEntropyFunctor( \ + labels_data, loss_data, softmax_data, d, axis_dim)); \ + } \ } break switch (block_dim) { @@ -314,13 +349,6 @@ static void HardLabelSoftmaxWithCrossEntropy( CALL_HARD_LABEL_SOFTMAX_WITH_CROSS_ENTROPY_FUSED_KERNEL(8); CALL_HARD_LABEL_SOFTMAX_WITH_CROSS_ENTROPY_FUSED_KERNEL(4); CALL_HARD_LABEL_SOFTMAX_WITH_CROSS_ENTROPY_FUSED_KERNEL(2); - case 1: - SetSoftmaxToOneWhenFeatureSizeIsOne<<<(batch_size + kMaxBlockDim - 1) / - kMaxBlockDim, - kMaxBlockDim, 0, stream>>>( - softmax_data, batch_size); - cudaMemsetAsync(loss_data, 0, batch_size * sizeof(T), stream); - break; default: PADDLE_THROW("BlockDim must be 2^n in softmax_with_cross_entropy_op"); break; @@ -332,23 +360,23 @@ template static void SoftmaxWithCrossEntropyFusedKernel(const T* logits_data, const T* labels_data, T* softmax_data, T* loss_data, - int batch_size, int feature_size, + int n, int d, int axis_dim, cudaStream_t stream) { constexpr int kMaxBlockDim = 512; - int block_dim = feature_size >= kMaxBlockDim + int block_dim = axis_dim >= kMaxBlockDim ? 
kMaxBlockDim - : (1 << static_cast(std::log2(feature_size))); - -#define CALL_SOFTMAX_WITH_CROSS_ENTROPY_FUSED_KERNEL(BlockDim) \ - case BlockDim: \ - RowReductionForMax<<>>( \ - logits_data, loss_data, feature_size); \ - RowReductionForDiffMaxSum<<>>( \ - logits_data, loss_data, softmax_data, feature_size); \ - RowReductionForSoftmaxAndCrossEntropy< \ - T, BlockDim><<>>( \ - labels_data, loss_data, softmax_data, feature_size); \ + : (1 << static_cast(std::log2(axis_dim))); + int grid_dim = n * d / axis_dim; + +#define CALL_SOFTMAX_WITH_CROSS_ENTROPY_FUSED_KERNEL(BlockDim) \ + case BlockDim: \ + RowReductionForMax<<>>( \ + logits_data, loss_data, d, axis_dim); \ + RowReductionForDiffMaxSum<<>>( \ + logits_data, loss_data, softmax_data, d, axis_dim); \ + RowReductionForSoftmaxAndCrossEntropy< \ + T, BlockDim><<>>( \ + logits_data, labels_data, loss_data, softmax_data, d, axis_dim); \ break switch (block_dim) { @@ -361,13 +389,6 @@ static void SoftmaxWithCrossEntropyFusedKernel(const T* logits_data, CALL_SOFTMAX_WITH_CROSS_ENTROPY_FUSED_KERNEL(8); CALL_SOFTMAX_WITH_CROSS_ENTROPY_FUSED_KERNEL(4); CALL_SOFTMAX_WITH_CROSS_ENTROPY_FUSED_KERNEL(2); - case 1: - SetSoftmaxToOneWhenFeatureSizeIsOne<<<(batch_size + kMaxBlockDim - 1) / - kMaxBlockDim, - kMaxBlockDim, 0, stream>>>( - softmax_data, batch_size); - cudaMemsetAsync(loss_data, 0, batch_size * sizeof(T), stream); - break; default: PADDLE_THROW("BlockDim must be 2^n in softmax_with_cross_entropy_op"); break; @@ -385,51 +406,53 @@ class SoftmaxWithCrossEntropyCUDAKernel : public framework::OpKernel { const Tensor* logits = context.Input("Logits"); const Tensor* labels = context.Input("Label"); Tensor* softmax = context.Output("Softmax"); - Tensor* loss = context.Output("Loss"); + + const int rank = logits->dims().size(); + const int axis = CanonicalAxis(context.Attr("axis"), rank); + int axis_dim = logits->dims()[axis]; + + if (axis_dim == 1) { + math::SetConstant set_constant; + set_constant(context.cuda_device_context(), softmax, static_cast(1)); + set_constant(context.cuda_device_context(), loss, static_cast(0)); + return; + } + + const int n = SizeToAxis(axis, logits->dims()); + const int d = SizeFromAxis(axis, logits->dims()); + auto* softmax_data = softmax->mutable_data(context.GetPlace()); auto* loss_data = loss->mutable_data(context.GetPlace()); auto soft_label = context.Attr("soft_label"); auto ignore_index = context.Attr("ignore_index"); - int rank = logits->dims().size(); if (soft_label) { - int batch_size = 1; - for (int i = 0; i < rank - 1; ++i) { - batch_size *= logits->dims()[i]; - } - - int feature_size = logits->dims()[rank - 1]; auto* logits_data = logits->data(); auto* labels_data = labels->data(); SoftmaxWithCrossEntropyFusedKernel( - logits_data, labels_data, softmax_data, loss_data, batch_size, - feature_size, context.cuda_device_context().stream()); + logits_data, labels_data, softmax_data, loss_data, n, d, axis_dim, + context.cuda_device_context().stream()); } else { if (!context.Attr("numeric_stable_mode")) { - // reshape to 2d - Tensor logits_2d = framework::ReshapeToMatrix(*logits, rank - 1); - Tensor softmax_2d = framework::ReshapeToMatrix(*softmax, rank - 1); - Tensor loss_2d = framework::ReshapeToMatrix(*loss, rank - 1); - Tensor labels_2d = framework::ReshapeToMatrix(*labels, rank - 1); - + // CUDNN kernel only supports 2-D tensors and performs softmax on last dim + Tensor logits_2d, softmax_2d, labels_2d, loss_2d; + logits_2d.ShareDataWith(*logits).Resize({n, d}); + softmax_2d.ShareDataWith(*softmax).Resize({n,
d}); + labels_2d.ShareDataWith(*labels).Resize({n, labels->numel() / n}); + loss_2d.ShareDataWith(*loss).Resize({n, 1}); math::SoftmaxCUDNNFunctor()(context.cuda_device_context(), &logits_2d, &softmax_2d); math::CrossEntropyFunctor()( context.cuda_device_context(), &loss_2d, &softmax_2d, &labels_2d, - false, ignore_index); + false, ignore_index, axis_dim); } else { - int batch_size = 1; - for (int i = 0; i < rank - 1; ++i) { - batch_size *= logits->dims()[i]; - } - int feature_size = logits->dims()[rank - 1]; auto* logits_data = logits->data(); auto* labels_data = labels->data(); HardLabelSoftmaxWithCrossEntropy( context.cuda_device_context(), logits_data, labels_data, loss_data, - softmax_data, batch_size, feature_size, ignore_index); + softmax_data, n, d, axis_dim, ignore_index); } } } @@ -453,30 +476,31 @@ class SoftmaxWithCrossEntropyGradCUDAKernel : public framework::OpKernel { } T* logit_grad_data = logit_grad->data(); - int rank = logit_grad->dims().size(); - int batch_size = 1; - for (int i = 0; i < rank - 1; ++i) { - batch_size *= logit_grad->dims()[i]; - } + const int rank = logit_grad->dims().size(); + const int axis = CanonicalAxis(context.Attr("axis"), rank); + int axis_dim = logit_grad->dims()[axis]; + + const int n = SizeToAxis(axis, logit_grad->dims()); + const int d = SizeFromAxis(axis, logit_grad->dims()); + const int remain = d / axis_dim; - const int class_num = logit_grad->dims()[rank - 1]; int block = 512; auto stream = context.cuda_device_context().stream(); auto ignore_index = context.Attr("ignore_index"); if (context.Attr("soft_label")) { - int grid = (batch_size * class_num + block - 1) / block; + int grid = (n * d + block - 1) / block; const T* label_data = labels->data(); SoftCrossEntropyGradientKernel<<>>( - logit_grad_data, loss_grad_data, label_data, batch_size, class_num); + logit_grad_data, loss_grad_data, label_data, n, d, remain); } else { - int grid = (batch_size + block - 1) / block; + int grid = (n * remain + block - 1) / block; const int64_t* label_data = labels->data(); CrossEntropyGrad<<>>( - logit_grad_data, label_data, batch_size, class_num, ignore_index); - int num = batch_size * class_num; + logit_grad_data, label_data, n, d, remain, ignore_index); + int num = n * d; grid = (num + block - 1) / block; Scale<<>>(logit_grad_data, loss_grad_data, num, - class_num); + d, remain); } } }; diff --git a/paddle/fluid/operators/softmax_with_cross_entropy_op.h b/paddle/fluid/operators/softmax_with_cross_entropy_op.h index 7ef7c4f7424f2690f95fae0a70c1bdc6eb387502..4533295a8d8c0d7f36522143adc2820020179ace 100644 --- a/paddle/fluid/operators/softmax_with_cross_entropy_op.h +++ b/paddle/fluid/operators/softmax_with_cross_entropy_op.h @@ -17,6 +17,7 @@ limitations under the License. 
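A note on the launch geometry in the gradient kernels above: with n the product of the dimensions before the softmax axis, d the product of the dimensions from the axis onward, and remain = d / axis_dim, the soft-label kernel covers all n * d logit elements, the hard-label CrossEntropyGrad touches only one class per (n, remain) slot, and the follow-up Scale pass covers all n * d elements again. A minimal host-side sketch of that arithmetic (the names and sizes are illustrative, not the operator's API):

    #include <cstdio>

    int main() {
      const int n = 8, axis_dim = 10, remain = 16;
      const int d = axis_dim * remain;  // elements per sample from the axis onward
      const int block = 512;

      int soft_grid = (n * d + block - 1) / block;       // one thread per logit
      int hard_grid = (n * remain + block - 1) / block;  // one thread per label slot
      int scale_grid = (n * d + block - 1) / block;      // rescale every logit

      std::printf("grids: soft=%d hard=%d scale=%d\n", soft_grid, hard_grid,
                  scale_grid);
      return 0;
    }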
*/ #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/operators/math/cross_entropy.h" #include "paddle/fluid/operators/math/softmax.h" +#include "paddle/fluid/operators/softmax_op.h" namespace paddle { namespace operators { @@ -36,26 +37,30 @@ class SoftmaxWithCrossEntropyKernel : public framework::OpKernel { const Tensor* labels = context.Input("Label"); Tensor* softmax = context.Output("Softmax"); Tensor* loss = context.Output("Loss"); + const bool soft_label = context.Attr("soft_label"); + + const int rank = logits->dims().size(); + const int axis = CanonicalAxis(context.Attr("axis"), rank); + int axis_dim = logits->dims()[axis]; softmax->mutable_data(context.GetPlace()); loss->mutable_data(context.GetPlace()); - // reshape to 2D tensor - int rank = logits->dims().size(); - Tensor logits_2d = framework::ReshapeToMatrix(*logits, rank - 1); - Tensor labels_2d = framework::ReshapeToMatrix(*labels, rank - 1); - Tensor loss_2d = framework::ReshapeToMatrix(*loss, rank - 1); - Tensor softmax_2d = framework::ReshapeToMatrix(*softmax, rank - 1); - - int axis_dim = logits->dims()[rank - 1]; + const int n = SizeToAxis(axis, logits->dims()); + const int d = SizeFromAxis(axis, logits->dims()); + Tensor logits_2d, softmax_2d, labels_2d, loss_2d; + logits_2d.ShareDataWith(*logits).Resize({n, d}); + softmax_2d.ShareDataWith(*softmax).Resize({n, d}); + labels_2d.ShareDataWith(*labels).Resize({n, labels->numel() / n}); + loss_2d.ShareDataWith(*loss).Resize({n, d / axis_dim}); auto& dev_ctx = context.template device_context(); math::SoftmaxFunctor()( dev_ctx, axis_dim, &logits_2d, &softmax_2d); math::CrossEntropyFunctor()( - dev_ctx, &loss_2d, &softmax_2d, &labels_2d, - context.Attr("soft_label"), context.Attr("ignore_index")); + dev_ctx, &loss_2d, &softmax_2d, &labels_2d, soft_label, + context.Attr("ignore_index"), axis_dim); } }; @@ -75,34 +80,43 @@ class SoftmaxWithCrossEntropyGradKernel : public framework::OpKernel { context.device_context(), logit_grad); } - int rank = logit_grad->dims().size(); - const int class_num = logit_grad->dims()[rank - 1]; - // reshape to 2d - Tensor logit_grad_2d = framework::ReshapeToMatrix(*logit_grad, rank - 1); - Tensor out_grad_2d = framework::ReshapeToMatrix(*out_grad, rank - 1); + const bool soft_label = context.Attr("soft_label"); + + const int rank = logit_grad->dims().size(); + const int axis = CanonicalAxis(context.Attr("axis"), rank); + int axis_dim = logit_grad->dims()[axis]; + + const int n = SizeToAxis(axis, logit_grad->dims()); + const int d = SizeFromAxis(axis, logit_grad->dims()); + Tensor logit_grad_2d, labels_2d, out_grad_2d; + logit_grad_2d.ShareDataWith(*logit_grad).Resize({n, d}); + labels_2d.ShareDataWith(*labels).Resize({n, labels->numel() / n}); + out_grad_2d.ShareDataWith(*out_grad).Resize({n, d / axis_dim}); auto out_grad_mat = EigenMatrix::From(out_grad_2d); auto logit_grad_mat = EigenMatrix::From(logit_grad_2d); auto& place = *context.template device_context() .eigen_device(); - if (context.Attr("soft_label")) { - Tensor labels_2d = framework::ReshapeToMatrix(*labels, rank - 1); + if (soft_label) { auto lbl_mat = EigenMatrix::From(labels_2d); logit_grad_mat.device(place) = - out_grad_mat.broadcast(Eigen::DSizes(1, class_num)) * + out_grad_mat.broadcast(Eigen::DSizes(1, axis_dim)) * (logit_grad_mat - lbl_mat); } else { logit_grad_mat.device(place) = logit_grad_mat * - out_grad_mat.broadcast(Eigen::DSizes(1, class_num)); - - const int batch_size = logit_grad_2d.dims()[0]; + out_grad_mat.broadcast(Eigen::DSizes(1, axis_dim)); 
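The hard-label branch continues below with a scatter update: after the broadcast above has multiplied every logit gradient by the upstream loss gradient, the upstream gradient is subtracted at exactly one class per (row, remain) slot, which is the gradient of -log softmax at the labeled class. A host-side reference of that indexing under the [n, axis_dim, remain] view (all names here are illustrative, not the kernel itself):

    #include <cassert>
    #include <cstdint>
    #include <vector>

    // Mirrors the scatter loop in the hunk below.
    void HardLabelGradRef(std::vector<float>* logit_grad,
                          const std::vector<int64_t>& label,
                          const std::vector<float>& out_grad, int n,
                          int axis_dim, int remain) {
      const int d = axis_dim * remain;
      assert(static_cast<int>(logit_grad->size()) == n * d);
      for (int i = 0; i < n; ++i) {
        for (int j = 0; j < remain; ++j) {
          const int idx = i * remain + j;                // slot in label/out_grad
          const int cls = static_cast<int>(label[idx]);  // class along the axis
          (*logit_grad)[i * d + cls * remain + j] -= out_grad[idx];
        }
      }
    }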
const int64_t* label_data = labels->data(); T* logit_grad_data = logit_grad->data(); const T* out_grad_data = out_grad->data(); - for (int i = 0; i < batch_size; ++i) { - logit_grad_data[i * class_num + label_data[i]] -= out_grad_data[i]; + const int remain = d / axis_dim; + for (int i = 0; i < n; ++i) { + for (int j = 0; j < remain; j++) { + int idx = i * remain + j; + logit_grad_data[i * d + label_data[idx] * remain + j] -= + out_grad_data[idx]; + } } } } diff --git a/paddle/fluid/operators/sum_op.cu b/paddle/fluid/operators/sum_op.cu index 6125ed07b6d0f92fa317c581a06117dcfa7359ae..5cecb7e09e7db2f3e4f63037352d3ee2b182ac3d 100644 --- a/paddle/fluid/operators/sum_op.cu +++ b/paddle/fluid/operators/sum_op.cu @@ -8,9 +8,242 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ + +#include +#include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/operators/sum_op.h" #include "paddle/fluid/platform/float16.h" +namespace plat = paddle::platform; + +namespace paddle { +namespace operators { + +#define CEIL_DIV(x, y) (((x) + (y)-1) / (y)) + +using LoDTensor = framework::LoDTensor; + +template +__global__ void Sum2CUDAKernel(const T *in_0, const T *in_1, T *out, + int64_t N) { + int id = blockIdx.x * blockDim.x + threadIdx.x; + while (id < N) { + out[id] = in_0[id] + in_1[id]; + id += blockDim.x * gridDim.x; + } +} + +template +__global__ void SumArrayCUDAKernel(T **in, T *out, int64_t N, size_t in_size, + bool read_dst) { + int id = blockIdx.x * blockDim.x + threadIdx.x; + while (id < N) { + T total(0); + for (int i = 0; i < in_size; ++i) { + const T *tmp = in[i]; + if (tmp) { + total += tmp[id]; + } + } + if (read_dst) { + out[id] += total; + } else { + out[id] = total; + } + id += blockDim.x * gridDim.x; + } +} + +template +__global__ void SumSelectedRowsCUDAKernel(T **sr_in_out, int64_t N, + size_t rows) { + int id = blockIdx.x * blockDim.x + threadIdx.x; + while (id < N) { + for (int i = 0; i < 2 * rows; i += 2) { + const T *tmp = sr_in_out[i]; + T *tmp_out = sr_in_out[i + 1]; + if (tmp && tmp_out) { + tmp_out[id] += tmp[id]; + } + } + id += blockDim.x * gridDim.x; + } +} + +template +__global__ void SumAlign4CUDAKernel(const T *in_0, const T *in_1, T *out, + int64_t N) { + int id = blockIdx.x * blockDim.x + threadIdx.x; + for (int i = id; i < N / 4; i += blockDim.x * gridDim.x) { + const float4 *in0_4 = reinterpret_cast(in_0); + const float4 *in1_4 = reinterpret_cast(in_1); + float4 tmp; + tmp.x = in0_4[i].x + in1_4[i].x; + tmp.y = in0_4[i].y + in1_4[i].y; + tmp.z = in0_4[i].z + in1_4[i].z; + tmp.w = in0_4[i].w + in1_4[i].w; + reinterpret_cast(out)[i] = tmp; + } +} + +template +void SumToLoDTensor(const framework::ExecutionContext &context) { + auto in_vars = context.MultiInputVar("X"); + const size_t in_num = in_vars.size(); + + constexpr size_t theory_sm_threads = 1024; + auto &dev_ctx = + context.template device_context(); + auto stream = dev_ctx.stream(); + + auto max_threads = dev_ctx.GetMaxPhysicalThreadCount(); + auto sm_count = max_threads / theory_sm_threads; + size_t tile_size = 0; + dim3 grids; + dim3 blocks; + + auto ComputeKernelParameter = [&](size_t length) { + if (length >= max_threads) + tile_size = 1024; + else if (length < max_threads && length > sm_count * 128) + tile_size = 512; + else if (length <= sm_count * 128) + tile_size = 256; + grids = dim3(CEIL_DIV(length, 
tile_size), 1, 1); + blocks = dim3(tile_size, 1, 1); + }; + + auto *out = context.Output("Out"); + bool in_place = in_vars[0] == context.OutputVar("Out"); + if (!in_place) { + out->mutable_data(context.GetPlace()); + } + + // Sum of two tensors + if (in_num == 2 && in_vars[0]->IsType() && + in_vars[1]->IsType()) { + auto &in_0 = in_vars[0]->Get(); + auto &in_1 = in_vars[1]->Get(); + + auto length = in_0.numel(); + if (length) { + auto result = EigenVector::Flatten(*out); + auto &place = *dev_ctx.eigen_device(); + auto in_0_e = EigenVector::Flatten(in_0); + auto in_1_e = EigenVector::Flatten(in_1); + result.device(place) = in_0_e + in_1_e; + } + return; + } + + int start = in_place ? 1 : 0; + if (!in_place) { + math::SetConstant constant_functor; + constant_functor( + context.template device_context(), out, + static_cast(0)); + } + + std::vector in_data; + std::vector selectrow_index; + int64_t lod_length = 0; + bool dst_write = false; + for (int i = start; i < in_num; ++i) { + if (in_vars[i]->IsType()) { + auto &in_i = in_vars[i]->Get(); + in_data.emplace_back(in_i.data()); + lod_length = in_i.numel(); + } else if (in_vars[i]->IsType()) { + selectrow_index.push_back(i); + } + } + + // compute select rows separately. + if (!selectrow_index.empty()) { + std::vector sr_in_out_data; + size_t rows = 0; + int64_t length = 0; + for (auto index : selectrow_index) { + auto &sr = in_vars[index]->Get(); + auto &sr_value = sr.value(); + auto &sr_rows = sr.rows(); + + auto row_numel = sr_value.numel() / sr_rows.size(); + auto out_dims = out->dims(); + + PADDLE_ENFORCE_EQ(sr.height(), out_dims[0]); + PADDLE_ENFORCE_EQ(row_numel, out->numel() / sr.height()); + + auto *sr_data = sr_value.data(); + auto *sr_out_data = out->data(); + rows += sr_rows.size(); + length = row_numel; + + for (size_t i = 0; i < sr_rows.size(); ++i) { + sr_in_out_data.emplace_back(&sr_data[i * row_numel]); + sr_in_out_data.emplace_back(&sr_out_data[sr_rows[i] * row_numel]); + } + } + if (!sr_in_out_data.empty()) { + auto tmp_sr_in_out_array = + platform::DeviceTemporaryAllocator::Instance().Get(dev_ctx).Allocate( + sr_in_out_data.size() * sizeof(T *)); + + memory::Copy(boost::get(dev_ctx.GetPlace()), + tmp_sr_in_out_array->ptr(), platform::CPUPlace(), + reinterpret_cast(sr_in_out_data.data()), + sr_in_out_data.size() * sizeof(T *), dev_ctx.stream()); + + T **sr_in_out_array_data = + reinterpret_cast(tmp_sr_in_out_array->ptr()); + + ComputeKernelParameter(length); + SumSelectedRowsCUDAKernel<<>>( + sr_in_out_array_data, length, rows); + dst_write = true; + } + } + // if in_data is not empty, merge the LoDTensor inputs into one kernel call.
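The ComputeKernelParameter lambda above picks a block size from the input length so that short inputs still spread across enough blocks to keep the SMs busy. A standalone sketch of the same heuristic (max_threads and sm_count would come from the device context; the struct and function names are illustrative):

    #include <cstddef>

    struct LaunchConfig {
      size_t block;
      size_t grid;
    };

    LaunchConfig PickLaunch(size_t length, size_t max_threads, size_t sm_count) {
      size_t tile_size = 256;  // short inputs: smaller blocks, more of them
      if (length >= max_threads) {
        tile_size = 1024;
      } else if (length > sm_count * 128) {
        tile_size = 512;
      }
      const size_t grid = (length + tile_size - 1) / tile_size;  // CEIL_DIV
      return {tile_size, grid};
    }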
+ if (!in_data.empty()) { + auto tmp_in_array = + platform::DeviceTemporaryAllocator::Instance().Get(dev_ctx).Allocate( + in_data.size() * sizeof(T *)); + + memory::Copy(boost::get(dev_ctx.GetPlace()), + tmp_in_array->ptr(), platform::CPUPlace(), + reinterpret_cast(in_data.data()), + in_data.size() * sizeof(T *), dev_ctx.stream()); + + T **in_array_data = reinterpret_cast(tmp_in_array->ptr()); + ComputeKernelParameter(lod_length); + SumArrayCUDAKernel<<>>( + in_array_data, out->data(), lod_length, in_data.size(), + dst_write | in_place); + } +} + +template +class SumKernel + : public framework::OpKernel { + public: + void Compute(const framework::ExecutionContext &context) const override { + auto out_var = context.OutputVar("Out"); + + if (out_var->IsType()) { + SumToLoDTensor(context); + } else if (out_var->IsType()) { + SelectedRowsCompute(context); + } else if (out_var->IsType()) { + LodTensorArrayCompute(context); + } else { + PADDLE_THROW("Unexpected branch, output variable type is %s", + framework::ToTypeName(out_var->Type())); + } + } +}; +} // namespace operators +} // namespace paddle + namespace ops = paddle::operators; namespace plat = paddle::platform; REGISTER_OP_CUDA_KERNEL( diff --git a/paddle/fluid/operators/sum_op.h b/paddle/fluid/operators/sum_op.h index a8b2df186dbfcb2a913e9532e2a475f1ad0d23a1..0d60947971ca441b8f6785a7724e0a530e8a8e92 100644 --- a/paddle/fluid/operators/sum_op.h +++ b/paddle/fluid/operators/sum_op.h @@ -27,6 +27,96 @@ template using EigenVector = framework::EigenVector; +template +void SelectedRowsCompute(const framework::ExecutionContext &context) { + auto in_vars = context.MultiInputVar("X"); + auto out_var = context.OutputVar("Out"); + bool in_place = out_var == in_vars[0]; + + if (in_place && in_vars.size() < 2) { + return; + } + + std::vector inputs; + SelectedRows temp_in0; + + if (in_place) { + auto &in0 = in_vars[0]->Get(); + temp_in0.set_height(in0.height()); + temp_in0.set_rows(in0.rows()); + framework::TensorCopy(in0.value(), in0.place(), context.device_context(), + temp_in0.mutable_value()); + inputs.push_back(&temp_in0); + for (size_t i = 1; i < in_vars.size(); ++i) { + auto &in = in_vars[i]->Get(); + if (in.rows().size() > 0) { + inputs.push_back(&in); + } + } + } else { + for (auto &in_var : in_vars) { + auto &in = in_var->Get(); + if (in.rows().size() > 0) { + inputs.push_back(&in_var->Get()); + } + } + } + + auto *out = context.Output("Out"); + out->mutable_rows()->clear(); + + bool has_data = false; + for (auto &in : inputs) { + if (in->rows().size() > 0) { + has_data = true; + break; + } + } + if (has_data) { + math::scatter::MergeAdd merge_add; + merge_add(context.template device_context(), inputs, out); + + out->SyncIndex(); + + } else { + // no data, just set a empty out tensor. + out->mutable_value()->mutable_data(framework::make_ddim({0}), + context.GetPlace()); + } +} + +template +void LodTensorArrayCompute(const framework::ExecutionContext &context) { + auto in_vars = context.MultiInputVar("X"); + auto out_var = context.OutputVar("Out"); + bool in_place = out_var == in_vars[0]; + auto &out_array = *out_var->GetMutable(); + for (size_t i = in_place ? 
1 : 0; i < in_vars.size(); ++i) { + PADDLE_ENFORCE(in_vars[i]->IsType(), + "Only support all inputs are TensorArray"); + auto &in_array = in_vars[i]->Get(); + + for (size_t i = 0; i < in_array.size(); ++i) { + if (in_array[i].numel() != 0) { + if (i >= out_array.size()) { + out_array.resize(i + 1); + } + if (out_array[i].numel() == 0) { + framework::TensorCopy(in_array[i], in_array[i].place(), + context.device_context(), &out_array[i]); + out_array[i].set_lod(in_array[i].lod()); + } else { + PADDLE_ENFORCE(out_array[i].lod() == in_array[i].lod()); + auto in = EigenVector::Flatten(in_array[i]); + auto result = EigenVector::Flatten(out_array[i]); + result.device(*context.template device_context() + .eigen_device()) = result + in; + } + } + } + } +} + template class SumKernel : public framework::OpKernel { public: @@ -83,84 +173,9 @@ class SumKernel : public framework::OpKernel { } } } else if (out_var->IsType()) { - if (in_place && in_vars.size() < 2) { - return; - } - - std::vector inputs; - SelectedRows temp_in0; - - if (in_place) { - auto &in0 = in_vars[0]->Get(); - temp_in0.set_height(in0.height()); - temp_in0.set_rows(in0.rows()); - framework::TensorCopy(in0.value(), in0.place(), - context.device_context(), - temp_in0.mutable_value()); - inputs.push_back(&temp_in0); - for (size_t i = 1; i < in_vars.size(); ++i) { - auto &in = in_vars[i]->Get(); - if (in.rows().size() > 0) { - inputs.push_back(&in); - } - } - } else { - for (auto &in_var : in_vars) { - auto &in = in_var->Get(); - if (in.rows().size() > 0) { - inputs.push_back(&in_var->Get()); - } - } - } - - auto *out = context.Output("Out"); - out->mutable_rows()->clear(); - - bool has_data = false; - for (auto &in : inputs) { - if (in->rows().size() > 0) { - has_data = true; - break; - } - } - if (has_data) { - math::scatter::MergeAdd merge_add; - merge_add(context.template device_context(), inputs, - out); - - out->SyncIndex(); - - } else { - // no data, just set a empty out tensor. - out->mutable_value()->mutable_data(framework::make_ddim({0}), - context.GetPlace()); - } + SelectedRowsCompute(context); } else if (out_var->IsType()) { - auto &out_array = *out_var->GetMutable(); - for (size_t i = in_place ? 1 : 0; i < in_vars.size(); ++i) { - PADDLE_ENFORCE(in_vars[i]->IsType(), - "Only support all inputs are TensorArray"); - auto &in_array = in_vars[i]->Get(); - - for (size_t i = 0; i < in_array.size(); ++i) { - if (in_array[i].numel() != 0) { - if (i >= out_array.size()) { - out_array.resize(i + 1); - } - if (out_array[i].numel() == 0) { - framework::TensorCopy(in_array[i], in_array[i].place(), - context.device_context(), &out_array[i]); - out_array[i].set_lod(in_array[i].lod()); - } else { - PADDLE_ENFORCE(out_array[i].lod() == in_array[i].lod()); - auto in = EigenVector::Flatten(in_array[i]); - auto result = EigenVector::Flatten(out_array[i]); - result.device(*context.template device_context() - .eigen_device()) = result + in; - } - } - } - } + LodTensorArrayCompute(context); } else { PADDLE_THROW("Unexpected branch, output variable type is %s", framework::ToTypeName(out_var->Type())); diff --git a/paddle/fluid/operators/where_op.cc b/paddle/fluid/operators/where_op.cc new file mode 100644 index 0000000000000000000000000000000000000000..3b53ebec0b250c7181968e37f996ec9ef5cf2a2c --- /dev/null +++ b/paddle/fluid/operators/where_op.cc @@ -0,0 +1,58 @@ +/* Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#include "paddle/fluid/operators/where_op.h" + +namespace paddle { +namespace operators { + +class WhereOp : public framework::OperatorWithKernel { + public: + using framework::OperatorWithKernel::OperatorWithKernel; + + void InferShape(framework::InferShapeContext* ctx) const override { + PADDLE_ENFORCE(ctx->HasInput("Condition"), + "Input(Condition) of WhereOp should not be null."); + PADDLE_ENFORCE( + ctx->GetInputDim("Condition").size() >= 1, + "Input(Condition) should have at least 1 dimension"); + PADDLE_ENFORCE(ctx->HasOutput("Out"), + "Output(Out) of WhereOp should not be null."); + ctx->SetOutputDim("Out", {-1, ctx->GetInputDim("Condition").size()}); + } + + protected: + framework::OpKernelType GetExpectedKernelType( + const framework::ExecutionContext& ctx) const override { + auto output_type = framework::proto::VarType::INT64; + return framework::OpKernelType(output_type, ctx.device_context()); + } +}; + +class WhereOpMaker : public framework::OpProtoAndCheckerMaker { + public: + void Make() override { + AddInput("Condition", "A bool tensor whose rank is at least 1"); + AddOutput("Out", "An int64 tensor of rank 2"); + AddComment(R"DOC( + Return an int64 tensor with rank 2, specifying the coordinates of the true elements in `Condition`. +)DOC"); + } +}; +} // namespace operators +} // namespace paddle + +namespace ops = paddle::operators; +REGISTER_OP_WITHOUT_GRADIENT(where, ops::WhereOp, ops::WhereOpMaker); +REGISTER_OP_CPU_KERNEL(where, ops::CPUWhereKernel); diff --git a/paddle/fluid/operators/where_op.cu b/paddle/fluid/operators/where_op.cu new file mode 100644 index 0000000000000000000000000000000000000000..27682f869c73c760bf475489a8bdd57e39cfaea5 --- /dev/null +++ b/paddle/fluid/operators/where_op.cu @@ -0,0 +1,81 @@ +/* Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License.
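Before the kernels: a host-side reference of what the where op computes. For every true element of the bool input it emits that element's multi-dimensional coordinates, recovered from the flat index with row-major strides, so the output shape is [true_num, rank]. A sketch under those assumptions (WhereRef is an illustrative name):

    #include <cstdint>
    #include <vector>

    std::vector<std::vector<int64_t>> WhereRef(const std::vector<bool>& cond,
                                               const std::vector<int64_t>& dims) {
      const int rank = static_cast<int>(dims.size());
      std::vector<int64_t> stride(rank, 1);
      for (int i = rank - 2; i >= 0; --i) stride[i] = stride[i + 1] * dims[i + 1];

      std::vector<std::vector<int64_t>> out;  // [true_num, rank]
      for (int64_t flat = 0; flat < static_cast<int64_t>(cond.size()); ++flat) {
        if (!cond[flat]) continue;
        std::vector<int64_t> coord(rank);
        int64_t rest = flat;
        for (int j = 0; j < rank; ++j) {  // same loop as WhereFunctor below
          coord[j] = rest / stride[j];
          rest -= coord[j] * stride[j];
        }
        out.push_back(coord);
      }
      return out;
    }

For dims {2, 3} and a true element at flat index 4, this yields the coordinate {1, 1}.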
*/ + +#include +#include "paddle/fluid/framework/ddim.h" +#include "paddle/fluid/framework/op_registry.h" +#include "paddle/fluid/operators/where_op.h" +#include "paddle/fluid/platform/cuda_primitives.h" +#include "paddle/fluid/platform/for_range.h" + +namespace paddle { +namespace operators { + +using CUDADeviceContext = paddle::platform::CUDADeviceContext; + +template +class CUDAWhereKernel : public framework::OpKernel { + public: + void Compute(const framework::ExecutionContext& context) const override { + auto* condition = context.Input("Condition"); + auto* out = context.Output("Out"); + + // TODO(zhoukunsheng): Should optimize to ensure GPU is faster than CPU. + framework::Tensor cond_cpu; + framework::TensorCopy(*condition, platform::CPUPlace(), &cond_cpu); + + const bool* cond_data = cond_cpu.data(); + int64_t numel = cond_cpu.numel(); + auto dims = cond_cpu.dims(); + int rank = dims.size(); + + thrust::host_vector h_true_index; + for (int64_t i = 0; i < numel; i++) { + if (cond_data[i]) { + h_true_index.push_back(i); + } + } + thrust::device_vector d_true_index = h_true_index; + int* ptr_true_index = thrust::raw_pointer_cast(d_true_index.data()); + + size_t true_num = h_true_index.size(); + + out->Resize(framework::make_ddim({static_cast(true_num), rank})); + auto out_ptr = out->mutable_data(context.GetPlace()); + + if (true_num == 0) { + return; + } + + thrust::host_vector h_stride(rank, 0); + h_stride[rank - 1] = 1; + for (int i = rank - 2; i >= 0; i--) { + h_stride[i] = h_stride[i + 1] * dims[i + 1]; + } + thrust::device_vector d_stride = h_stride; + int* ptr_stride = thrust::raw_pointer_cast(d_stride.data()); + + auto& dev_ctx = context.template device_context(); + WhereFunctor functor(ptr_true_index, true_num, ptr_stride, rank, + out_ptr); + platform::ForRange for_range(dev_ctx, true_num); + for_range(functor); + } +}; + +} // namespace operators +} // namespace paddle + +namespace ops = paddle::operators; +REGISTER_OP_CUDA_KERNEL(where, ops::CUDAWhereKernel); diff --git a/paddle/fluid/operators/where_op.h b/paddle/fluid/operators/where_op.h new file mode 100644 index 0000000000000000000000000000000000000000..6a161a2668fa02f181ef99bfbfb501541988a333 --- /dev/null +++ b/paddle/fluid/operators/where_op.h @@ -0,0 +1,95 @@ +/* Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
*/ + +#pragma once +#include +#include +#include "paddle/fluid/framework/eigen.h" +#include "paddle/fluid/framework/op_registry.h" +#include "paddle/fluid/operators/math/math_function.h" +#include "paddle/fluid/platform/for_range.h" + +namespace paddle { +namespace operators { + +template +struct WhereFunctor { + WhereFunctor(const T& true_index, int true_num, const T& stride, int rank, + int64_t* out) + : true_index_(true_index), + true_num_(true_num), + stride_(stride), + rank_(rank), + out_ptr_(out) {} + + HOSTDEVICE void operator()(size_t idx) const { + int index = true_index_[idx]; + for (int j = 0; j < rank_; j++) { + out_ptr_[idx * rank_ + j] = index / stride_[j]; + index -= out_ptr_[idx * rank_ + j] * stride_[j]; + } + } + + const T true_index_; + int true_num_; + const T stride_; + int rank_; + int64_t* out_ptr_; +}; + +using CPUDeviceContext = paddle::platform::CPUDeviceContext; + +template +class CPUWhereKernel : public framework::OpKernel { + public: + void Compute(const framework::ExecutionContext& context) const override { + auto* condition = context.Input("Condition"); + auto* out = context.Output("Out"); + + const bool* cond_data = condition->data(); + auto numel = condition->numel(); + auto dims = condition->dims(); + const int rank = dims.size(); + + std::vector true_index; + for (auto i = 0; i < numel; i++) { + if (cond_data[i]) { + true_index.push_back(i); + } + } + auto true_num = true_index.size(); + + out->Resize(framework::make_ddim({static_cast(true_num), rank})); + auto out_ptr = out->mutable_data(context.GetPlace()); + + if (true_num == 0) { + return; + } + + std::vector stride(rank); + stride[rank - 1] = 1; + for (int i = rank - 2; i >= 0; i--) { + stride[i] = stride[i + 1] * dims[i + 1]; + } + + auto& dev_ctx = context.template device_context(); + WhereFunctor functor(true_index.data(), true_num, stride.data(), rank, + out_ptr); + platform::ForRange for_range(dev_ctx, true_num); + for_range(functor); + } +}; + +} // namespace operators +} // namespace paddle diff --git a/paddle/fluid/platform/assert.h b/paddle/fluid/platform/assert.h index 497c7b3c87f94c19b4bf1ded33927a353ee1ab84..e3884a985e08ad94fc95cfa65329f848e0715bd1 100644 --- a/paddle/fluid/platform/assert.h +++ b/paddle/fluid/platform/assert.h @@ -17,40 +17,32 @@ limitations under the License. 
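The assert.h hunk that follows unifies the host and device failure paths: the report is printed either way, and EXIT() is asm("trap;") under __CUDA_ARCH__ but a thrown std::runtime_error on the host. A minimal host-only sketch of the macro's shape (SKETCH_ASSERT_MSG is a stand-in name, not the real one):

    #include <cstdint>
    #include <cstdio>
    #include <stdexcept>

    #define SKETCH_ASSERT_MSG(cond, msg, val)                               \
      do {                                                                  \
        if (!(cond)) {                                                      \
          std::printf("Exception: %s:%d Assertion `%s` failed (%s %ld).\n", \
                      __FILE__, __LINE__, #cond, msg,                       \
                      static_cast<long>(val));                              \
          throw std::runtime_error("assertion failed");                     \
        }                                                                   \
      } while (0)

    int main() {
      int64_t idx = 7, limit = 16;
      SKETCH_ASSERT_MSG(idx < limit, "idx", idx);  // passes, prints nothing
      return 0;
    }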
*/ #define STRINGIFY(x) #x #define TOSTRING(x) STRINGIFY(x) +// For cuda, the assertions can affect performance and it is therefore +// recommended to disable them in production code +// https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#assertion #if defined(__CUDA_ARCH__) #include -#define PADDLE_ASSERT(e) \ - do { \ - if (!(e)) { \ - printf("%s:%d Assertion `%s` failed.\n", __FILE__, __LINE__, \ - TOSTRING(e)); \ - asm("trap;"); \ - } \ - } while (0) +#define EXIT() asm("trap;") +#else +#include +#define EXIT() throw std::runtime_error("Exception encounter.") +#endif -#define PADDLE_ASSERT_MSG(e, m) \ - do { \ - if (!(e)) { \ - printf("%s:%d Assertion `%s` failed (%s).\n", __FILE__, __LINE__, \ - TOSTRING(e), m); \ - asm("trap;"); \ - } \ +#define PADDLE_ASSERT(_IS_NOT_ERROR) \ + do { \ + if (!(_IS_NOT_ERROR)) { \ + printf("Exception: %s:%d Assertion `%s` failed.\n", __FILE__, __LINE__, \ + TOSTRING(_IS_NOT_ERROR)); \ + EXIT(); \ + } \ } while (0) -#define PADDLE_ASSERT_MSG_CODE(e, m, c) \ - do { \ - if (!(e)) { \ - printf("%s:%d Assertion `%s` failed (%s %ld).\n", __FILE__, __LINE__, \ - TOSTRING(e), m, c); \ - asm("trap;"); \ - } \ +// NOTE: PADDLE_ASSERT is mainly used in CUDA Kernel or HOSTDEVICE function. +#define PADDLE_ASSERT_MSG(_IS_NOT_ERROR, __MSG, __VAL) \ + do { \ + if (!(_IS_NOT_ERROR)) { \ + printf("Exception: %s:%d Assertion `%s` failed (%s %ld).\n", __FILE__, \ + __LINE__, TOSTRING(_IS_NOT_ERROR), __MSG, __VAL); \ + EXIT(); \ + } \ } while (0) -#else -#include -// For cuda, the assertions can affect performance and it is therefore -// recommended to disable them in production code -// https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#assertion -#define PADDLE_ASSERT(e) assert((e)) -#define PADDLE_ASSERT_MSG(e, m) assert((e) && (m)) -#define PADDLE_ASSERT_MSG_CODE(e, m, c) assert((e) && (m) && (c || 1)) -#endif diff --git a/paddle/fluid/platform/cuda_device_function.h b/paddle/fluid/platform/cuda_device_function.h index 31b6c38d613cf9df8fa7e8f6a8e1cfa310280968..202613244deb02c05c39ed18abaa18d79078db33 100644 --- a/paddle/fluid/platform/cuda_device_function.h +++ b/paddle/fluid/platform/cuda_device_function.h @@ -63,7 +63,8 @@ inline static int RoundToPowerOfTwo(int dim) { template __forceinline__ __device__ T CudaShuffleDownSync(unsigned mask, T val, - int delta, int width = 32) { + int delta, + int width = warpSize) { #if CUDA_VERSION < 9000 return __shfl_down(val, delta, width); #else @@ -71,6 +72,16 @@ __forceinline__ __device__ T CudaShuffleDownSync(unsigned mask, T val, #endif } +template +__forceinline__ __device__ T CudaShuffleXorSync(unsigned mask, T val, + int width = warpSize) { +#if CUDA_VERSION < 9000 + return __shfl_xor(val, width); +#else + return __shfl_xor_sync(mask, val, width); +#endif +} + // CUDA 9.0 have native compatible float16 shfl_down #if CUDA_VERSION < 9000 template <> @@ -80,6 +91,11 @@ __forceinline__ __device__ float16 CudaShuffleDownSync(unsigned mask, return float16( __shfl_down(static_cast(val), static_cast(delta), width)); } +template <> +__forceinline__ __device__ float16 CudaShuffleXorSync(unsigned mask, + float16 val, int width) { + return float16(__shfl_xor(static_cast(val), width)); +} #else template <> __forceinline__ __device__ float16 CudaShuffleDownSync(unsigned mask, @@ -88,6 +104,11 @@ __forceinline__ __device__ float16 CudaShuffleDownSync(unsigned mask, return float16(__shfl_down_sync(mask, static_cast(val), static_cast(delta), width)); } +template <> +__forceinline__ __device__ float16 
CudaShuffleXorSync(unsigned mask, + float16 val, int width) { + return float16(__shfl_xor_sync(mask, static_cast(val), width)); +} #endif template diff --git a/paddle/fluid/platform/cudnn_desc.h b/paddle/fluid/platform/cudnn_desc.h index 1062b403f289610a6dec28dead9177d387f0d4e0..4ed51acb587ba042f7e6ff54713854da449eb723 100644 --- a/paddle/fluid/platform/cudnn_desc.h +++ b/paddle/fluid/platform/cudnn_desc.h @@ -29,13 +29,14 @@ namespace platform { using framework::Tensor; template -cudnnDataType_t ToCudnnDataType(const T& t) { +inline cudnnDataType_t ToCudnnDataType(const T& t) { auto type = framework::ToDataType(t); return ToCudnnDataType(type); } template <> -cudnnDataType_t ToCudnnDataType(const framework::proto::VarType::Type& t) { +inline cudnnDataType_t ToCudnnDataType( + const framework::proto::VarType::Type& t) { cudnnDataType_t type = CUDNN_DATA_FLOAT; switch (t) { case framework::proto::VarType::FP16: @@ -59,14 +60,14 @@ class ActivationDescriptor { struct Deleter { void operator()(T* t) { if (t != nullptr) { - PADDLE_ENFORCE(dynload::cudnnDestroyActivationDescriptor(t)); + CUDNN_ENFORCE(dynload::cudnnDestroyActivationDescriptor(t)); t = nullptr; } } }; ActivationDescriptor() { T* raw_ptr; - PADDLE_ENFORCE(dynload::cudnnCreateActivationDescriptor(&raw_ptr)); + CUDNN_ENFORCE(dynload::cudnnCreateActivationDescriptor(&raw_ptr)); desc_.reset(raw_ptr); } template @@ -88,14 +89,14 @@ class TensorDescriptor { struct Deleter { void operator()(T* t) { if (t != nullptr) { - PADDLE_ENFORCE(dynload::cudnnDestroyTensorDescriptor(t)); + CUDNN_ENFORCE(dynload::cudnnDestroyTensorDescriptor(t)); t = nullptr; } } }; TensorDescriptor() { T* raw_ptr; - PADDLE_ENFORCE(dynload::cudnnCreateTensorDescriptor(&raw_ptr)); + CUDNN_ENFORCE(dynload::cudnnCreateTensorDescriptor(&raw_ptr)); desc_.reset(raw_ptr); } T* desc() { return desc_.get(); } @@ -111,7 +112,7 @@ class TensorDescriptor { if (groups > 1) { dims_with_group[1] = dims_with_group[1] / groups; } - PADDLE_ENFORCE(dynload::cudnnSetTensorNdDescriptor( + CUDNN_ENFORCE(dynload::cudnnSetTensorNdDescriptor( desc_.get(), ToCudnnDataType(tensor.type()), dims_with_group.size(), dims_with_group.data(), strides.data())); } @@ -120,5 +121,83 @@ class TensorDescriptor { std::unique_ptr desc_; }; +class FilterDescriptor { + public: + using T = cudnnFilterStruct; + struct Deleter { + void operator()(T* t) { + if (t != nullptr) { + CUDNN_ENFORCE(dynload::cudnnDestroyFilterDescriptor(t)); + t = nullptr; + } + } + }; + FilterDescriptor() { + T* raw_ptr; + CUDNN_ENFORCE(dynload::cudnnCreateFilterDescriptor(&raw_ptr)); + desc_.reset(raw_ptr); + } + T* desc() { return desc_.get(); } + T* desc() const { return desc_.get(); } + + void set(const Tensor& tensor, const cudnnTensorFormat_t format, + const int groups = 1) { + auto dims = framework::vectorize2int(tensor.dims()); + if (groups > 1) { + dims[1] = dims[1] / groups; + } + CUDNN_ENFORCE(dynload::cudnnSetFilterNdDescriptor( + desc_.get(), ToCudnnDataType(tensor.type()), format, dims.size(), + dims.data())); + } + + private: + std::unique_ptr desc_; +}; + +class ConvolutionDescriptor { + public: + using T = cudnnConvolutionStruct; + struct Deleter { + void operator()(T* t) { + if (t != nullptr) { + CUDNN_ENFORCE(dynload::cudnnDestroyConvolutionDescriptor(t)); + t = nullptr; + } + } + }; + ConvolutionDescriptor() { + T* raw_ptr; + CUDNN_ENFORCE(dynload::cudnnCreateConvolutionDescriptor(&raw_ptr)); + desc_.reset(raw_ptr); + } + T* desc() { return desc_.get(); } + T* desc() const { return desc_.get(); } + + void 
set(cudnnDataType_t dtype, const std::vector& pads, + const std::vector& strides, const std::vector& dilations, + const int groups = 1) { + cudnnDataType_t compute_type = + (dtype == CUDNN_DATA_DOUBLE) ? CUDNN_DATA_DOUBLE : CUDNN_DATA_FLOAT; + T* desc = desc_.get(); + CUDNN_ENFORCE(dynload::cudnnSetConvolutionNdDescriptor( + desc, pads.size(), pads.data(), strides.data(), dilations.data(), + CUDNN_CROSS_CORRELATION, compute_type)); + CUDNN_ENFORCE(platform::dynload::cudnnSetConvolutionMathType( + desc, CUDNN_DEFAULT_MATH)); +#if CUDNN_VERSION_MIN(7, 0, 1) + CUDNN_ENFORCE( + platform::dynload::cudnnSetConvolutionGroupCount(desc, groups)); + if (dtype == CUDNN_DATA_HALF) { + CUDNN_ENFORCE(platform::dynload::cudnnSetConvolutionMathType( + desc, CUDNN_TENSOR_OP_MATH)); + } +#endif + } + + private: + std::unique_ptr desc_; +}; + } // namespace platform } // namespace paddle diff --git a/paddle/fluid/platform/dynload/CMakeLists.txt b/paddle/fluid/platform/dynload/CMakeLists.txt index 07159d4a12ef4b628f7705ed206d3334be46dfc8..2b63c81859d94eb62437439edbb71dc8c1dfbb42 100644 --- a/paddle/fluid/platform/dynload/CMakeLists.txt +++ b/paddle/fluid/platform/dynload/CMakeLists.txt @@ -16,6 +16,7 @@ if (CUPTI_FOUND) list(APPEND CUDA_SRCS cupti.cc) endif(CUPTI_FOUND) nv_library(dynload_cuda SRCS ${CUDA_SRCS} DEPS dynamic_loader) +configure_file(warpctc_lib_path.h.in ${CMAKE_CURRENT_BINARY_DIR}/warpctc_lib_path.h) cc_library(dynload_warpctc SRCS warpctc.cc DEPS dynamic_loader warpctc) if (WITH_MKLML) cc_library(dynload_mklml SRCS mklml.cc DEPS dynamic_loader mklml) diff --git a/paddle/fluid/platform/dynload/dynamic_loader.cc b/paddle/fluid/platform/dynload/dynamic_loader.cc index 15d516836652ea4ea4d1bcdf35022e6b79cc3b52..62f623b175e78ba7fc94ed9ab9a96b38f3d3e271 100644 --- a/paddle/fluid/platform/dynload/dynamic_loader.cc +++ b/paddle/fluid/platform/dynload/dynamic_loader.cc @@ -20,6 +20,7 @@ limitations under the License. 
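All four descriptor classes above follow the same RAII shape: the raw cuDNN handle is created once, owned by a std::unique_ptr with a custom Deleter, and destroyed exactly once even if a later set(...) call throws. A library-free sketch of the pattern (FakeDesc stands in for the opaque cudnn*Struct handles):

    #include <memory>

    struct FakeDesc { int id; };
    inline void DestroyFakeDesc(FakeDesc* d) { delete d; }  // stands in for cudnnDestroy*

    class DescriptorHolder {
     public:
      struct Deleter {
        void operator()(FakeDesc* t) const {
          if (t != nullptr) DestroyFakeDesc(t);
        }
      };
      DescriptorHolder() : desc_(new FakeDesc{0}) {}  // stands in for cudnnCreate*
      FakeDesc* desc() const { return desc_.get(); }

     private:
      std::unique_ptr<FakeDesc, Deleter> desc_;
    };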
*/ #include "gflags/gflags.h" #include "glog/logging.h" #include "paddle/fluid/platform/dynload/cupti_lib_path.h" +#include "paddle/fluid/platform/dynload/warpctc_lib_path.h" #include "paddle/fluid/platform/enforce.h" #include "paddle/fluid/platform/port.h" @@ -52,6 +53,7 @@ namespace paddle { namespace platform { namespace dynload { static constexpr char cupti_lib_path[] = CUPTI_LIB_PATH; +static constexpr char warpctc_lib_path[] = WARPCTC_LIB_PATH; #if defined(_WIN32) && defined(PADDLE_WITH_CUDA) static constexpr char* win_cublas_lib = "cublas64_" PADDLE_CUDA_BINVER ".dll"; @@ -211,12 +213,16 @@ void* GetCurandDsoHandle() { } void* GetWarpCTCDsoHandle() { + std::string warpctc_dir = warpctc_lib_path; + if (!FLAGS_warpctc_dir.empty()) { + warpctc_dir = FLAGS_warpctc_dir; + } #if defined(__APPLE__) || defined(__OSX__) - return GetDsoHandleFromSearchPath(FLAGS_warpctc_dir, "libwarpctc.dylib"); + return GetDsoHandleFromSearchPath(warpctc_dir, "libwarpctc.dylib"); #elif defined(_WIN32) - return GetDsoHandleFromSearchPath(FLAGS_warpctc_dir, "warpctc.dll"); + return GetDsoHandleFromSearchPath(warpctc_dir, "warpctc.dll"); #else - return GetDsoHandleFromSearchPath(FLAGS_warpctc_dir, "libwarpctc.so"); + return GetDsoHandleFromSearchPath(warpctc_dir, "libwarpctc.so"); #endif } diff --git a/paddle/fluid/framework/ir/sync_batch_norm_pass.h b/paddle/fluid/platform/dynload/warpctc_lib_path.h.in similarity index 67% rename from paddle/fluid/framework/ir/sync_batch_norm_pass.h rename to paddle/fluid/platform/dynload/warpctc_lib_path.h.in index 694fae74943060880ef199298064d20c5a526d18..dc5064f45735a9871b6b9f39fac06723c1b536f3 100644 --- a/paddle/fluid/framework/ir/sync_batch_norm_pass.h +++ b/paddle/fluid/platform/dynload/warpctc_lib_path.h.in @@ -14,18 +14,4 @@ limitations under the License. */ #pragma once -#include -#include "paddle/fluid/framework/ir/pass.h" - -namespace paddle { -namespace framework { -namespace ir { - -class SyncBatchNormPass : public Pass { - protected: - void ApplyImpl(ir::Graph* graph) const override; -}; - -} // namespace ir -} // namespace framework -} // namespace paddle +#define WARPCTC_LIB_PATH "@WARPCTC_INSTALL_DIR@/lib/" diff --git a/paddle/fluid/platform/ngraph_helper.h b/paddle/fluid/platform/ngraph_helper.h index e74f57a79a66ea8fe8c9b972a9a2ec9d722731eb..9e6521653b80abec1c5212f5deb84153335c2a9c 100644 --- a/paddle/fluid/platform/ngraph_helper.h +++ b/paddle/fluid/platform/ngraph_helper.h @@ -16,7 +16,9 @@ limitations under the License. 
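The GetWarpCTCDsoHandle change above swaps the lookup order: the path baked in at configure time through warpctc_lib_path.h.in becomes the default, and FLAGS_warpctc_dir still wins whenever the user sets it. The selection logic is just (illustrative function name):

    #include <string>

    std::string PickWarpCTCDir(const std::string& flag_value,
                               const std::string& compiled_in_path) {
      return flag_value.empty() ? compiled_in_path : flag_value;
    }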
*/ #pragma once #include +#include #include +#include #include #include "ngraph/ngraph.hpp" @@ -103,6 +105,25 @@ std::shared_ptr GetOutputNode( return GetNode(op, name, op->Outputs(), ngb_node_map); } +template +std::shared_ptr CreateConstant(const ngraph::element::Type& type, + ngraph::Shape shape, + std::initializer_list values) { + std::shared_ptr result; + if (values.size() == 1 && shape != ngraph::Shape{} && // NOLINT + shape != ngraph::Shape{1}) { + result = std::make_shared(type, ngraph::Shape{}, + std::vector{values}); + ngraph::AxisSet axis_set; + for (size_t i = 0; i < shape.size(); ++i) axis_set.insert(i); + result = std::make_shared(result, shape, axis_set); + } else { + result = std::make_shared(type, shape, + std::vector{values}); + } + return result; +} + void SetOutputNode( const std::shared_ptr& op, const std::string name, std::shared_ptr node, diff --git a/paddle/fluid/pybind/const_value.cc b/paddle/fluid/pybind/const_value.cc index 4f9885b5839bf639b5d40911f2bb33071c2b5422..633e3259adaaedc92ce2f3420f4d1dbf86387143 100644 --- a/paddle/fluid/pybind/const_value.cc +++ b/paddle/fluid/pybind/const_value.cc @@ -13,7 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/fluid/pybind/const_value.h" -#include "paddle/fluid/framework/details/memory_optimize_pass.h" +#include "paddle/fluid/framework/ir/memory_optimize_pass/memory_optimize_pass.h" #include "paddle/fluid/framework/ir/node.h" #include "paddle/fluid/framework/op_proto_maker.h" #include "paddle/fluid/framework/operator.h" @@ -34,7 +34,7 @@ void BindConstValue(pybind11::module* m) { m->def("kControlDepVarName", [] { return framework::ir::Node::kControlDepVarName; }); m->def("kNewGradSuffix", [] { return framework::kNewGradSuffix; }); - m->def("kMemOptSkipVars", [] { return framework::details::kMemOptSkipVars; }); + m->def("kMemOptSkipVars", [] { return framework::ir::kMemOptSkipVars; }); auto op_proto_and_checker_maker = m->def_submodule("op_proto_and_checker_maker"); diff --git a/paddle/fluid/pybind/inference_api.cc b/paddle/fluid/pybind/inference_api.cc index 236afc77f708c344665821edd4f7c7841c300465..b650225c64a9a37e46d5b6f14eb2f03bebbaa71f 100644 --- a/paddle/fluid/pybind/inference_api.cc +++ b/paddle/fluid/pybind/inference_api.cc @@ -16,6 +16,7 @@ #include #include #include +#include #include #include #include "paddle/fluid/inference/api/analysis_predictor.h" @@ -229,6 +230,15 @@ void BindAnalysisConfig(py::module *m) { py::arg("min_subgraph_size") = 3, py::arg("precision_mode") = AnalysisConfig::Precision::kFloat32, py::arg("use_static") = true) + .def("enable_anakin_engine", &AnalysisConfig::EnableAnakinEngine, + py::arg("max_batch_size") = 1, + py::arg("max_input_shape") = + std::map>(), + py::arg("min_subgraph_size") = 6, + py::arg("precision_mode") = AnalysisConfig::Precision::kFloat32, + py::arg("auto_config_layout") = false, + py::arg("passes_filter") = std::vector(), + py::arg("ops_filter") = std::vector()) .def("tensorrt_engine_enabled", &AnalysisConfig::tensorrt_engine_enabled) .def("switch_ir_debug", &AnalysisConfig::SwitchIrDebug, py::arg("x") = true) diff --git a/paddle/fluid/pybind/pybind.cc b/paddle/fluid/pybind/pybind.cc index 8545b14e71c16cf7fb0fc1cc3bb092ae1425112d..63d37223ca7a83ba47081a6b3fc90ec510866cf8 100644 --- a/paddle/fluid/pybind/pybind.cc +++ b/paddle/fluid/pybind/pybind.cc @@ -21,11 +21,11 @@ limitations under the License. 
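Back in ngraph_helper.h, CreateConstant avoids storing a scalar once per element: a single value destined for a non-scalar shape is built as a scalar constant and then broadcast over every axis. A plain C++ analogue of that decision (the eager fill here stands in for ngraph's lazy Broadcast node):

    #include <cstddef>
    #include <vector>

    std::vector<float> MakeConstant(const std::vector<size_t>& shape,
                                    const std::vector<float>& values) {
      size_t numel = 1;
      for (size_t d : shape) numel *= d;
      if (values.size() == 1 && numel != 1) {
        return std::vector<float>(numel, values[0]);  // broadcast the scalar
      }
      return values;  // already one value per element
    }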
*/ #include #include -#include "paddle/fluid/framework/details/alloc_continuous_space_for_grad_pass.h" #include "paddle/fluid/framework/executor.h" #include "paddle/fluid/framework/feed_fetch_method.h" #include "paddle/fluid/framework/framework.pb.h" #include "paddle/fluid/framework/garbage_collector.h" +#include "paddle/fluid/framework/ir/alloc_continuous_space_for_grad_pass.h" #include "paddle/fluid/framework/ir/pass_builder.h" #include "paddle/fluid/framework/lod_rank_table.h" #include "paddle/fluid/framework/lod_tensor.h" @@ -170,9 +170,9 @@ PYBIND11_MODULE(core, m) { m.def("_set_eager_deletion_mode", &paddle::framework::SetEagerDeletionMode); m.def("_set_fuse_parameter_group_size", - &paddle::framework::details::SetFuseParameterGroupsSize); + &paddle::framework::ir::SetFuseParameterGroupsSize); m.def("_set_fuse_parameter_memory_size", - &paddle::framework::details::SetFuseParameterMemorySize); + &paddle::framework::ir::SetFuseParameterMemorySize); m.add_object("_cleanup", py::capsule([]() { ScopePool::Instance().Clear(); })); @@ -383,28 +383,37 @@ PYBIND11_MODULE(core, m) { LoD is short for Level of Details and is usually used for varied sequence length. You can skip the following comment if you don't need optional LoD. - For example: - A LoDTensor X can look like the example below. It contains 2 sequences. - The first has length 2 and the second has length 3, as described by x.lod. + For example, a LoDTensor X can look like the example below. It contains + 2 sequences. The first has length 2 and the second has length 3, as + described by x.lod. - The first tensor dimension 5=2+3 is calculated from LoD if it's available. - It means the total number of sequence element. In X, each element has 2 - columns, hence [5, 2]. + The first tensor dimension 5=2+3 is calculated from LoD if it's available. + It means the total number of sequence element. In X, each element has 2 + columns, hence [5, 2]. - x.lod = [[2, 3]] - x.data = [[1, 2], [3, 4], - [5, 6], [7, 8], [9, 10]] - x.shape = [5, 2] + x.lod = [[2, 3]] + + x.data = [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10]] - LoD can have multiple levels (for example, a paragraph can have multiple - sentences and a sentence can have multiple words). In the following - LodTensor Y, the lod_level is 2. It means there are 2 sequence, the - first sequence length is 2 (has 2 sub-sequences), the second one's - length is 1. The first sequence's 2 sub-sequences have length 2 and 2, - respectively. And the second sequence's 1 sub-sequence has length 3. + x.shape = [5, 2] - y.lod = [[2 1], [2 2 3]] - y.shape = [2+2+3, ...] + LoD can have multiple levels (for example, a paragraph can have multiple + sentences and a sentence can have multiple words). In the following + LodTensor Y, the lod_level is 2. It means there are 2 sequence, the + first sequence length is 2 (has 2 sub-sequences), the second one's + length is 1. The first sequence's 2 sub-sequences have length 2 and 2, + respectively. And the second sequence's 1 sub-sequence has length 3. + + y.lod = [[2 1], [2 2 3]] + + y.shape = [2+2+3, ...] + + Examples: + .. code-block:: python + + import paddle.fluid as fluid + + t = fluid.LoDTensor() Note: In above description, LoD is length-based. In Paddle internal @@ -416,7 +425,6 @@ PYBIND11_MODULE(core, m) { self-explanatory. In this case, it must be length-based. Due to history reasons. when LoD is called lod in public API, it might be offset-based. Users should be careful about it. 
- )DOC") .def("__array__", [](Tensor &self) { return TensorToPyArray(self); }) .def("__init__", @@ -454,6 +462,16 @@ PYBIND11_MODULE(core, m) { Args: lod (List[List[int]]): the lod to be set. + + Examples: + .. code-block:: python + + import paddle.fluid as fluid + import numpy as np + + t = fluid.LoDTensor() + t.set(np.ndarray([5, 30]), fluid.CPUPlace()) + t.set_lod([[0, 2, 5]]) )DOC") .def("set_recursive_sequence_lengths", [](LoDTensor &self, const std::vector> @@ -480,6 +498,16 @@ PYBIND11_MODULE(core, m) { Args: recursive_sequence_lengths (List[List[int]]): sequence lengths. + + Examples: + .. code-block:: python + + import paddle.fluid as fluid + import numpy as np + + t = fluid.LoDTensor() + t.set(np.ndarray([5, 30]), fluid.CPUPlace()) + t.set_recursive_sequence_lengths([[2, 3]]) )DOC") .def("lod", [](LoDTensor &self) -> std::vector> { @@ -495,6 +523,17 @@ PYBIND11_MODULE(core, m) { Returns: out (List[List[int]]): the lod of the LoDTensor. + + Examples: + .. code-block:: python + + import paddle.fluid as fluid + import numpy as np + + t = fluid.LoDTensor() + t.set(np.ndarray([5, 30]), fluid.CPUPlace()) + t.set_lod([[0, 2, 5]]) + print(t.lod()) # [[0, 2, 5]] )DOC") // Set above comments of set_lod. .def("recursive_sequence_lengths", @@ -511,6 +550,17 @@ PYBIND11_MODULE(core, m) { Returns: out (List[List[int]): the sequence lengths. + + Examples: + .. code-block:: python + + import paddle.fluid as fluid + import numpy as np + + t = fluid.LoDTensor() + t.set(np.ndarray([5, 30]), fluid.CPUPlace()) + t.set_recursive_sequence_lengths([[2, 3]]) + print(t.recursive_sequence_lengths()) # [[2, 3]] )DOC") .def("has_valid_recursive_sequence_lengths", [](LoDTensor &self) -> bool { @@ -523,6 +573,17 @@ PYBIND11_MODULE(core, m) { Returns: out (bool): whether the lod is valid. + + Examples: + .. code-block:: python + + import paddle.fluid as fluid + import numpy as np + + t = fluid.LoDTensor() + t.set(np.ndarray([5, 30]), fluid.CPUPlace()) + t.set_recursive_sequence_lengths([[2, 3]]) + print(t.has_valid_recursive_sequence_lengths()) # True )DOC") .def("__getitem__", PySliceTensor, py::return_value_policy::reference, R"DOC( @@ -807,6 +868,12 @@ All parameter, weight, gradient are variables in Paddle. CUDAPlace is a descriptor of a device. It represents a GPU, and each CUDAPlace has a dev_id to indicate the number of cards represented by the current CUDAPlace. The memory of CUDAPlace with different dev_id is not accessible. + + Examples: + .. code-block:: python + + gpu_place = fluid.CUDAPlace(0) + )DOC") .def("__init__", [](platform::CUDAPlace &self, int dev_id) { @@ -831,6 +898,12 @@ All parameter, weight, gradient are variables in Paddle. py::class_(m, "CPUPlace", R"DOC( CPUPlace is a descriptor of a device. It represents a CPU, and the memory CPUPlace can be accessed by CPU. + + Examples: + .. code-block:: python + + cpu_place = fluid.CPUPlace() + )DOC") .def(py::init<>()) .def("_type", &PlaceIndex) @@ -844,6 +917,12 @@ All parameter, weight, gradient are variables in Paddle. py::class_(m, "CUDAPinnedPlace", R"DOC( CUDAPinnedPlace is a descriptor of a device. The memory of CUDAPinnedPlace can be accessed by GPU and CPU. + + Examples: + .. code-block:: python + + place = fluid.CUDAPinnedPlace() + )DOC") .def("__init__", [](platform::CUDAPinnedPlace &self) { @@ -985,7 +1064,16 @@ All parameter, weight, gradient are variables in Paddle. return res; }); - py::class_(m, "LoDTensorArray") + py::class_(m, "LoDTensorArray", R"DOC( + Array of LoDTensor. + + Examples: + .. 
@@ -807,6 +868,12 @@ All parameter, weight, gradient are variables in Paddle.
     CUDAPlace is a descriptor of a device. It represents a GPU, and each CUDAPlace
     has a dev_id to indicate the number of cards represented by the current CUDAPlace.
     The memory of CUDAPlace with different dev_id is not accessible.
+
+    Examples:
+        .. code-block:: python
+
+          gpu_place = fluid.CUDAPlace(0)
+
        )DOC")
      .def("__init__",
           [](platform::CUDAPlace &self, int dev_id) {
@@ -831,6 +898,12 @@ All parameter, weight, gradient are variables in Paddle.
   py::class_<platform::CPUPlace>(m, "CPUPlace", R"DOC(
     CPUPlace is a descriptor of a device. It represents a CPU, and the memory
     CPUPlace can be accessed by CPU.
+
+    Examples:
+        .. code-block:: python
+
+          cpu_place = fluid.CPUPlace()
+
        )DOC")
      .def(py::init<>())
      .def("_type", &PlaceIndex<platform::CPUPlace>)
@@ -844,6 +917,12 @@ All parameter, weight, gradient are variables in Paddle.
   py::class_<platform::CUDAPinnedPlace>(m, "CUDAPinnedPlace", R"DOC(
     CUDAPinnedPlace is a descriptor of a device. The memory of CUDAPinnedPlace
     can be accessed by GPU and CPU.
+
+    Examples:
+        .. code-block:: python
+
+          place = fluid.CUDAPinnedPlace()
+
        )DOC")
      .def("__init__",
           [](platform::CUDAPinnedPlace &self) {
@@ -985,7 +1064,16 @@ All parameter, weight, gradient are variables in Paddle.
             return res;
           });
 
-  py::class_<LoDTensorArray>(m, "LoDTensorArray")
+  py::class_<LoDTensorArray>(m, "LoDTensorArray", R"DOC(
+    Array of LoDTensor.
+
+    Examples:
+        .. code-block:: python
+
+          import paddle.fluid as fluid
+
+          arr = fluid.LoDTensorArray()
+)DOC")
      .def("__init__",
           [](LoDTensorArray &instance) { new (&instance) LoDTensorArray(); })
      .def("__getitem__",
@@ -1004,7 +1092,20 @@ All parameter, weight, gradient are variables in Paddle.
             self.back().ShareDataWith(t);
             self.back().set_lod(t.lod());
           },
-          py::arg("tensor"), "Append a LoDensor to LoDTensorArray.");
+          py::arg("tensor"), R"DOC(
+             Append a LoDTensor to LoDTensorArray.
+
+             Examples:
+                 .. code-block:: python
+
+                   import paddle.fluid as fluid
+                   import numpy as np
+
+                   arr = fluid.LoDTensorArray()
+                   t = fluid.LoDTensor()
+                   t.set(np.ndarray([5, 30]), fluid.CPUPlace())
+                   arr.append(t)
+           )DOC");
 
   m.def("IsInplace",
         [](std::string op) -> bool { return operators::IsInplace(op); });
@@ -1403,6 +1504,9 @@ All parameter, weight, gradient are variables in Paddle.
             return &self.GetLocalScopes();
           },
           py::return_value_policy::reference)
+      .def("drop_local_exe_scopes", &ParallelExecutor::DropLocalExeScopes)
+      .def("_need_create_local_exe_scopes",
+           &ParallelExecutor::NeedCreateLocalExeScope)
      .def("feed_tensors_into_local_scopes",
           &ParallelExecutor::FeedTensorsIntoLocalScopes)
      .def("feed_and_split_tensor_into_local_scopes",
diff --git a/paddle/fluid/pybind/tensor_py.h b/paddle/fluid/pybind/tensor_py.h
index cec21f40073e2f674f8d843c5dc9934524bdb395..08e43bf24ce1a6863f13b6334f9b3272e4414ff5 100644
--- a/paddle/fluid/pybind/tensor_py.h
+++ b/paddle/fluid/pybind/tensor_py.h
@@ -472,6 +472,9 @@ inline std::string TensorDTypeToPyDTypeStr(
 }  // namespace details
 
 inline py::array TensorToPyArray(const framework::Tensor &tensor) {
+  if (!tensor.IsInitialized()) {
+    return py::array();
+  }
   bool is_gpu_tensor = platform::is_gpu_place(tensor.place());
   const auto &tensor_dims = tensor.dims();
   auto tensor_dtype = tensor.type();
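A short note on the observable effect of the `IsInitialized` guard added above: converting a freshly constructed (never `set()`) LoDTensor to numpy now yields an empty array instead of failing. A sketch of the assumed behavior:

.. code-block:: python

    import numpy as np
    import paddle.fluid as fluid

    t = fluid.LoDTensor()   # no set() call, so the tensor holds no data
    arr = np.array(t)       # goes through TensorToPyArray
    print(arr.size)         # 0 (empty array) rather than an error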
diff --git a/paddle/scripts/paddle_build.sh b/paddle/scripts/paddle_build.sh
index 51093d859f2713fc87c96f010eaca211ec4d11c5..f1630e0b509f214321ff8c3bb9857803be81ec16 100755
--- a/paddle/scripts/paddle_build.sh
+++ b/paddle/scripts/paddle_build.sh
@@ -931,6 +931,12 @@ EOF
     ./clean.sh
 }
 
+
+function build_document_preview() {
+    sh /paddle/tools/document_preview.sh ${PORT}
+}
+
+
 function main() {
     local CMD=$1
     local parallel_number=$2
@@ -1025,6 +1031,11 @@ function main() {
       test_fluid_lib)
         test_fluid_lib
        ;;
+      document)
+        cmake_gen ${PYTHON_ABI:-""}
+        build ${parallel_number}
+        build_document_preview
+        ;;
      *)
        print_usage
        exit 1
diff --git a/python/paddle/fluid/__init__.py b/python/paddle/fluid/__init__.py
index 631257cc2188fa704ca0273cc4fe378860ab1179..adc7c23f45a3b0a39272fa7d5b2bcab51bb8c690 100644
--- a/python/paddle/fluid/__init__.py
+++ b/python/paddle/fluid/__init__.py
@@ -137,11 +137,10 @@ def __bootstrap__():
         'paddle_num_threads', "dist_threadpool_size", 'eager_delete_tensor_gb',
         'fast_eager_deletion_mode', 'memory_fraction_of_eager_deletion',
         'allocator_strategy', 'reader_queue_speed_test_mode',
-        'print_sub_graph_dir', 'pe_profile_fname', 'warpctc_dir',
-        'inner_op_parallelism', 'enable_parallel_graph',
-        'fuse_parameter_groups_size', 'multiple_of_cupti_buffer_size',
-        'enable_subgraph_optimize', 'fuse_parameter_memory_size',
-        'tracer_profile_fname'
+        'print_sub_graph_dir', 'pe_profile_fname', 'inner_op_parallelism',
+        'enable_parallel_graph', 'fuse_parameter_groups_size',
+        'multiple_of_cupti_buffer_size', 'enable_subgraph_optimize',
+        'fuse_parameter_memory_size', 'tracer_profile_fname'
     ]
     if 'Darwin' not in sysstr:
         read_env_flags.append('use_pinned_memory')
diff --git a/python/paddle/fluid/backward.py b/python/paddle/fluid/backward.py
index 9400eaadaa65b63f52513b43f76b3f06b731460d..41f9016edcb0964b4a95c10e257d10d548306ee8 100644
--- a/python/paddle/fluid/backward.py
+++ b/python/paddle/fluid/backward.py
@@ -457,7 +457,13 @@ def append_backward(loss, parameter_list=None, no_grad_set=None,
         .. code-block:: python
 
             # network configuration code
-            # ...
+            # loss from ...
+            x = fluid.layers.data(name='x', shape=[13], dtype='float32')
+            y = fluid.layers.data(name='y', shape=[1], dtype='float32')
+
+            y_predict = fluid.layers.fc(input=x, size=1, act=None)
+            loss = fluid.layers.square_error_cost(input=y_predict, label=y)
+            avg_loss = fluid.layers.mean(loss)
 
             param_grad_list = fluid.backward.append_backward(loss=avg_loss)
 """
diff --git a/python/paddle/fluid/compiler.py b/python/paddle/fluid/compiler.py
index 624c9934d5392b57526edea68254ddf45bd79f4c..f01a6dd9da2dd518227d0f45bab9a140191d38de 100644
--- a/python/paddle/fluid/compiler.py
+++ b/python/paddle/fluid/compiler.py
@@ -58,20 +58,34 @@ class CompiledProgram(object):
           optimizations, for example.
         * Pre-compute some logic once so that each run is faster.
         * Transform the program so that it can run in multiple devices.
-       * TODO: transform the program for optimized inference or distributed
-         training.
+       * Transform the program for optimized inference or distributed
+         training. **Note: this part is not finished yet.**
 
     Example:
         .. code-block:: python
 
-           place = fluid.CUDAPlace(0) if use_gpu else fluid.CPUPlace()
-           exe = fluid.Executor(place)
-           exe.run(startup)
-           compiled_prog = compiler.CompiledProgram(main).with_data_parallel(
-               loss_name=loss.name)
-           for i in range(5):
-               test_loss, = exe.run(compiled_prog,
-                                    feed=feed_dict,
-                                    fetch_list=[loss.name])
+
+           import paddle.fluid as fluid
+           import paddle.fluid.compiler as compiler
+           import numpy
+           import os
+
+           place = fluid.CUDAPlace(0) # fluid.CPUPlace()
+           exe = fluid.Executor(place)
+
+           data = fluid.layers.data(name='X', shape=[1], dtype='float32')
+           hidden = fluid.layers.fc(input=data, size=10)
+           loss = fluid.layers.mean(hidden)
+           fluid.optimizer.SGD(learning_rate=0.01).minimize(loss)
+
+           fluid.default_startup_program().random_seed=1
+           exe.run(fluid.default_startup_program())
+           compiled_prog = compiler.CompiledProgram(
+                    fluid.default_main_program())
+
+           x = numpy.random.random(size=(10, 1)).astype('float32')
+           loss_data, = exe.run(compiled_prog,
+                                feed={"X": x},
+                                fetch_list=[loss.name])
 
     Args:
         program_or_graph (Graph|Program): If it's Program, it will be first
@@ -108,6 +122,44 @@ class CompiledProgram(object):
                            places=None):
         """Configs the program to run in data parallel way.
 
+        Example:
+            .. code-block:: python
+
+               import paddle.fluid as fluid
+               import paddle.fluid.compiler as compiler
+               import numpy
+               import os
+
+               use_cuda = True
+               place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
+
+               # NOTE: If you use CPU to run the program, you need
+               # to specify the CPU_NUM, otherwise, fluid will use
+               # all the number of the logic core as the CPU_NUM,
+               # in that case, the batch size of the input should be
+               # greater than CPU_NUM, if not, the process will
+               # fail with an exception.
+ if not use_cuda: + os.environ['CPU_NUM'] = str(2) + + exe = fluid.Executor(place) + + data = fluid.layers.data(name='X', shape=[1], dtype='float32') + hidden = fluid.layers.fc(input=data, size=10) + loss = fluid.layers.mean(hidden) + fluid.optimizer.SGD(learning_rate=0.01).minimize(loss) + + fluid.default_startup_program().random_seed=1 + exe.run(fluid.default_startup_program()) + compiled_prog = compiler.CompiledProgram( + fluid.default_main_program()).with_data_parallel( + loss_name=loss.name) + + x = numpy.random.random(size=(10, 1)).astype('float32') + loss_data, = exe.run(compiled_prog, + feed={"X": x}, + fetch_list=[loss.name]) + Args: loss_name (str): The loss name must set in training. Default None. build_strategy(BuildStrategy): build_strategy is used to diff --git a/python/paddle/fluid/contrib/int8_inference/utility.py b/python/paddle/fluid/contrib/int8_inference/utility.py index b35d9f2424ccf093f70e75b13e23f6c5ad59e859..605dfdf53d0cb44972defcc3f86aa95982b82e41 100644 --- a/python/paddle/fluid/contrib/int8_inference/utility.py +++ b/python/paddle/fluid/contrib/int8_inference/utility.py @@ -634,7 +634,8 @@ class Calibrator(object): break starting_iter = int(0.6 * ending_iter) bin_width = hist_edeges[1] - hist_edeges[0] - P_sum = len(activation_blob) + + P_sum = len(np.array(activation_blob).ravel()) min_kl_divergence = 0 min_kl_index = 0 kl_inited = False diff --git a/python/paddle/fluid/contrib/slim/quantization/quantization_pass.py b/python/paddle/fluid/contrib/slim/quantization/quantization_pass.py index 3809e327943832571a1bde6a53a0a6e7fbd13bdd..0d989903a9aea018913e3ee30e2b80f9341f77c0 100644 --- a/python/paddle/fluid/contrib/slim/quantization/quantization_pass.py +++ b/python/paddle/fluid/contrib/slim/quantization/quantization_pass.py @@ -22,7 +22,7 @@ from .... import unique_name __all__ = [ 'QuantizationTransformPass', 'QuantizationFreezePass', 'ConvertToInt8Pass', - 'TransformForMobilePass' + 'TransformForMobilePass', 'ScaleForTrainingPass', 'ScaleForInferencePass' ] @@ -383,7 +383,7 @@ class QuantizationTransformPass(object): data_type = 'float64' if var_node.dtype( ) == core.VarDesc.VarType.FP64 else 'float32' _init_var_node( - scale_in_node, + state_in_node, np.ones( [1], dtype=data_type), self._scope, @@ -962,3 +962,158 @@ class TransformForMobilePass(object): graph.safe_remove_nodes(op_node) graph.resolve_hazard() return graph + + +class ScaleForTrainingPass(object): + def __init__(self, scope=None, place=None, moving_rate=0.9): + """ + This pass is used for calculating output scales of some operators. + These output scales may be used by tensorRT or some other inference engines. + + Args: + scope(fluid.Scope): The scope is used to initialize these new parameters. + place(fluid.CPUPlace|fluid.CUDAPlace): The place is used to initialize new parameters. + moving_rate(float): The decay coefficient of moving average. The default value is 0.9. + """ + self._scope = scope + self._place = place + self._moving_rate = moving_rate + self._is_test = None + self._teller_set = [ + "mul", "conv2d", "pool2d", "relu", "softmax", "sigmoid", + "depthwise_conv2d", "batch_norm", "concat", "tanh", "pad", + "elementwise_add", "elementwise_mul", "dropout", "split", "prelu", + "conv2d_transpose", "leaky_relu" + ] + + def apply(self, graph): + """ + Insert the `moving_average_abs_max_scale` op in order to calculate output scales + of operators in the teller_set. + + Args: + graph(IrGraph): the target graph. 
+ """ + self._is_test = graph.is_test() + ops = graph.all_op_nodes() + for op_node in ops: + name = op_node.name() + if name in self._teller_set: + if len(op_node.output_arg_names()) != 1: + continue + in_node = graph._find_node_by_name( + op_node.outputs, op_node.output_arg_names()[0]) + out_node = graph.create_var_node_from_desc(in_node.var()) + scale_node = graph.create_persistable_node( + name=self._scale_name(in_node.name()), + var_type=core.VarDesc.VarType.LOD_TENSOR, + shape=[1], + var_dtype=in_node.dtype()) + ins = {'X': in_node} + outs = {'Out': out_node, 'OutScale': scale_node} + if not self._is_test: + state_in_node = graph.create_persistable_node( + name=unique_name.generate('scale_state@'), + var_type=core.VarDesc.VarType.LOD_TENSOR, + var_dtype=in_node.dtype(), + shape=[1]) + data_type = 'float64' if in_node.dtype( + ) == core.VarDesc.VarType.FP64 else 'float32' + _init_var_node( + state_in_node, + np.ones( + [1], dtype=data_type), + self._scope, + self._place) + accum_in_node = graph.create_persistable_node( + name=unique_name.generate('scale_accum@'), + var_type=core.VarDesc.VarType.LOD_TENSOR, + var_dtype=in_node.dtype(), + shape=[1]) + _init_var_node( + accum_in_node, + np.ones( + [1], dtype=data_type), + self._scope, + self._place) + state_out_node = graph.create_var_node_from_desc( + state_in_node.var()) + accum_out_node = graph.create_var_node_from_desc( + accum_in_node.var()) + + ins['InState'] = state_in_node + ins['InAccum'] = accum_in_node + outs['OutState'] = state_out_node + outs['OutAccum'] = accum_out_node + + attrs = { + 'moving_rate': self._moving_rate, + 'is_test': self._is_test, + 'op_role': core.op_proto_and_checker_maker.OpRole.Forward + } + scale_op_node = graph.create_op_node( + op_type='moving_average_abs_max_scale', + attrs=attrs, + inputs=ins, + outputs=outs) + graph.link_to(in_node, scale_op_node) + graph.link_to(scale_op_node, out_node) + graph.link_to(scale_op_node, scale_node) + if not self._is_test: + graph.link_to(state_in_node, scale_op_node) + graph.link_to(accum_in_node, scale_op_node) + graph.link_to(scale_op_node, state_out_node) + graph.link_to(scale_op_node, accum_out_node) + graph.resolve_hazard() + return graph + + def _scale_name(self, var_name): + """ + Return the scale name for the var named `var_name`. + """ + return "%s@scale" % (var_name) + + +class ScaleForInferencePass(object): + def __init__(self, scope=None): + """ + This pass is used for setting output scales of some operators. + These output scales may be used by tensorRT or some other inference engines. + + Args: + scope(fluid.Scope): The scope is used to initialize these new parameters. + """ + self._scope = scope + self._teller_set = [ + "mul", "conv2d", "pool2d", "relu", "softmax", "sigmoid", + "depthwise_conv2d", "batch_norm", "concat", "tanh", "pad", + "elementwise_add", "elementwise_mul", "dropout", "split", "prelu", + "conv2d_transpose", "leaky_relu" + ] + + def apply(self, graph): + """ + Get output scales from the scope and set these scales in op_descs + of operators in the teller_set. + + Args: + graph(IrGraph): the target graph. 
+ """ + ops = graph.all_op_nodes() + for op_node in ops: + name = op_node.name() + if name in self._teller_set: + if len(op_node.output_arg_names()) != 1: + continue + scale_name = self._scale_name(op_node.output_arg_names()[0]) + scale_v = np.array( + self._scope.find_var(scale_name).get_tensor())[0] + op_node.op()._set_attr("out_scale", float(scale_v)) + graph.resolve_hazard() + return graph + + def _scale_name(self, var_name): + """ + Return the scale name for the var named `var_name`. + """ + return "%s@scale" % (var_name) diff --git a/python/paddle/fluid/contrib/slim/tests/test_quantization_scale_pass.py b/python/paddle/fluid/contrib/slim/tests/test_quantization_scale_pass.py new file mode 100644 index 0000000000000000000000000000000000000000..1ed41da0f842b5eac8fd622a96a2fbd68adf98ae --- /dev/null +++ b/python/paddle/fluid/contrib/slim/tests/test_quantization_scale_pass.py @@ -0,0 +1,190 @@ +# copyright (c) 2018 paddlepaddle authors. all rights reserved. +# +# licensed under the apache license, version 2.0 (the "license"); +# you may not use this file except in compliance with the license. +# you may obtain a copy of the license at +# +# http://www.apache.org/licenses/license-2.0 +# +# unless required by applicable law or agreed to in writing, software +# distributed under the license is distributed on an "as is" basis, +# without warranties or conditions of any kind, either express or implied. +# see the license for the specific language governing permissions and +# limitations under the license. + +import os +import unittest +import random +import numpy as np +import six +import paddle.fluid as fluid +import paddle +from paddle.fluid.framework import IrGraph +from paddle.fluid.contrib.slim.quantization import QuantizationTransformPass +from paddle.fluid.contrib.slim.quantization import QuantizationFreezePass +from paddle.fluid.contrib.slim.quantization import ScaleForTrainingPass +from paddle.fluid.contrib.slim.quantization import ScaleForInferencePass +from paddle.fluid import core + +os.environ["CUDA_VISIBLE_DEVICES"] = "0" +os.environ["CPU_NUM"] = "1" + + +def residual_block(img, label, num=1): + def conv_bn_layer(input, + ch_out, + filter_size, + stride, + padding, + act='relu', + bias_attr=False): + tmp = fluid.layers.conv2d( + input=input, + filter_size=filter_size, + num_filters=ch_out, + stride=stride, + padding=padding, + act=None, + bias_attr=bias_attr) + return fluid.layers.batch_norm(input=tmp, act=act) + + hidden = img + for _ in six.moves.xrange(num): + conv = conv_bn_layer(hidden, 20, 3, 1, 1, act=None, bias_attr=True) + short = conv_bn_layer(hidden, 20, 1, 1, 0, act=None) + hidden = fluid.layers.elementwise_add(x=conv, y=short, act='relu') + fc = fluid.layers.fc(input=hidden, size=10, act='softmax') + loss = fluid.layers.cross_entropy(input=fc, label=label) + loss = fluid.layers.mean(loss) + return loss + + +class TestQuantizationScalePass(unittest.TestCase): + def quantization_scale(self, + use_cuda, + seed, + activation_quant_type, + weight_quant_type='abs_max', + for_ci=False): + def build_program(main, startup, is_test): + main.random_seed = seed + startup.random_seed = seed + with fluid.unique_name.guard(): + with fluid.program_guard(main, startup): + img = fluid.layers.data( + name='image', shape=[1, 28, 28], dtype='float32') + label = fluid.layers.data( + name='label', shape=[1], dtype='int64') + loss = residual_block(img, label, 1) + if not is_test: + opt = fluid.optimizer.Adam(learning_rate=0.0001) + opt.minimize(loss) + return [img, label], loss + + 
random.seed(0) + np.random.seed(0) + + main = fluid.Program() + startup = fluid.Program() + test_program = fluid.Program() + feeds, loss = build_program(main, startup, False) + build_program(test_program, startup, True) + test_program = test_program.clone(for_test=True) + main_graph = IrGraph(core.Graph(main.desc), for_test=False) + test_graph = IrGraph(core.Graph(test_program.desc), for_test=True) + + place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() + exe = fluid.Executor(place) + scope = fluid.Scope() + with fluid.scope_guard(scope): + exe.run(startup) + transform_pass = QuantizationTransformPass( + scope=scope, + place=place, + activation_quantize_type=activation_quant_type, + weight_quantize_type=weight_quant_type) + transform_pass.apply(main_graph) + transform_pass.apply(test_graph) + scale_training_pass = ScaleForTrainingPass(scope=scope, place=place) + scale_training_pass.apply(main_graph) + dev_name = '_gpu' if use_cuda else '_cpu' + if not for_ci: + marked_nodes = set() + for op in main_graph.all_op_nodes(): + if op.name().find('quantize') > -1: + marked_nodes.add(op) + main_graph.draw('.', 'main_scale' + dev_name, marked_nodes) + marked_nodes = set() + for op in test_graph.all_op_nodes(): + if op.name().find('quantize') > -1: + marked_nodes.add(op) + test_graph.draw('.', 'test_scale' + dev_name, marked_nodes) + + build_strategy = fluid.BuildStrategy() + build_strategy.memory_optimize = False + build_strategy.enable_inplace = False + binary = fluid.CompiledProgram(main_graph.graph).with_data_parallel( + loss_name=loss.name, build_strategy=build_strategy) + iters = 5 + batch_size = 8 + + train_reader = paddle.batch( + paddle.reader.shuffle( + paddle.dataset.mnist.train(), buf_size=500), + batch_size=batch_size) + feeder = fluid.DataFeeder(feed_list=feeds, place=place) + with fluid.scope_guard(scope): + for _ in range(iters): + data = next(train_reader()) + loss_v = exe.run(binary, + feed=feeder.feed(data), + fetch_list=[loss]) + if not for_ci: + print('{}: {}'.format('loss' + dev_name, loss_v)) + + scale_inference_pass = ScaleForInferencePass(scope=scope) + scale_inference_pass.apply(test_graph) + + # Freeze graph for inference, but the weight of fc/conv is still float type. 
+ freeze_pass = QuantizationFreezePass( + scope=scope, place=place, weight_quantize_type=weight_quant_type) + freeze_pass.apply(test_graph) + server_program = test_graph.to_program() + + if not for_ci: + marked_nodes = set() + for op in test_graph.all_op_nodes(): + if op.name().find('quantize') > -1: + marked_nodes.add(op) + test_graph.draw('.', 'quant_scale' + dev_name, marked_nodes) + + with open('quant_scale_model' + dev_name + '.txt', 'w') as f: + f.write(str(server_program)) + + with fluid.scope_guard(scope): + fluid.io.save_inference_model('quant_scale_model' + dev_name, + ['image', 'label'], [loss], exe, + server_program) + + def test_quant_scale_cuda(self): + if fluid.core.is_compiled_with_cuda(): + with fluid.unique_name.guard(): + self.quantization_scale( + True, + seed=1, + activation_quant_type='moving_average_abs_max', + weight_quant_type='channel_wise_abs_max', + for_ci=True) + + def test_quant_scale_cpu(self): + with fluid.unique_name.guard(): + self.quantization_scale( + False, + seed=2, + activation_quant_type='moving_average_abs_max', + weight_quant_type='channel_wise_abs_max', + for_ci=True) + + +if __name__ == '__main__': + unittest.main() diff --git a/python/paddle/fluid/dygraph/nn.py b/python/paddle/fluid/dygraph/nn.py index 0ab981518beb4cc48e18c17e4f0f91c22b60dbb7..d6360fedd4756b6765e141b9e31b08d8ddcf0f5e 100644 --- a/python/paddle/fluid/dygraph/nn.py +++ b/python/paddle/fluid/dygraph/nn.py @@ -161,11 +161,13 @@ class Conv2D(layers.Layer): raise ValueError("use_cudnn should be True or False") self._use_cudnn = use_cudnn self._num_channels = num_channels - if (self._num_channels == self._groups and - num_filters % self._num_channels == 0 and not self._use_cudnn): - self._l_type = 'depthwise_conv2d' - else: - self._l_type = 'conv2d' + # if (self._num_channels == self._groups and + # num_filters % self._num_channels == 0 and not self._use_cudnn): + # self._l_type = 'depthwise_conv2d' + # else: + # TODO(jiabin): recover the usage of depthwise_conv2d when it's + # kernel fixed https://github.com/PaddlePaddle/Paddle/issues/17275 + self._l_type = 'conv2d' if groups is None: num_filter_channels = num_channels diff --git a/python/paddle/fluid/executor.py b/python/paddle/fluid/executor.py index 0b9a23e6769389715535a4ea9dea77bfd3c2707b..063b65e8eefd6407a5b9a16930c8da129e5f7df6 100644 --- a/python/paddle/fluid/executor.py +++ b/python/paddle/fluid/executor.py @@ -38,6 +38,15 @@ def global_scope(): Get the global/default scope instance. There are a lot of APIs use :code:`global_scope` as its default value, e.g., :code:`Executor.run` + Examples: + .. code-block:: python + + import paddle.fluid as fluid + import numpy + + fluid.global_scope().var("data").get_tensor().set(numpy.ones((2, 2)), fluid.CPUPlace()) + numpy.array(fluid.global_scope().find_var("data").get_tensor()) + Returns: Scope: The global/default scope instance. """ @@ -57,15 +66,20 @@ def scope_guard(scope): Change the global/default scope instance by Python `with` statement. All variable in runtime will assigned to the new scope. - Examples: - >>> import paddle.fluid as fluid - >>> new_scope = fluid.Scope() - >>> with fluid.scope_guard(new_scope): - >>> ... - Args: scope: The new global/default scope. + + Examples: + .. 
code-block:: python
+
+            import paddle.fluid as fluid
+            import numpy
+
+            new_scope = fluid.Scope()
+            with fluid.scope_guard(new_scope):
+                 fluid.global_scope().var("data").get_tensor().set(numpy.ones((2, 2)), fluid.CPUPlace())
+                 numpy.array(new_scope.find_var("data").get_tensor())
     """
+
     ex = _switch_scope(scope)
     yield
     _switch_scope(ex)
 
@@ -75,11 +89,18 @@ def as_numpy(tensor):
     """
     Convert a Tensor to a numpy.ndarray, its only support Tensor without LoD information.
     For higher dimensional sequence data, please use LoDTensor directly.
+
     Examples:
-        >>> import paddle.fluid as fluid
-        >>> outs = executor.run(...)
-        >>> np_outs = map(lambda x: as_numpy(x), outs)
-        >>> ...
+        .. code-block:: python
+
+          import paddle.fluid as fluid
+          import numpy
+
+          new_scope = fluid.Scope()
+          with fluid.scope_guard(new_scope):
+              fluid.global_scope().var("data").get_tensor().set(numpy.ones((2, 2)), fluid.CPUPlace())
+              tensor = new_scope.find_var("data").get_tensor()
+          fluid.executor.as_numpy(tensor) # or numpy.array(new_scope.find_var("data").get_tensor())
 
     Args:
        tensor(Variable): a instance of Tensor
@@ -98,7 +119,10 @@ def as_numpy(tensor):
             They can not be completely cast to Python ndarray. \
             Please set the parameter 'return_numpy' as 'False' to \
             return LoDTensor itself directly.")
-    return np.array(tensor)
+    if tensor._is_initialized():
+        return np.array(tensor)
+    else:
+        return None
 
 
 def has_feed_operators(block, feed_targets, feed_holder_name):
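One practical consequence of the `_is_initialized` branch above: converting a tensor that was never written now returns None from `as_numpy` instead of raising. A hedged sketch (the variable name is illustrative, and this assumes the `_is_initialized` binding is available on LoDTensor):

.. code-block:: python

    import paddle.fluid as fluid

    scope = fluid.global_scope()
    scope.var("never_written")  # variable exists, but its tensor is uninitialized
    t = scope.find_var("never_written").get_tensor()
    print(fluid.executor.as_numpy(t))  # None rather than an exception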
@@ -263,42 +287,70 @@ def _as_lodtensor(data, place):
 
 class Executor(object):
     """
-    An Executor in Python, supports single/multiple-GPU running, and single/multiple-CPU running.
-    Python executor takes a program, adds feed operators and fetch operators to this program according
-    to feed map and fetch_list. Feed map provides input data for the program. fetch_list provides
-    the variables(or names) that user wants to get after program runs. Note: the executor will run all
-    operators in the program but not only the operators dependent by the fetch_list.
-    It stores the global variables into the global scope, and creates a local scope for the temporary
-    variables. The contents in local scope may be discarded after every minibatch forward/backward
-    finished. But the global scope variables will be persistent through different runs.
-
-
-    Example:
+    An Executor in Python, supports single/multiple-GPU running,
+    and single/multiple-CPU running. Python executor takes a program,
+    adds feed operators and fetch operators to this program according
+    to feed map and fetch_list. Feed map provides input data for the
+    program. fetch_list provides the variables(or names) that user wants
+    to get after program runs. Note: the executor will run all operators
+    in the program, not only the operators that the fetch_list depends on.
+    It stores the global variables into the global scope, and creates a
+    local scope for the temporary variables. The contents in local scope
+    may be discarded after every minibatch forward/backward finished.
+    But the global scope variables will be persistent through different runs.
+
+    Examples:
         .. code-block:: python
 
-          # First create the Executor.
-          place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
-          exe = fluid.Executor(place)
-
-          # Run the startup program once and only once.
-          # Not need to optimize/compile the startup program.
-          exe.run(fluid.default_startup_program())
-
-          # Run the main program directly without compile.
-          loss, = exe.run(fluid.default_main_program(),
-                          feed=feed_dict,
-                          fetch_list=[loss.name])
-          # Or, compiled the program and run. See `CompiledProgram` for more detail.
-          compiled_prog = compiler.CompiledProgram(
-              fluid.default_main_program()).with_data_parallel(
-                  loss_name=loss.name)
-          loss, = exe.run(compiled_prog,
-                          feed=feed_dict,
-                          fetch_list=[loss.name])
+          import paddle.fluid as fluid
+          import paddle.fluid.compiler as compiler
+          import numpy
+          import os
+
+          use_cuda = True
+          place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
+          exe = fluid.Executor(place)
+
+          train_program = fluid.Program()
+          startup_program = fluid.Program()
+          with fluid.program_guard(train_program, startup_program):
+              data = fluid.layers.data(name='X', shape=[1], dtype='float32')
+              hidden = fluid.layers.fc(input=data, size=10)
+              loss = fluid.layers.mean(hidden)
+              fluid.optimizer.SGD(learning_rate=0.01).minimize(loss)
+
+          # Run the startup program once and only once.
+          # No need to optimize/compile the startup program.
+          startup_program.random_seed=1
+          exe.run(startup_program)
+
+          # Run the main program directly without compile.
+          x = numpy.random.random(size=(10, 1)).astype('float32')
+          loss_data, = exe.run(train_program,
+                               feed={"X": x},
+                               fetch_list=[loss.name])
+
+          # Or, compile the program and run. See `CompiledProgram`
+          # for more detail.
+          # NOTE: If you use CPU to run the program, you need
+          # to specify the CPU_NUM, otherwise, fluid will use
+          # all the number of the logic core as the CPU_NUM,
+          # in that case, the batch size of the input should be
+          # greater than CPU_NUM, if not, the process will
+          # fail with an exception.
+          if not use_cuda:
+              os.environ['CPU_NUM'] = str(2)
+
+          compiled_prog = compiler.CompiledProgram(
+              train_program).with_data_parallel(
+                  loss_name=loss.name)
+          loss_data, = exe.run(compiled_prog,
+                               feed={"X": x},
+                               fetch_list=[loss.name])
 
     Args:
-        place(core.CPUPlace|core.CUDAPlace(n)): indicate the executor run on which device
+        place(fluid.CPUPlace|fluid.CUDAPlace(n)): indicates on which device the
+            executor runs.
+
     """
 
     def __init__(self, place):
@@ -392,14 +444,18 @@ class Executor(object):
         Close this executor. You can no longer use this executor after calling this method.
-        For the distributed training, this method would free the resource on PServers related to
-        the current Trainer.
-
-        Example:
-            >>> cpu = core.CPUPlace()
-            >>> exe = Executor(cpu)
-            >>> ...
-            >>> exe.close()
+        For the distributed training, this method would free the resource
+        on PServers related to the current Trainer.
+
+        Examples:
+            .. code-block:: python
+
+              import paddle.fluid as fluid
+
+              cpu = fluid.CPUPlace()
+              exe = fluid.Executor(cpu)
+              # execute training or testing
+              exe.close()
         """
         if not self._closed:
             self._default_executor.close()
@@ -490,13 +546,37 @@ class Executor(object):
                 return_numpy=True,
                 use_program_cache=False):
         """
-        Run program by this Executor. Feed data by feed map, fetch result by fetch_list.
-        Python executor takes a program, add feed operators and fetch operators to this program according
-        to feed map and fetch_list. Feed map provides input data for the program. fetch_list provides
+        Run program by this Executor. Feed data by feed map, fetch result by
+        fetch_list. Python executor takes a program, adds feed operators and
+        fetch operators to this program according to feed map and fetch_list.
+        Feed map provides input data for the program. fetch_list provides
         the variables(or names) that user want to get after program run.
 
-        Note: the executor will run all
-        operators in the program but not only the operators dependent by the fetch_list
+        Note: the executor will run all operators in the program, not
+        only the operators that the fetch_list depends on.
+
+        Examples:
+            .. code-block:: python
+
+              import paddle.fluid as fluid
+              import numpy
+
+              # First create the Executor.
+              place = fluid.CPUPlace() # fluid.CUDAPlace(0)
+              exe = fluid.Executor(place)
+
+              data = fluid.layers.data(name='X', shape=[1], dtype='float32')
+              hidden = fluid.layers.fc(input=data, size=10)
+              loss = fluid.layers.mean(hidden)
+              adam = fluid.optimizer.Adam()
+              adam.minimize(loss)
+
+              # Run the startup program once and only once.
+              exe.run(fluid.default_startup_program())
+
+              x = numpy.random.random(size=(10, 1)).astype('float32')
+              outs = exe.run(feed={'X': x},
+                             fetch_list=[loss.name])
 
         Args:
             program(Program|CompiledProgram): the program that need to run,
@@ -520,26 +600,6 @@ class Executor(object):
 
         Returns:
             list(numpy.array): fetch result according to fetch_list.
-
-
-        Examples:
-
-            >>> data = fluid.layers.data(name='X', shape=[1], dtype='float32')
-            >>> out = fluid.layers.create_tensor(dtype='float32')
-            >>> hidden = fluid.layers.fc(input=data, size=10)
-            >>> fluid.layers.assign(hidden,out)
-            >>> loss = fluid.layers.mean(out)
-            >>> adam = fluid.optimizer.Adam()
-            >>> adam.minimize(loss)
-
-            >>> cpu = core.CPUPlace()
-            >>> exe = fluid.Executor(cpu)
-            >>> exe.run(fluid.default_startup_program())
-
-            >>> x = numpy.random.random(size=(10, 1)).astype('float32')
-            >>> outs = exe.run(
-            >>>     feed={'X': x},
-            >>>     fetch_list=[loss.name])
         """
 
         if self._closed:
@@ -732,13 +792,15 @@ class Executor(object):
             .. code-block:: python
 
                 import paddle.fluid as fluid
-                place = fluid.CPUPlace()
+
+                place = fluid.CPUPlace() # you can set place = fluid.CUDAPlace(0) to use gpu
                 exe = fluid.Executor(place)
-                x = fluid.layers.data(name="x", type="int64")
-                y = fluid.layers.data(name="y", type="int64")
+                x = fluid.layers.data(name="x", shape=[10, 10], dtype="int64")
+                y = fluid.layers.data(name="y", shape=[1], dtype="int64", lod_level=1)
                 dataset = fluid.DatasetFactory().create_dataset()
                 dataset.set_use_var([x, y])
-                filelist = ["dataA.txt", "dataB.txt"]
+                dataset.set_thread(1)
+                filelist = [] # you should set your own filelist, e.g. filelist = ["dataA.txt"]
                 dataset.set_filelist(filelist)
                 exe.run(fluid.default_startup_program())
                 exe.infer_from_dataset(program=fluid.default_main_program(),
@@ -811,14 +873,15 @@ class Executor(object):
             .. code-block:: python
 
                 import paddle.fluid as fluid
-                place = fluid.CPUPlace()
+
+                place = fluid.CPUPlace() # you can set place = fluid.CUDAPlace(0) to use gpu
                 exe = fluid.Executor(place)
-                x = fluid.layers.data(name="x", type="int64")
-                y = fluid.layers.data(name="y", type="int64")
+                x = fluid.layers.data(name="x", shape=[10, 10], dtype="int64")
+                y = fluid.layers.data(name="y", shape=[1], dtype="int64", lod_level=1)
                 dataset = fluid.DatasetFactory().create_dataset()
                 dataset.set_use_var([x, y])
-                dataset.set_thread(2)
-                filelist = ["dataA.txt", "dataB.txt"]
+                dataset.set_thread(1)
+                filelist = [] # you should set your own filelist, e.g. filelist = ["dataA.txt"]
filelist = ["dataA.txt"] dataset.set_filelist(filelist) exe.run(fluid.default_startup_program()) exe.train_from_dataset(program=fluid.default_main_program(), diff --git a/python/paddle/fluid/framework.py b/python/paddle/fluid/framework.py index 17f698e9e1056aa4835daf6195aa8bd646ed9f13..806499ca2e8b73562a79a3c60a05234c33b45fe8 100644 --- a/python/paddle/fluid/framework.py +++ b/python/paddle/fluid/framework.py @@ -81,9 +81,19 @@ _dygraph_current_expected_place_ = None def in_dygraph_mode(): - ''' - Returns(bool): True if the program is running in dynamic graph mode - ''' + """ + Check program status(tracer), Whether it runs in dygraph mode or not + + Returns: + out (boolean): True if the program is running in dynamic graph mode + + Examples: + .. code-block:: python + + if fluid.in_dygraph_mode(): + pass + + """ return _dygraph_tracer_ is not None @@ -100,7 +110,7 @@ def _cpu_num(): def cuda_places(device_ids=None): - ''' + """ Create a list of :code:`fluid.CUDAPlace` objects. If :code:`device_ids` is None, environment variable of @@ -120,7 +130,13 @@ def cuda_places(device_ids=None): Returns: out (list(fluid.CUDAPlace)): gpu place list. - ''' + + Examples: + .. code-block:: python + + cuda_places = fluid.cuda_places() + + """ assert core.is_compiled_with_cuda(), \ "Not compiled with CUDA" if device_ids is None: @@ -135,7 +151,7 @@ def cuda_places(device_ids=None): def cpu_places(device_count=None): - ''' + """ Create a list of :code:`fluid.CPUPlace` objects. If :code:`device_count` is None, the device count would @@ -148,14 +164,20 @@ def cpu_places(device_count=None): Returns: out (list(fluid.CPUPlace)): cpu place list. - ''' + + Examples: + .. code-block:: python + + cpu_places = fluid.cpu_places() + """ + if device_count is None: device_count = _cpu_num() return [core.CPUPlace()] * device_count def cuda_pinned_places(device_count=None): - ''' + """ Create a list of :code:`fluid.CUDAPinnedPlace` objects. If :code:`device_count` is None, the device count would @@ -168,7 +190,15 @@ def cuda_pinned_places(device_count=None): Returns: out (list(fluid.CUDAPinnedPlace)): cuda pinned place list. - ''' + + Examples: + .. code-block:: python + + cuda_pinned_places_cpu_num = fluid.cuda_pinned_places() + # or + cuda_pinned_places = fluid.cuda_pinned_places(1) + + """ assert core.is_compiled_with_cuda(), \ "Not compiled with CUDA" if device_count is None: @@ -3468,24 +3498,28 @@ def program_guard(main_program, startup_program=None): variables to the new main programs. Examples: + .. code-block:: python + + import paddle.fluid as fluid - >>> import paddle.fluid as fluid - >>> main_program = fluid.Program() - >>> startup_program = fluid.Program() - >>> with fluid.program_guard(main_program, startup_program): - >>> data = fluid.layers.data(...) - >>> hidden = fluid.layers.fc(...) + main_program = fluid.Program() + startup_program = fluid.Program() + with fluid.program_guard(main_program, startup_program): + data = fluid.layers.data(name='image', shape=[784, 784], dtype='float32') + hidden = fluid.layers.fc(input=data, size=10, act='relu') Notes: The temporary :code:`Program` can be used if the user does not need to construct either of startup program or main program. Examples: + .. code-block:: python - >>> import paddle.fluid as fluid - >>> main_program = fluid.Program() - >>> # does not care about startup program. Just pass a temporary value. - >>> with fluid.program_guard(main_program, fluid.Program()): - >>> data = ... 
 def cuda_pinned_places(device_count=None):
-    '''
+    """
     Create a list of :code:`fluid.CUDAPinnedPlace` objects.
 
     If :code:`device_count` is None, the device count would
@@ -168,7 +190,15 @@ def cuda_pinned_places(device_count=None):
 
     Returns:
         out (list(fluid.CUDAPinnedPlace)): cuda pinned place list.
-    '''
+
+    Examples:
+        .. code-block:: python
+
+            cuda_pinned_places_cpu_num = fluid.cuda_pinned_places()
+            # or
+            cuda_pinned_places = fluid.cuda_pinned_places(1)
+
+    """
     assert core.is_compiled_with_cuda(), \
         "Not compiled with CUDA"
     if device_count is None:
@@ -3468,24 +3498,28 @@ def program_guard(main_program, startup_program=None):
     variables to the new main programs.
 
     Examples:
+        .. code-block:: python
+
+            import paddle.fluid as fluid
 
-        >>> import paddle.fluid as fluid
-        >>> main_program = fluid.Program()
-        >>> startup_program = fluid.Program()
-        >>> with fluid.program_guard(main_program, startup_program):
-        >>>     data = fluid.layers.data(...)
-        >>>     hidden = fluid.layers.fc(...)
+            main_program = fluid.Program()
+            startup_program = fluid.Program()
+            with fluid.program_guard(main_program, startup_program):
+                data = fluid.layers.data(name='image', shape=[784, 784], dtype='float32')
+                hidden = fluid.layers.fc(input=data, size=10, act='relu')
 
     Notes: The temporary :code:`Program` can be used if the user does not need
     to construct either of startup program or main program.
 
     Examples:
+        .. code-block:: python
 
-        >>> import paddle.fluid as fluid
-        >>> main_program = fluid.Program()
-        >>> # does not care about startup program. Just pass a temporary value.
-        >>> with fluid.program_guard(main_program, fluid.Program()):
-        >>>     data = ...
+            import paddle.fluid as fluid
+
+            main_program = fluid.Program()
+            # does not care about startup program. Just pass a temporary value.
+            with fluid.program_guard(main_program, fluid.Program()):
+                data = fluid.layers.data(name='image', shape=[784, 784], dtype='float32')
 
     Args:
         main_program(Program): New main program inside `with` statement.
diff --git a/python/paddle/fluid/incubate/fleet/base/fleet_base.py b/python/paddle/fluid/incubate/fleet/base/fleet_base.py
index c8177842efa9e6c9085e6733678a23a3eb704619..f2f72b0f505fd43607f5104e39f5167f55fa432e 100644
--- a/python/paddle/fluid/incubate/fleet/base/fleet_base.py
+++ b/python/paddle/fluid/incubate/fleet/base/fleet_base.py
@@ -15,18 +15,21 @@
 from __future__ import print_function
 import abc
-import sys
 
 from enum import Enum
 
 from paddle.fluid.optimizer import SGD
+from paddle.fluid.executor import Executor
 
-from role_maker import RoleMakerBase, Role
+from role_maker import RoleMakerBase
 from role_maker import MPISymetricRoleMaker
 from role_maker import UserDefinedRoleMaker
 
 
 class Mode(Enum):
+    """
+    There are various modes for fleet, each of them designed for a different kind of training.
+    """
     TRANSPILER = 1,
     PSLIB = 2,
     COLLECTIVE = 3
@@ -46,17 +49,11 @@ class Fleet(object):
 
     def __init__(self, mode):
         assert isinstance(mode, Mode)
-        self.is_initialized = False
-        self.mode = mode
-        self.workers = 0
-        self.servers = 0
-        self.worker_endpoints = []
-        self.server_endpoints = []
-        self.role = Role.WORKER
-        self.current_endpoint = None
-        self.current_id = 0
-        self.optimizer = None
-        self.role_maker_ = None
+        self._is_initialized = False
+        self._mode = mode
+        self._optimizer = None
+        self._role_maker = None
+        self._executor = None
 
     def is_first_worker(self):
         """
@@ -66,25 +63,25 @@ class Fleet(object):
             bool: True if this is the first node of worker,
                   False if not.
 
         """
-        return self.is_worker() and self.current_id == 0
+        return self._role_maker.is_first_worker()
 
-    def worker_id(self):
+    def worker_index(self):
         """
-        Get current worker id.
+        Get current worker index.
 
         Returns:
             int: node id
         """
-        return self.current_id
+        return self._role_maker.worker_index()
 
-    def get_workers(self):
+    def worker_num(self):
         """
         Get current total worker number.
 
         Returns:
             int: worker number
         """
-        return self.workers
+        return len(self._role_maker.get_trainer_endpoints())
 
     def is_worker(self):
         """
@@ -94,7 +91,51 @@ class Fleet(object):
             bool: True if this is a node of worker, False if not.
 
         """
-        return self.role == Role.WORKER
+        return self._role_maker.is_worker()
+
+    def worker_endpoints(self, to_string=False):
+        """
+        Get current worker endpoints, such as ["127.0.0.1:1001", "127.0.0.1:1002"].
+
+        Returns:
+            list/string: worker endpoints
+        """
+
+        if to_string:
+            return ",".join(self._role_maker.get_trainer_endpoints())
+        else:
+            return self._role_maker.get_trainer_endpoints()
+
+    def server_num(self):
+        """
+        Get current total server number.
+
+        Returns:
+            int: server number
+        """
+        return len(self._role_maker.get_pserver_endpoints())
+
+    def server_index(self):
+        """
+        Get current server index.
+
+        Returns:
+            int: node id
+        """
+        return self._role_maker.server_index()
+
+    def server_endpoints(self, to_string=False):
+        """
+        Get current server endpoints, such as ["127.0.0.1:1001", "127.0.0.1:1002"].
+
+        Returns:
+            list/string: server endpoints
+        """
+
+        if to_string:
+            return ",".join(self._role_maker.get_pserver_endpoints())
+        else:
+            return self._role_maker.get_pserver_endpoints()
 
     def is_server(self):
         """
@@ -104,7 +145,7 @@ class Fleet(object):
             bool: True if this is a node of server, False if not.
 
         """
-        return self.role == Role.SERVER
+        return self._role_maker.is_server()
 
     def split_files(self, files):
         """
@@ -119,8 +160,8 @@ class Fleet(object):
             list: files belongs to this worker.
         """
         file_num = len(files)
-        trainer_id = self.worker_id()
-        trainer_num = self.get_workers()
+        trainer_id = self.worker_index()
+        trainer_num = self.worker_num()
         if trainer_num > file_num:
             raise ValueError("trainer_num should be <= file_num : "
                              "%s > %s" % (trainer_num, file_num))
@@ -132,66 +173,49 @@ class Fleet(object):
             end += length
         return files[start:end]
 
-    def init(self, role_maker=None):
+    def init(self, executor, role_maker=None):
         """
         should be called only once in user's python scripts,
         init() will initialize RoleMaker which is used for identifying
         current node's role, e.g. worker, server, etc.
 
         Args:
+            executor(Executor): The executor to run fleet.
             role_maker(RoleMakerBase): subclass of RoleMakerBase.
 
         Returns:
             None
         """
+        if not isinstance(executor, Executor):
+            raise ValueError("executor must be an instance of Executor")
+
         if role_maker and not isinstance(role_maker, RoleMakerBase):
             raise ValueError("role_maker must be an instance of RoleMakerBase")
 
-        self.role_maker_ = role_maker
-
         if isinstance(role_maker, MPISymetricRoleMaker):
-            self.role_maker_._generate_role()
-            self.role = Role.WORKER if role_maker._is_worker() else Role.SERVER
-            self.workers = role_maker._worker_num()
-            self.servers = role_maker._server_num()
-            self.server_endpoints = role_maker._get_pserver_endpoints()
-            self.worker_endpoints = role_maker._get_trainer_endpoints()
-            self.current_id = role_maker._worker_index(
-            ) if role_maker._is_worker() else role_maker._server_index()
-            self.current_endpoint = self.worker_endpoints[self.current_id] \
-                if role_maker._is_worker() else self.server_endpoints[self.current_id]
+            self._role_maker = role_maker
+            self._role_maker.generate_role()
+
         elif isinstance(role_maker, UserDefinedRoleMaker):
-            self.current_id = role_maker.current_id
-            self.current_endpoint = role_maker.current_endpoint
-            self.workers = role_maker.workers
-            self.worker_endpoints = role_maker.worker_endpoints
-            self.servers = role_maker.servers
-            self.server_endpoints = role_maker.server_endpoints
-            self.role = role_maker.role
+            self._role_maker = role_maker
+
         else:
             raise ValueError(
                 "role_maker must be an instance of UserDefinedRoleMaker/MPISymetricRoleMaker"
             )
 
-        self.is_initialized = True
+        self._is_initialized = True
 
     @abc.abstractmethod
-    def init_worker(self, executor):
+    def init_worker(self):
         pass
 
     @abc.abstractmethod
-    def run_worker(self, executor, main_program=None):
+    def init_server(self, model_dir=None):
         pass
 
     @abc.abstractmethod
-    def init_server(self, executor, model_dir=None):
-        pass
-
-    @abc.abstractmethod
-    def run_server(self, executor):
+    def run_server(self):
         pass
 
     @abc.abstractmethod
@@ -199,7 +223,7 @@ class Fleet(object):
         pass
 
     @abc.abstractmethod
-    def stop(self, executor):
+    def stop(self):
         pass
 
     @abc.abstractmethod
@@ -208,7 +232,6 @@ class Fleet(object):
 
     @abc.abstractmethod
     def save_inference_model(self,
-                             executor,
                              dirname,
                              feeded_var_names,
                              target_vars,
@@ -217,21 +240,9 @@ class Fleet(object):
         pass
 
     @abc.abstractmethod
-    def save_persistables(self, executor, dirname, main_program=None):
+    def save_persistables(self, dirname, main_program=None):
         pass
 
-    def to_string(self):
-        infos = """
-        mode = {}
-        workers = {}
-        server_endpoints = {}
-        role = {}
-        current_endpoint = {}
-        current_id = {}
-        """.format(self.mode, self.workers, self.server_endpoints, self.role,
-                   self.current_endpoint, self.current_id)
-        return infos
-
 
 class DistributedOptimizer(object):
     """
@@ -245,7 +256,7 @@ class DistributedOptimizer(object):
 
     Args:
         optimizer(Optimizer): subclass of Optimizer.
-        strategy(dict): the user define config for Optimizer.
+        strategy(any): the user-defined config for Optimizer.
 
     Returns:
         None
@@ -257,9 +268,6 @@ class DistributedOptimizer(object):
         if not isinstance(optimizer, SGD.__bases__):
             raise ValueError("optimizer must be an instance of Optimizer")
 
-        if strategy and not isinstance(strategy, dict):
-            raise ValueError("strategy must be an instance of Dict")
-
         self._optimizer = optimizer
         self._strategy = strategy
 
@@ -317,8 +325,9 @@ class DistributedOptimizer(object):
 
     @abc.abstractmethod
     def minimize(self,
-                 loss,
-                 startup_program=None,
+                 losses,
+                 scopes=None,
+                 startup_programs=None,
                  parameter_list=None,
                  no_grad_set=None):
         """
@@ -328,8 +337,9 @@ class DistributedOptimizer(object):
         `apply_gradients()` into one.
 
         Args:
-            loss (Variable): loss variable to run optimizations.
-            startup_program (Program): startup_program for initializing parameters
+            losses (Variable|Variable List): loss variable(s) to run optimizations.
+            scopes (Scope|Scope List): scope instance(s).
+            startup_programs (Program|Program List): startup_program for initializing parameters
                 in `parameter_list`.
             parameter_list (list): list of Variables to update.
             no_grad_set (set|None): set of Variables should be ignored.
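To make the refactored Fleet surface concrete, here is a rough sketch of how a trainer script might wire these pieces together under the new API. The ids and endpoints are illustrative, and `fleet` stands for a concrete Fleet implementation (e.g. the transpiler-based one later in this patch); the commented calls mirror the methods defined above.

.. code-block:: python

    import paddle.fluid as fluid
    from paddle.fluid.incubate.fleet.base.role_maker import UserDefinedRoleMaker, Role

    role = UserDefinedRoleMaker(
        current_id=0,
        role=Role.WORKER,
        worker_num=2,
        server_endpoints=["127.0.0.1:6170"])

    exe = fluid.Executor(fluid.CPUPlace())
    # fleet.init(exe, role_maker=role)   # the executor is now passed once, up front
    # if fleet.is_worker():
    #     print(fleet.worker_index(), fleet.worker_num())
    # elif fleet.is_server():
    #     print(fleet.server_index(), fleet.server_endpoints(to_string=True))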
diff --git a/python/paddle/fluid/incubate/fleet/base/role_maker.py b/python/paddle/fluid/incubate/fleet/base/role_maker.py
index dfd2273b485adfd5f76c650feef864964ad335a2..5371252213b2624ca44bb54b20a385b306967f8e 100644
--- a/python/paddle/fluid/incubate/fleet/base/role_maker.py
+++ b/python/paddle/fluid/incubate/fleet/base/role_maker.py
@@ -11,10 +11,14 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-import sys
+from __future__ import print_function
 
 from enum import Enum
 
+__all__ = [
+    'Role', 'RoleMakerBase', 'MPISymetricRoleMaker', 'UserDefinedRoleMaker'
+]
+
 
 class Role(Enum):
     WORKER = 1,
@@ -30,47 +34,62 @@ class RoleMakerBase(object):
     """
 
     def __init__(self):
-        self._trainer_endpoints = []
-        self._pserver_endpoints = []
+        self._worker_endpoints = []
+        self._server_endpoints = []
         self._role_is_generated = False
+        self._role = None
+        self._current_id = -1
 
-    def _is_worker(self):
+    def is_worker(self):
         """
         return is_worker() of current process
         """
         raise NotImplementedError("Please implement this method in child class")
 
-    def _is_server(self):
+    def is_server(self):
         """
         return is_server() of current process
         """
         raise NotImplementedError("Please implement this method in child class")
 
-    def _get_local_ip(self):
+    def is_first_worker(self):
         """
-        return get local ip
+        Check whether the node is the first instance of worker.
+        Returns:
+            bool: True if this is the first node of worker,
+                  False if not.
         """
-        import socket
-        self._ip = socket.gethostbyname(socket.gethostname())
-        return self._ip
+        raise NotImplementedError("Please implement this method in child class")
 
-    def _get_trainer_endpoints(self):
+    def worker_index(self):
         """
-        return trainer endpoints
+        Get current worker id.
+ + Returns: + int: node id """ - return self._trainer_endpoints + raise NotImplementedError("Please implement this method in child class") - def _get_pserver_endpoints(self): + def server_index(self): """ - return pserver endpoints + Get current server id. + + Returns: + int: node id """ - return self._pserver_endpoints + raise NotImplementedError("Please implement this method in child class") - def _generate_role(self): + def get_trainer_endpoints(self): """ - generate_role() should be called to identify current process's role + return trainer endpoints """ - raise NotImplementedError("Please implement this method in child class") + return self._worker_endpoints + + def get_pserver_endpoints(self): + """ + return pserver endpoints + """ + return self._server_endpoints class MPIRoleMaker(RoleMakerBase): @@ -82,9 +101,11 @@ class MPIRoleMaker(RoleMakerBase): def __init__(self): super(MPIRoleMaker, self).__init__() from mpi4py import MPI - self._comm = MPI.COMM_WORLD self.MPI = MPI + self._comm = MPI.COMM_WORLD + self._node_type_comm = None self._ips = None + self._ip = None def _get_rank(self): """ @@ -111,7 +132,7 @@ class MPIRoleMaker(RoleMakerBase): """ worker_gather(obj) will call MPI's allgather function """ - if self._is_worker(): + if self.is_worker(): self._node_type_comm.barrier() return self._node_type_comm.allgather(obj) return None @@ -122,19 +143,33 @@ class MPIRoleMaker(RoleMakerBase): """ self._comm.barrier() + def _finalize(self): + """ + finalize the current MPI instance. + """ + pass + def _get_ips(self): """ collect current distributed job's ip list """ - if self._ips == None: - self._ips = self._comm.allgather(self._get_local_ip()) + if not self._ips: + self._ips = self._comm.allgather(self.get_local_ip()) return self._ips - def _finalize(self): + def get_local_ip(self): """ - finalize the current MPI instance. 
+ return get local ip """ - pass + import socket + self._ip = socket.gethostbyname(socket.gethostname()) + return self._ip + + def generate_role(self): + """ + generate_role() should be called to identify current process's role + """ + raise NotImplementedError("Please implement this method in child class") class MPISymetricRoleMaker(MPIRoleMaker): @@ -151,20 +186,18 @@ class MPISymetricRoleMaker(MPIRoleMaker): def _check_role_generation(self): if not self._role_is_generated: - sys.stderr.write("generate_role() should be called first") - sys.exit(-1) - return False + raise NameError("generate_role() should be called first") return True - def _is_first_worker(self): + def is_first_worker(self): """ return whether current process is the first worker assigned by role maker """ if self._check_role_generation(): - return self._is_worker() and 0 == self._worker_index() + return self.is_worker() and 0 == self.worker_index() return False - def _is_worker(self): + def is_worker(self): """ return whether current process is worker assigned by role maker """ @@ -172,7 +205,7 @@ class MPISymetricRoleMaker(MPIRoleMaker): return self._node_type == 1 return False - def _is_server(self): + def is_server(self): """ return whether current process is server assigned by role maker """ @@ -185,7 +218,7 @@ class MPISymetricRoleMaker(MPIRoleMaker): return the current number of worker """ if self._check_role_generation(): - if self._is_worker(): + if self.is_worker(): return self._get_size() / 2 return 0 @@ -194,11 +227,11 @@ class MPISymetricRoleMaker(MPIRoleMaker): return the current number of server """ if self._check_role_generation(): - if self._is_server(): + if self.is_server(): return self._get_size() / 2 return 0 - def _worker_index(self): + def worker_index(self): """ return the index of worker """ @@ -206,7 +239,7 @@ class MPISymetricRoleMaker(MPIRoleMaker): return self._rank / self._proc_per_node return 0 - def _server_index(self): + def server_index(self): """ return the index of server """ @@ -219,7 +252,7 @@ class MPISymetricRoleMaker(MPIRoleMaker): barrier all workers in current distributed job """ if self._check_role_generation(): - if self._is_worker(): + if self.is_worker(): self._node_type_comm.barrier() def _barrier_server(self): @@ -227,17 +260,17 @@ class MPISymetricRoleMaker(MPIRoleMaker): barrier all servers in current distributed job """ if self._check_role_generation(): - if self._is_server(): + if self.is_server(): self._node_type_comm.barrier() - def _generate_role(self): + def generate_role(self): """ generate currently process's role """ if not self._role_is_generated: # TODO(guru4elephant): only allow to be called once - self._trainer_endpoints = self._get_ips() - self._pserver_endpoints = self._get_ips() + self._worker_endpoints = self._get_ips() + self._server_endpoints = self._get_ips() if 0 == self._get_rank() % self._proc_per_node % 2: self._node_type = 0 @@ -250,12 +283,9 @@ class MPISymetricRoleMaker(MPIRoleMaker): class UserDefinedRoleMaker(RoleMakerBase): def __init__(self, current_id=0, - current_endpoint=None, - workers=0, - worker_endpoints=None, - servers=0, - server_endpoints=None, - role=Role.WORKER): + role=Role.WORKER, + worker_num=0, + server_endpoints=None): """ UserDefinedRoleMaker is designed for worker and server assignment under manual. 
Typically, a worker and a server node will be appointed @@ -263,19 +293,22 @@ class UserDefinedRoleMaker(RoleMakerBase): """ super(UserDefinedRoleMaker, self).__init__() - self.current_id = current_id - self.current_endpoint = current_endpoint - self.workers = workers - self.worker_endpoints = worker_endpoints - self.servers = servers - self.server_endpoints = server_endpoints - self.role = role + self._current_id = current_id + self._role = role + self._worker_num = worker_num + self._server_endpoints = server_endpoints + + def is_worker(self): + return self._role == Role.WORKER + + def is_server(self): + return self._role == Role.SERVER - def _is_worker(self): - return self.role == Role.WORKER + def is_first_worker(self): + return self._role == Role.WORKER and self._current_id == 0 - def _is_server(self): - return self.role == Role.SERVER + def worker_index(self): + return self._current_id - def _generate_role(self): - self.role_is_generated_ = True + def server_index(self): + return self._current_id diff --git a/python/paddle/fluid/incubate/fleet/collective/__init__.py b/python/paddle/fluid/incubate/fleet/collective/__init__.py index 49ecaee07a5474bbe92a2dd3947ef555d252fa0e..e381a0d8c7124b8e9dd099ef0d99faa6985a8548 100644 --- a/python/paddle/fluid/incubate/fleet/collective/__init__.py +++ b/python/paddle/fluid/incubate/fleet/collective/__init__.py @@ -11,7 +11,6 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and -import sys import logging import paddle.fluid as fluid @@ -26,37 +25,21 @@ from ..base.fleet_base import DistributedOptimizer class Collective(Fleet): def __init__(self): super(Collective, self).__init__(Mode.COLLECTIVE) - self.local_ip_ = 0 + self._local_ip = 0 - def init(self, role_maker=None): - """ - should be called only once in user's python scripts, - init() will initialize RoleMaker which is used for identifying - current node's role, e.g. worker, server, etc. - - Args: - role_maker(RoleMakerBase): subclass of RoleMakerBase. - - Returns: - None - """ - - super(Collective, self).init(role_maker) - self._role_maker._generate_role() - - def init_worker(self, executor): + def init_worker(self): logging.warn( "You should not call 'init_worker' method for collective mode.") - def run_worker(self, executor, main_program=None): + def run_worker(self, main_programs=None, scopes=None): logging.warn( "You should not call 'run_worker' method for collective mode.") - def init_server(self, executor, model_dir=None): + def init_server(self, model_dir=None): logging.warn( "You should not call 'init_server' method for collective mode.") - def run_server(self, executor): + def run_server(self): logging.warn( "You should not call 'run_server' method for collective mode.") @@ -64,29 +47,28 @@ class Collective(Fleet): logging.warn( "You should not call 'stop_worker' method for collective mode.") - def stop(self, executor): + def stop(self): """ stop(): will be called after a user finishes his/her training task. 
""" logging.warn("You should not call 'stop' method for collective mode.") def distributed_optimizer(self, optimizer, strategy=None): - self.optimizer = CollectiveOptimizer(optimizer, strategy) - return self.optimizer + self._optimizer = CollectiveOptimizer(optimizer, strategy) + return self._optimizer def save_inference_model(self, - executor, dirname, feeded_var_names=None, target_vars=None, main_program=None, export_for_deployment=True): io.save_inference_model(dirname, feeded_var_names, target_vars, - executor, main_program, None, None, + self._executor, main_program, None, None, export_for_deployment) - def save_persistables(self, executor, dirname, main_program=None): - io.save_persistables(executor, dirname, main_program, None) + def save_persistables(self, dirname, main_program=None): + io.save_persistables(self._executor, dirname, main_program, None) fleet = Collective() @@ -143,9 +125,9 @@ class CollectiveOptimizer(DistributedOptimizer): optimize_ops, param_grads = self._optimizer.minimize( loss, startup_program, parameter_list, no_grad_set) - worker_endpoints = fleet.worker_endpoints - trainer_id = fleet.current_id - current_endpoint = fleet.current_endpoint + worker_endpoints = fleet.worker_endpoints() + trainer_id = fleet.worker_index() + current_endpoint = fleet.worker_endpoints()[trainer_id] startup_program = startup_program if startup_program else \ fluid.framework.default_startup_program diff --git a/python/paddle/fluid/incubate/fleet/parameter_server/distributed_transpiler/__init__.py b/python/paddle/fluid/incubate/fleet/parameter_server/distributed_transpiler/__init__.py index 5eeac2a7318ed2cf0f03822749ffe043ed6096f9..b2ed351da8c5d3071bd5fcf8860d55a636e09526 100644 --- a/python/paddle/fluid/incubate/fleet/parameter_server/distributed_transpiler/__init__.py +++ b/python/paddle/fluid/incubate/fleet/parameter_server/distributed_transpiler/__init__.py @@ -12,12 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. import os -import sys -from paddle.fluid.executor import Executor - -from paddle.fluid.framework import Program -from paddle.fluid.framework import default_main_program from paddle.fluid.framework import default_startup_program from paddle.fluid.optimizer import Optimizer @@ -27,7 +22,6 @@ import paddle.fluid.io as io from paddle.fluid.transpiler.distribute_transpiler import DistributeTranspilerConfig from paddle.fluid.transpiler.distribute_transpiler import DistributeTranspiler as OriginTranspiler -from ...base.role_maker import Role from ...base.fleet_base import Fleet from ...base.fleet_base import Mode from ...base.fleet_base import DistributedOptimizer @@ -44,101 +38,75 @@ class DistributedTranspiler(Fleet): self._startup_program = None self._main_program = None - def init_worker(self, executor): + def init_worker(self): """ `init_worker` has many many functions to do before training, first, wait for all parameter servers launch completely. second, run executor to initialize startup program third, wait for all worker initialize completely. - Args: - executor(Executor): The executor to run for init startup program. 
- Returns: None """ - if not isinstance(executor, Executor): - raise ValueError("executor must be an instance of Executor") - - if not self._startup_program: - raise ValueError( - "startup_program is None, need invoke DistributedOptimizer.minimize first" - ) - - executor.run(self._startup_program) + pass - def run_worker(self, executor, main_program=None): + def run_worker(self, main_programs=None, scopes=None): pass - def init_server(self, executor, model_dir=None): + def init_server(self, model_dir=None): """ `init_server` has many many functions to do before start pserver, first, run executor to initialize startup program, second, if the `model_dir` is not empty, it will load parameters from it for increment training. Args: - executor(Executor): The executor to run for init server. model_dir(str): The directory path. Returns: None """ - if not isinstance(executor, Executor): - raise ValueError("executor must be an instance of Executor") - if not self._startup_program: raise ValueError( "startup_program is None, need invoke DistributedOptimizer.minimize first" ) - executor.run(self._startup_program) + self._executor.run(self._startup_program) if model_dir: if not os.path.isdir(model_dir): raise ValueError("There is no directory named '%s'", model_dir) - io.load_persistables(executor, model_dir, self._startup_program) + io.load_persistables(self._executor, model_dir, + self._startup_program) - def run_server(self, executor): + def run_server(self): """ `run_server` execute executor to start pserver main program. - Args: - executor(Executor): The executor to run for init server. - Returns: None """ - if not isinstance(executor, Executor): - raise ValueError("executor must be an instance of Executor") - if not self._main_program: raise ValueError( "main_program is None, need invoke DistributedOptimizer.minimize first" ) - executor.run(self._main_program) + self._executor.run(self._main_program) def stop_worker(self): pass - def stop(self, executor): + def stop(self): """ Close this executor. For the distributed training, this method would free the resource on PServers related to the current Trainer. - Args: - executor(Executor): The executor to run for init server. - Returns: None """ - - if not isinstance(executor, Executor): - raise ValueError("executor must be an instance of Executor") - executor.close() + self._executor.close() def distributed_optimizer(self, optimizer, strategy=None): """ @@ -157,11 +125,10 @@ class DistributedTranspiler(Fleet): if not isinstance(optimizer, Optimizer): raise ValueError("optimizer must be an instance of Optimizer") - self.optimizer = TranspilerOptimizer(optimizer, strategy) - return self.optimizer + self._optimizer = TranspilerOptimizer(optimizer, strategy) + return self._optimizer def save_inference_model(self, - executor, dirname, feeded_var_names, target_vars, @@ -172,10 +139,10 @@ class DistributedTranspiler(Fleet): and then save it and all related parameters to given `dirname` by the `executor`. 
""" io.save_inference_model(dirname, feeded_var_names, target_vars, - executor, main_program, None, None, + self._executor, main_program, None, None, export_for_deployment) - def save_persistables(self, executor, dirname, main_program=None): + def save_persistables(self, dirname, main_program=None): """ This function filters out all variables with `persistable==True` from the give `main_program` and then saves these variables to the folder `dirname` @@ -186,38 +153,56 @@ class DistributedTranspiler(Fleet): files, set `filename` None; if you would like to save all variables in a single file, use `filename` to specify the file name. """ - io.save_persistables(executor, dirname, main_program, None) + io.save_persistables(self._executor, dirname, main_program, None) def _transpile(self, config): - if not isinstance(config, DistributeTranspilerConfig): - raise ValueError( - "config must be an instance of DistributeTranspilerConfig") - self._transpiler = OriginTranspiler(config) self._transpiler.transpile( - trainer_id=fleet.worker_id(), - pservers=fleet.server_endpoints, + trainer_id=fleet.worker_index(), + pservers=fleet.server_endpoints(to_string=True), trainers=fleet.worker_num()) - if self.role == Role.WORKER: + if self.is_worker(): self._main_program = self._transpiler.get_trainer_program() self._startup_program = default_startup_program() else: self._main_program, self._startup_program = \ - self._transpiler.get_pserver_programs(self.current_endpoint) + self._transpiler.get_pserver_programs(self.server_endpoints(self.server_index())) fleet = DistributedTranspiler() class TranspilerOptimizer(DistributedOptimizer): + """ + DistributedOptimizer is a wrapper for paddle.fluid.optimizer + A user should pass a paddle.fluid.optimizer to DistributedOptimizer + minimize() function is implemented. + DistributedOptimizer is the starting point for a user who wants to + run distributed training. The optimized information will be stored in + Fleet() instance who holds the global information about current distributed + training. + + Args: + optimizer(Optimizer): subclass of Optimizer. + strategy(DistributeTranspilerConfig): instance of DistributeTranspilerConfig. + + Returns: + None + """ + def __init__(self, optimizer, strategy=None): super(TranspilerOptimizer, self).__init__(optimizer, strategy) - if strategy and not isinstance(strategy, DistributeTranspilerConfig): - raise ValueError( - "In {} mode, strategy must be an instance of DistributeTranspilerConfig". - format(fleet.mode)) + if strategy: + if not isinstance(strategy, DistributeTranspilerConfig): + raise ValueError( + "In {} mode, strategy must be an instance of DistributeTranspilerConfig". + format(fleet._mode)) + else: + self._strategy = strategy + else: + self._strategy = DistributeTranspilerConfig() def backward(self, loss, @@ -225,24 +210,68 @@ class TranspilerOptimizer(DistributedOptimizer): parameter_list=None, no_grad_set=None, callbacks=None): + """ + First part of `minimize`, do auto-diff to append backward ops for + the current program. + + Args: + loss (Variable): loss variable to run optimizations. + startup_program (Program): startup_program for initializing parameters + in `parameter_list`. + parameter_list (list): list of Variables to update. + no_grad_set (set|None): set of Variables should be ignored. + callbacks (list|None): list of callables to run when appending backward + operator for one parameter. + + Return: + list: list of (param, grad) pair, grad is the output of backward. 
+ + Examples: + See examples in `apply_gradients`. + """ return self._optimizer.backward(loss, startup_program, parameter_list, no_grad_set, callbacks) def apply_gradients(self, params_grads): + """ + Second part of `minimize`, appending optimization operators for + given `params_grads` pairs. + + Args: + params_grads (list): list of (param, grad) pair to do optimization. + + Returns: + list: A list of operators appended to the current program. + + Examples: + .. code-block:: python + + loss = network() + optimizer = fluid.optimizer.SGD(learning_rate=0.1) + params_grads = optimizer.backward(loss) + # you may append operations for params_grads here + # ... + optimizer.apply_gradients(params_grads) + """ return self._optimizer.apply_gradients(params_grads) def minimize(self, loss, + scope=None, startup_program=None, parameter_list=None, no_grad_set=None): - optimize_ops, params_grads = self._optimizer.minimize( - loss, startup_program, parameter_list, no_grad_set) - self.transpile() - return optimize_ops, params_grads - def transpile(self): - if self._strategy is None: - self._strategy = DistributeTranspilerConfig() + if isinstance(loss, list): + raise ValueError( + "DistributedTranspiler's minimize can not accept loss with list") + if isinstance(startup_program, list): + raise ValueError( + "DistributedTranspiler's minimize can not accept program with list" + ) + + optimize_ops, params_grads = self._optimizer.minimize( + loss, startup_program, parameter_list, no_grad_set) fleet._transpile(config=self._strategy) + return optimize_ops, params_grads diff --git a/python/paddle/fluid/incubate/fleet/parameter_server/pslib/__init__.py b/python/paddle/fluid/incubate/fleet/parameter_server/pslib/__init__.py index b472c20bc132ea343b9a3261a6e218565cbaea25..ec066187c238815a5b262fb752d10ad6a5730cbe 100644 --- a/python/paddle/fluid/incubate/fleet/parameter_server/pslib/__init__.py +++ b/python/paddle/fluid/incubate/fleet/parameter_server/pslib/__init__.py @@ -28,63 +28,56 @@ class PSLib(Fleet): def __init__(self): super(PSLib, self).__init__(Mode.PSLIB) self._opt_info = None - self.local_ip_ = 0 + self._local_ip = 0 self._fleet_ptr = None + self._main_programs = [] + self._scopes = [] - def init(self, role_maker=None): - super(PSLib, self).init(MPISymetricRoleMaker()) + def init(self, executor, role_maker=None): + super(PSLib, self).init(executor, MPISymetricRoleMaker()) self._fleet_ptr = fluid.core.Fleet() - def init_worker(self, executor): - pass - - def run_worker(self, executor, main_program=None): + def init_worker(self): """ init_worker(): will be called by user. When a user knows current process is_server(), he/she should call init_worker() to initialize global information about worker and connect worker with pserver. You should run startup program before init_worker. Args: - programs(Program|list): a Program or a list of Programs - scopes(Scope|list): a Scope or a list of Scopes, default None. + executor(Executor): The executor to run for init server. + programs(Program|None): The program that need to run. 
""" - if not isinstance(main_program, Program): - raise ValueError("main_program must be an instance of Program") - programs = [main_program] - scopes = [fluid.global_scope()] * len(programs) + if len(self._main_programs) == 0: + raise ValueError( + "You should run DistributedOptimizer.minimize() first") - if len(scopes) != len(programs): - print( - "You should make sure len(scopes) == len(programs) or set scopes None" - ) - sys.exit(-1) if self._opt_info: if "fleet_desc" in self._opt_info: self._dist_desc_str = text_format.MessageToString( self._opt_info["fleet_desc"]) self._dist_desc = self._opt_info["fleet_desc"] else: - print("You should run DistributedOptimizer.minimize() first") - sys.exit(-1) + raise Exception( + "You should run DistributedOptimizer.minimize() first") # barrier_all for init_server, wait for server starts - self.role_maker_._barrier_all() - self.all_ips_ = self.role_maker_._all_gather(self.local_ip_) + self._role_maker._barrier_all() + self.all_ips_ = self._role_maker._all_gather(self._local_ip) self._fleet_ptr.init_worker(self._dist_desc_str, self.all_ips_, - self.role_maker_._get_size(), - self.role_maker_._get_rank()) + self._role_maker._get_size(), + self._role_maker._get_rank()) # barrier_all for init_worker - self.role_maker_._barrier_all() + self._role_maker._barrier_all() # prepare for client to client communication info = self._fleet_ptr.get_clients_info() - all_info = self.role_maker_._worker_gather(info[0]) + all_info = self._role_maker._worker_gather(info[0]) self._fleet_ptr.gather_clients(all_info) self._fleet_ptr.create_client2client_connection() # barrier for init model - self.role_maker_._barrier_worker() - if self.role_maker_._is_first_worker(): + self._role_maker._barrier_worker() + if self._role_maker.is_first_worker(): tables = self._dist_desc.trainer_param.dense_table - for prog, scope in zip(programs, scopes): + for prog, scope in zip(self._main_programs, self._scopes): prog_id = str(id(prog)) prog_conf = self._opt_info['program_configs'][prog_id] prog_tables = {} @@ -100,24 +93,23 @@ class PSLib(Fleet): for i in range(0, len(table.dense_variable_name)): var_name = table.dense_variable_name[i] if scope.find_var(var_name) is None: - print("var " + var_name + - " not found in scope, " + - "you should run startup program first") - sys.exit(-1) + raise ValueError( + "var " + var_name + " not found in scope, " + + "you should run startup program first") var_name_list.append(var_name) self._fleet_ptr.init_model(scope, int(table.table_id), var_name_list) # barrier for init model done - self.role_maker_._barrier_worker() + self._role_maker._barrier_worker() else: raise NameError( "You should run DistributedOptimizer.minimize() first") - def init_server(self, executor, model_dir=None): + def init_server(self, model_dir=None): pass - def run_server(self, executor): + def run_server(self): """ init_pserver(): will be called by user. 
When a user knows current process is_worker(), he/she should call init_pserver() to initialize global information about parameter server @@ -128,22 +120,22 @@ class PSLib(Fleet): self._opt_info["fleet_desc"]) self._dist_desc = self._opt_info["fleet_desc"] else: - print("You should run DistributedOptimizer.minimize() first") - sys.exit(-1) + raise Exception( + "You should run DistributedOptimizer.minimize() first") self._fleet_ptr.init_server(self._dist_desc_str, - self.role_maker_._get_rank()) - self.local_ip_ = self._fleet_ptr.run_server() + self._role_maker._get_rank()) + self._local_ip = self._fleet_ptr.run_server() # barrier_all for init_server - self.role_maker_._barrier_all() - self.all_ips_ = self.role_maker_._all_gather(self.local_ip_) + self._role_maker._barrier_all() + self.all_ips_ = self._role_maker._all_gather(self._local_ip) self._fleet_ptr.gather_servers(self.all_ips_, - self.role_maker_._get_size()) + self._role_maker._get_size()) # barrier_all for init_worker, wait all workers start - self.role_maker_._barrier_all() + self._role_maker._barrier_all() else: - raise NameError( + raise Exception( "You should run DistributedOptimizer.minimize() first") def stop_worker(self): @@ -151,31 +143,30 @@ class PSLib(Fleet): stop(): will be called after a user finishes his/her training task. Fleet instance will be destroyed when stop() is called. """ - self.role_maker_._barrier_worker() - if self.role_maker_._is_first_worker(): + self._role_maker._barrier_worker() + if self._role_maker.is_first_worker(): self._fleet_ptr.stop_server() - self.role_maker_._barrier_worker() - self.role_maker_._barrier_all() - self.role_maker_._finalize() + self._role_maker._barrier_worker() + self._role_maker._barrier_all() + self._role_maker._finalize() - def stop(self, executor): + def stop(self): """ stop(): will be called after a user finishes his/her training task. Fleet instance will be destroyed when stop() is called. """ - self.role_maker_._barrier_worker() - if self.role_maker_._is_first_worker(): + self._role_maker._barrier_worker() + if self._role_maker.is_first_worker(): self._fleet_ptr.stop_server() - self.role_maker_._barrier_worker() - self.role_maker_._barrier_all() - self.role_maker_._finalize() + self._role_maker._barrier_worker() + self._role_maker._barrier_all() + self._role_maker._finalize() def distributed_optimizer(self, optimizer, strategy=None): - self.optimizer = DownpourOptimizer(optimizer, strategy) - return self.optimizer + self._optimizer = DownpourOptimizer(optimizer, strategy) + return self._optimizer def save_inference_model(self, - executor, dirname, feeded_var_names=None, target_vars=None, @@ -186,7 +177,7 @@ class PSLib(Fleet): """ self._fleet_ptr.save_model(dirname) - def save_persistables(self, executor, dirname, main_program=None): + def save_persistables(self, dirname, main_program=None): self._fleet_ptr.save_model(dirname) def _set_opt_info(self, opt_info): @@ -208,6 +199,13 @@ class DownpourOptimizer(DistributedOptimizer): run distributed training. The optimized information will be stored in Fleet() instance who holds the global information about current distributed training. + + Args: + optimizer(Optimizer): subclass of Optimizer. + strategy(any): config for DownpourOptimizer. 
+ + Returns: + None """ def __init__(self, optimizer, strategy=None): @@ -242,32 +240,54 @@ raise NotImplementedError() def minimize(self, - loss, - startup_program=None, + losses, + scopes=None, + startup_programs=None, parameter_list=None, no_grad_set=None): """ - minimize a program through loss, loss can be a list in DistributedOptimizer + minimize a program through loss, loss can be a list in DistributedOptimizer. + Note that in parameter server mode, a worker will not get anything about optimize_ops + because optimizer algorithms run on the pserver side. We will make this usable in pserver + process, but currently the optimization part is written into Fleet(). A user does not + need to care about how to start up a pserver node. + Args: - loss (Variable|Variable List): loss variable or loss variable list to run optimization. - startup_program (Program): startup_program for initializing parameters + losses (Variable|Variable List): loss variable or loss variable list to run optimization. + scopes (Scope|Scope List): scope instance. + startup_programs (Program|Program List): startup_program for initializing parameters in `parameter_list`. parameter_list (list): list of Variables to update. no_grad_set (set|None): set of Variables should be ignored. + Returns: tuple: (optimize_ops, params_grads) which are, list of operators appended; and list of (param, grad) Variables pair for optimization. - Note that in parameter server mode, a worker will not get anything about optimize_os - Because optmizer algorithms run on pserver side. We will make this usable in pserver - process, but currently the optimization part is written into Fleet(). A user does not - need to care about how to startup a pserver node. """ + + if not isinstance(losses, list): + losses = [losses] + optimize_ops, param_grads, opt_info = \ self._distributed_optimizer._minimize( - loss, - startup_program, + losses, + startup_programs, parameter_list, no_grad_set) fleet._set_opt_info(opt_info) + + programs = [loss.block.program for loss in losses] + + if scopes is None: + scopes = [fluid.global_scope()] * len(programs) + + if len(scopes) != len(programs): + raise ValueError( + "You should make sure len(scopes) == len(programs) or set scopes None" + ) + + fleet._main_programs = programs + fleet._scopes = scopes + return [optimize_ops, param_grads] diff --git a/python/paddle/fluid/incubate/fleet/parameter_server/pslib/node.py b/python/paddle/fluid/incubate/fleet/parameter_server/pslib/node.py index 641c294c4a6edeb3d9823b4152b0ea158c8faa80..7a1925a95fd29259c137bc592aff653554381ada 100644 --- a/python/paddle/fluid/incubate/fleet/parameter_server/pslib/node.py +++ b/python/paddle/fluid/incubate/fleet/parameter_server/pslib/node.py @@ -94,7 +94,7 @@ class DownpourServer(Server): Returns: return None """ - table = self.server_.downpour_server_param.downpour_table_param.add() + table = self._server.downpour_server_param.downpour_table_param.add() table.table_id = table_id table.table_class = "DownpourDenseTable" table.type = pslib.PS_DENSE_TABLE @@ -169,7 +169,7 @@ class DownpourWorker(Worker): Returns: return None """ - table = self.worker_.sparse_table.add() + table = self._worker.sparse_table.add() table.table_id = table_id table.slot_key.extend([var.name for var in slot_key_vars]) table.slot_value.extend([var.name for var in slot_value_vars]) diff --git a/python/paddle/fluid/incubate/fleet/parameter_server/pslib/optimizer_factory.py
b/python/paddle/fluid/incubate/fleet/parameter_server/pslib/optimizer_factory.py index ba1f2c8f6ba43bcdb8d4240e33210370e5a454f6..31f964a0e341cf0a4f1bc551f3bea1a6a47d108e 100644 --- a/python/paddle/fluid/incubate/fleet/parameter_server/pslib/optimizer_factory.py +++ b/python/paddle/fluid/incubate/fleet/parameter_server/pslib/optimizer_factory.py @@ -66,8 +66,6 @@ class DistributedAdam(DistributedOptimizerImplBase): Returns: [optimize_ops, grads_and_weights] """ - if not isinstance(losses, list): - losses = [losses] table_name = find_distributed_lookup_table(losses[0].block.program) prefetch_slots = find_distributed_lookup_table_inputs( @@ -77,7 +75,7 @@ class DistributedAdam(DistributedOptimizerImplBase): ps_param = pslib.PSParameter() server = DownpourServer() - worker = DownpourWorker(self.window_) + worker = DownpourWorker(self._window) sparse_table_index = 0 server.add_sparse_table(sparse_table_index, self._learning_rate, prefetch_slots, prefetch_slots_emb) @@ -88,17 +86,12 @@ class DistributedAdam(DistributedOptimizerImplBase): param_grads_list = [] for loss_index in range(len(losses)): - #program_config = ps_param.trainer_param.program_config.add() - #program_config.program_id = str( - # id(losses[loss_index].block.program)) program_id = str(id(losses[loss_index].block.program)) program_configs[program_id] = { "pull_sparse": [sparse_table_index], "push_sparse": [sparse_table_index] } - #program_config.pull_sparse_table_id.extend([sparse_table_index]) - #program_config.push_sparse_table_id.extend([sparse_table_index]) params_grads = sorted( fluid.backward.append_backward(losses[loss_index], parameter_list, no_grad_set), @@ -130,8 +123,6 @@ class DistributedAdam(DistributedOptimizerImplBase): params, grads) program_configs[program_id]["pull_dense"] = [dense_table_index] program_configs[program_id]["push_dense"] = [dense_table_index] - #program_config.pull_dense_table_id.extend([dense_table_index]) - #program_config.push_dense_table_id.extend([dense_table_index]) if len(data_norm_params) != 0 and len(data_norm_grads) != 0: dense_table_index += 1 server.add_data_norm_table(dense_table_index, @@ -139,18 +130,13 @@ class DistributedAdam(DistributedOptimizerImplBase): data_norm_params, data_norm_grads) worker.add_dense_table(dense_table_index, self._learning_rate, data_norm_params, data_norm_grads) - #program_config.pull_dense_table_id.extend([dense_table_index]) - #program_config.push_dense_table_id.extend([dense_table_index]) program_configs[program_id]["pull_dense"].extend( [dense_table_index]) program_configs[program_id]["push_dense"].extend( [dense_table_index]) dense_table_index += 1 - #program_configs.append(program_config) ps_param.server_param.CopyFrom(server.get_desc()) ps_param.trainer_param.CopyFrom(worker.get_desc()) - #for program_config in program_configs: - # ps_param.trainer_param.program_config.extend([program_config]) # Todo(guru4elephant): figure out how to support more sparse parameters # currently only support lookup_table worker_skipped_ops = ["lookup_table", "lookup_table_grad"] diff --git a/python/paddle/fluid/initializer.py b/python/paddle/fluid/initializer.py index 86596bd9c8f03d953b4df3efe876527f30eebf84..58819efea04218790a0c67c7db2c1e11b9f16f00 100644 --- a/python/paddle/fluid/initializer.py +++ b/python/paddle/fluid/initializer.py @@ -205,6 +205,8 @@ class UniformInitializer(Initializer): Examples: .. 
code-block:: python + import paddle.fluid as fluid + x = fluid.layers.data(name='x', shape=[1], dtype='float32') fc = fluid.layers.fc(input=x, size=10, param_attr=fluid.initializer.Uniform(low=-0.5, high=0.5)) """ @@ -366,6 +368,8 @@ class TruncatedNormalInitializer(Initializer): Examples: .. code-block:: python + import paddle.fluid as fluid + x = fluid.layers.data(name='x', shape=[1], dtype='float32') fc = fluid.layers.fc(input=x, size=10, param_attr=fluid.initializer.TruncatedNormal(loc=0.0, scale=2.0)) """ @@ -471,6 +475,8 @@ class XavierInitializer(Initializer): Examples: .. code-block:: python + import paddle.fluid as fluid + queries = fluid.layers.data(name='x', shape=[1], dtype='float32') fc = fluid.layers.fc( input=queries, size=10, param_attr=fluid.initializer.Xavier(uniform=False)) diff --git a/python/paddle/fluid/io.py b/python/paddle/fluid/io.py index 16524d385f65340aba40728bd41451bc1c444d55..b573093c3025acead94cc0019f69ec8ca8e1527f 100644 --- a/python/paddle/fluid/io.py +++ b/python/paddle/fluid/io.py @@ -17,8 +17,6 @@ from __future__ import print_function import os import errno import warnings -import time -import shutil import six import logging from functools import reduce @@ -168,6 +166,7 @@ def save_vars(executor, # var_a, var_b and var_c will be saved. And they are going to be # saved in the same file named 'var_file' in the path "./my_paddle_model". """ + save_dirname = os.path.normpath(dirname) if vars is None: if main_program is None: main_program = default_main_program() @@ -177,7 +176,7 @@ def save_vars(executor, save_vars( executor, main_program=main_program, - dirname=dirname, + dirname=save_dirname, vars=list(filter(predicate, main_program.list_vars())), filename=filename) else: @@ -200,7 +199,9 @@ def save_vars(executor, type='save', inputs={'X': [new_var]}, outputs={}, - attrs={'file_path': os.path.join(dirname, new_var.name)}) + attrs={ + 'file_path': os.path.join(save_dirname, new_var.name) + }) else: save_var_map[new_var.name] = new_var @@ -213,7 +214,7 @@ def save_vars(executor, type='save_combine', inputs={'X': save_var_list}, outputs={}, - attrs={'file_path': os.path.join(dirname, filename)}) + attrs={'file_path': os.path.join(save_dirname, filename)}) executor.run(save_program) @@ -567,6 +568,7 @@ def load_vars(executor, # var_a, var_b and var_c will be loaded. And they are supposed to haven # been saved in the same file named 'var_file' in the path "./my_paddle_model". """ + load_dirname = os.path.normpath(dirname) if vars is None: if main_program is None: main_program = default_main_program() @@ -575,7 +577,7 @@ def load_vars(executor, load_vars( executor, - dirname=dirname, + dirname=load_dirname, main_program=main_program, vars=list(filter(predicate, main_program.list_vars())), filename=filename) @@ -599,7 +601,9 @@ def load_vars(executor, type='load', inputs={}, outputs={'Out': [new_var]}, - attrs={'file_path': os.path.join(dirname, new_var.name)}) + attrs={ + 'file_path': os.path.join(load_dirname, new_var.name) + }) else: load_var_map[new_var.name] = new_var @@ -612,7 +616,7 @@ def load_vars(executor, type='load_combine', inputs={}, outputs={"Out": load_var_list}, - attrs={'file_path': os.path.join(dirname, filename)}) + attrs={'file_path': os.path.join(load_dirname, filename)}) executor.run(load_prog) @@ -913,13 +917,32 @@ def save_inference_model(dirname, Examples: .. 
code-block:: python - exe = fluid.Executor(fluid.CPUPlace()) + import paddle.fluid as fluid + path = "./infer_model" - fluid.io.save_inference_model(dirname=path, feeded_var_names=['img'], - target_vars=[predict_var], executor=exe) - # In this exsample, the function will prune the default main program - # to make it suitable for infering the `predict_var`. The pruned + # User defined network, here a softmax regression example + image = fluid.layers.data(name='img', shape=[1, 28, 28], dtype='float32') + label = fluid.layers.data(name='label', shape=[1], dtype='int64') + feeder = fluid.DataFeeder(feed_list=[image, label], place=fluid.CPUPlace()) + predict = fluid.layers.fc(input=image, size=10, act='softmax') + + loss = fluid.layers.cross_entropy(input=predict, label=label) + avg_loss = fluid.layers.mean(loss) + + exe = fluid.Executor(fluid.CPUPlace()) + exe.run(fluid.default_startup_program()) + + # Feed data and run the training process here + + # Save inference model. Note we don't save label and loss in this example + fluid.io.save_inference_model(dirname=path, + feeded_var_names=['img'], + target_vars=[predict], + executor=exe) + + # In this example, the function will prune the default main program + # to make it suitable for inferring the `predict` var. The pruned + # inference program is going to be saved in the "./infer_model/__model__" + # and parameters are going to be saved in separate files under folder + # "./infer_model". @@ -966,8 +989,10 @@ save_inference_model(dirname, target_var_name_list = [var.name for var in target_vars] # when a pserver and a trainer running on the same machine, mkdir may conflict + save_dirname = dirname try: - os.makedirs(dirname) + save_dirname = os.path.normpath(dirname) + os.makedirs(save_dirname) except OSError as e: if e.errno != errno.EEXIST: raise @@ -976,7 +1001,7 @@ if model_filename is not None: model_basename = os.path.basename(model_filename) else: model_basename = "__model__" - model_basename = os.path.join(dirname, model_basename) + model_basename = os.path.join(save_dirname, model_basename) # When export_for_deployment is true, we modify the program online so that # it can only be loaded for inference directly. If it's false, the whole @@ -1019,7 +1044,7 @@ if params_filename is not None: params_filename = os.path.basename(params_filename) - save_persistables(executor, dirname, main_program, params_filename) + save_persistables(executor, save_dirname, main_program, params_filename) return target_var_name_list @@ -1075,7 +1100,7 @@ # if we need lookup table, we will use: fluid.io.load_inference_model(dirname=path, executor=exe, pserver_endpoints=endpoints) - # In this exsample, the inference program was saved in the + # In this example, the inference program was saved in the # "./infer_model/__model__" and parameters were saved in # separate files in ""./infer_model". # After getting inference program, feed target names and # program to get the inference result.
""" - if not os.path.isdir(dirname): + load_dirname = os.path.normpath(dirname) + if not os.path.isdir(load_dirname): raise ValueError("There is no directory named '%s'", dirname) if model_filename is not None: model_filename = os.path.basename(model_filename) else: model_filename = "__model__" - model_filename = os.path.join(dirname, model_filename) + model_filename = os.path.join(load_dirname, model_filename) if params_filename is not None: params_filename = os.path.basename(params_filename) @@ -1103,7 +1129,7 @@ def load_inference_model(dirname, raise ValueError("Unsupported program version: %d\n" % program._version()) # Binary data also need versioning. - load_persistables(executor, dirname, program, params_filename) + load_persistables(executor, load_dirname, program, params_filename) if pserver_endpoints: program = _endpoints_replacement(program, pserver_endpoints) diff --git a/python/paddle/fluid/layers/detection.py b/python/paddle/fluid/layers/detection.py index 0cc7e601498d313517297b2287f06cfebde79a4e..dd50fc91248bc7a32d60e5dd347061c2c5cbe5bb 100644 --- a/python/paddle/fluid/layers/detection.py +++ b/python/paddle/fluid/layers/detection.py @@ -666,9 +666,10 @@ def yolo_box(x, .. code-block:: python + import paddle.fluid as fluid x = fluid.layers.data(name='x', shape=[255, 13, 13], dtype='float32') anchors = [10, 13, 16, 30, 33, 23] - loss = fluid.layers.yolo_box(x=x, class_num=80, anchors=anchors, + loss = fluid.layers.yolo_box(x=x, img_size=608, class_num=80, anchors=anchors, conf_thresh=0.01, downsample_ratio=32) """ helper = LayerHelper('yolo_box', **locals()) @@ -1819,7 +1820,7 @@ def roi_perspective_transform(input, coordinates, and (x3, y3) is the bottom right coordinates, and (x4, y4) is the bottom left coordinates. transformed_height (integer): The height of transformed output. - transformed_height (integer): The width of transformed output. + transformed_width (integer): The width of transformed output. spatial_scale (float): Spatial scale factor to scale ROI coords. Default: 1.0 Returns: @@ -1829,7 +1830,10 @@ def roi_perspective_transform(input, Examples: .. code-block:: python - out = fluid.layers.roi_perspective_transform(input, rois, 7, 7, 1.0) + import paddle.fluid as fluid + x = fluid.layers.data(name='x', shape=[256, 28, 28], dtype='float32') + rois = fluid.layers.data(name='rois', shape=[8], lod_level=1, dtype='float32') + out = fluid.layers.roi_perspective_transform(x, rois, 7, 7, 1.0) """ helper = LayerHelper('roi_perspective_transform', **locals()) dtype = helper.input_dtype() diff --git a/python/paddle/fluid/layers/io.py b/python/paddle/fluid/layers/io.py index f2b40c23fce615803fe2032dbb0343bfa72c8939..a2538fa0f9d29aa2521415abf3f8035401b5b2c3 100644 --- a/python/paddle/fluid/layers/io.py +++ b/python/paddle/fluid/layers/io.py @@ -65,7 +65,7 @@ def data(name, For example if shape=[1], the resulting shape is [-1, 1]. 2. If shape contains -1, such as shape=[1, -1], append_batch_size will be enforced to be be False (ineffective). - dtype(basestring): The type of data : float32, float_16, int etc + dtype(np.dtype|VarType|str): The type of data : float32, float16, int etc type(VarType): The output type. By default it is LOD_TENSOR. lod_level(int): The LoD Level. 0 means the input data is not a sequence. stop_gradient(bool): A boolean that mentions whether gradient should flow. 
@@ -377,7 +377,7 @@ def open_recordio_file(filename, >>> import paddle.fluid as fluid >>> reader = fluid.layers.io.open_recordio_file( >>> filename='./data.recordio', - >>> shapes=[(3,224,224), (1)], + >>> shapes=[(3,224,224), (1,)], >>> lod_levels=[0, 0], >>> dtypes=['float32', 'int64']) >>> # Via the reader, we can use 'read_file' layer to get data: @@ -674,100 +674,114 @@ def py_reader(capacity, Variable: A Reader from which we can get feeding data. Examples: + 1. The basic usage of :code:`py_reader` is as follows: + + .. code-block:: python + + import paddle + import paddle.fluid as fluid + import paddle.dataset.mnist as mnist + + def network(image, label): + # user defined network, here a softmax regresssion example + predict = fluid.layers.fc(input=image, size=10, act='softmax') + return fluid.layers.cross_entropy(input=predict, label=label) + + reader = fluid.layers.py_reader(capacity=64, + shapes=[(-1, 1, 28, 28), (-1, 1)], + dtypes=['float32', 'int64']) + reader.decorate_paddle_reader( + paddle.reader.shuffle(paddle.batch(mnist.train(), batch_size=5), + buf_size=1000)) + + img, label = fluid.layers.read_file(reader) + loss = network(img, label) + + fluid.Executor(fluid.CUDAPlace(0)).run(fluid.default_startup_program()) + exe = fluid.ParallelExecutor(use_cuda=True) + for epoch_id in range(10): + reader.start() + try: + while True: + exe.run(fetch_list=[loss.name]) + except fluid.core.EOFException: + reader.reset() + + fluid.io.save_inference_model(dirname='./model', + feeded_var_names=[img.name, label.name], + target_vars=[loss], + executor=fluid.Executor(fluid.CUDAPlace(0))) + + 2. When training and testing are both performed, two different + :code:`py_reader` should be created with different names, e.g.: - 1. The basic usage of :code:`py_reader` is as follows: - - >>> import paddle.fluid as fluid - >>> import paddle.dataset.mnist as mnist - >>> - >>> reader = fluid.layers.py_reader(capacity=64, - >>> shapes=[(-1,3,224,224), (-1,1)], - >>> dtypes=['float32', 'int64']) - >>> reader.decorate_paddle_reader( - >>> paddle.reader.shuffle(paddle.batch(mnist.train()))) - >>> - >>> img, label = fluid.layers.read_file(reader) - >>> loss = network(img, label) # some network definition - >>> - >>> fluid.Executor(fluid.CUDAPlace(0)).run(fluid.default_startup_program()) - >>> - >>> exe = fluid.ParallelExecutor(use_cuda=True, loss_name=loss.name) - >>> for epoch_id in range(10): - >>> reader.start() - >>> try: - >>> while True: - >>> exe.run(fetch_list=[loss.name]) - >>> except fluid.core.EOFException: - >>> reader.reset() - >>> - >>> ... - >>> - >>> fluid.io.save_inference_model(dirname='./model', feeded_var_names=[img, label], - >>> target_vars=[loss], executor=fluid.Executor(fluid.CUDAPlace(0))) - - 2. 
When training and testing are both performed, two different - :code:`py_reader` should be created with different names, e.g.: - - >>> import paddle.fluid as fluid - >>> import paddle.dataset.mnist as mnist - >>> - >>> def network(reader): - >>> img, label = fluid.layers.read_file(reader) - >>> # Here, we omitted the network definition - >>> return loss - >>> - >>> train_reader = fluid.layers.py_reader(capacity=64, - >>> shapes=[(-1,3,224,224), (-1,1)], - >>> dtypes=['float32', 'int64'], - >>> name='train_reader') - >>> train_reader.decorate_paddle_reader( - >>> paddle.reader.shuffle(paddle.batch(mnist.train()))) - >>> - >>> test_reader = fluid.layers.py_reader(capacity=32, - >>> shapes=[(-1,3,224,224), (-1,1)], - >>> dtypes=['float32', 'int64'], - >>> name='test_reader') - >>> test_reader.decorate_paddle_reader(paddle.batch(mnist.test(), 512)) - >>> - >>> # Create train_main_prog and train_startup_prog - >>> train_main_prog = fluid.Program() - >>> train_startup_prog = fluid.Program() - >>> with fluid.program_guard(train_main_prog, train_startup_prog): - >>> # Use fluid.unique_name.guard() to share parameters with test program - >>> with fluid.unique_name.guard(): - >>> train_loss = network(train_reader) # some network definition - >>> adam = fluid.optimizer.Adam(learning_rate=0.01) - >>> adam.minimize(loss) - >>> - >>> # Create test_main_prog and test_startup_prog - >>> test_main_prog = fluid.Program() - >>> test_startup_prog = fluid.Program() - >>> with fluid.program_guard(test_main_prog, test_startup_prog): - >>> # Use fluid.unique_name.guard() to share parameters with train program - >>> with fluid.unique_name.guard(): - >>> test_loss = network(test_reader) - >>> - >>> fluid.Executor(fluid.CUDAPlace(0)).run(train_startup_prog) - >>> fluid.Executor(fluid.CUDAPlace(0)).run(test_startup_prog) - >>> - >>> train_exe = fluid.ParallelExecutor(use_cuda=True, - >>> loss_name=train_loss.name, main_program=train_main_prog) - >>> test_exe = fluid.ParallelExecutor(use_cuda=True, - >>> loss_name=test_loss.name, main_program=test_main_prog) - >>> for epoch_id in range(10): - >>> train_reader.start() - >>> try: - >>> while True: - >>> train_exe.run(fetch_list=[train_loss.name]) - >>> except fluid.core.EOFException: - >>> train_reader.reset() - >>> - >>> test_reader.start() - >>> try: - >>> while True: - >>> test_exe.run(fetch_list=[test_loss.name]) - >>> except fluid.core.EOFException: - >>> test_reader.reset() + .. code-block:: python + + import paddle + import paddle.fluid as fluid + import paddle.dataset.mnist as mnist + + def network(reader): + img, label = fluid.layers.read_file(reader) + # User defined network. 
Here a simple regression as example + predict = fluid.layers.fc(input=img, size=10, act='softmax') + loss = fluid.layers.cross_entropy(input=predict, label=label) + return fluid.layers.mean(loss) + + # Create train_main_prog and train_startup_prog + train_main_prog = fluid.Program() + train_startup_prog = fluid.Program() + with fluid.program_guard(train_main_prog, train_startup_prog): + # Use fluid.unique_name.guard() to share parameters with test program + with fluid.unique_name.guard(): + train_reader = fluid.layers.py_reader(capacity=64, + shapes=[(-1, 1, 28, 28), + (-1, 1)], + dtypes=['float32', 'int64'], + name='train_reader') + train_reader.decorate_paddle_reader( + paddle.reader.shuffle(paddle.batch(mnist.train(), batch_size=5), + buf_size=500)) + train_loss = network(train_reader) # some network definition + adam = fluid.optimizer.Adam(learning_rate=0.01) + adam.minimize(train_loss) + + # Create test_main_prog and test_startup_prog + test_main_prog = fluid.Program() + test_startup_prog = fluid.Program() + with fluid.program_guard(test_main_prog, test_startup_prog): + # Use fluid.unique_name.guard() to share parameters with train program + with fluid.unique_name.guard(): + test_reader = fluid.layers.py_reader(capacity=32, + shapes=[(-1, 1, 28, 28), (-1, 1)], + dtypes=['float32', 'int64'], + name='test_reader') + test_reader.decorate_paddle_reader(paddle.batch(mnist.test(), 512)) + test_loss = network(test_reader) + + fluid.Executor(fluid.CUDAPlace(0)).run(train_startup_prog) + fluid.Executor(fluid.CUDAPlace(0)).run(test_startup_prog) + + train_exe = fluid.ParallelExecutor(use_cuda=True, + loss_name=train_loss.name, + main_program=train_main_prog) + test_exe = fluid.ParallelExecutor(use_cuda=True, + loss_name=test_loss.name, + main_program=test_main_prog) + for epoch_id in range(10): + train_reader.start() + try: + while True: + train_exe.run(fetch_list=[train_loss.name]) + except fluid.core.EOFException: + train_reader.reset() + + test_reader.start() + try: + while True: + test_exe.run(fetch_list=[test_loss.name]) + except fluid.core.EOFException: + test_reader.reset() """ return _py_reader( capacity=capacity, @@ -801,31 +815,39 @@ def create_py_reader_by_data(capacity, Variable: A Reader from which we can get feeding data. Examples: + .. code-block:: python - 1. The basic usage of :code:`py_reader` is as follows: - - >>> import paddle.fluid as fluid - >>> import paddle.dataset.mnist as mnist - >>> - >>> image = fluid.layers.data(name='image', shape=[3,224,224], dtypes='float32') - >>> label = fluid.layers.data(name='label', shape=[1], dtypes='int64') - >>> reader = fluid.layers.create_py_reader_by_data(capacity=64, feed_list=[image, label]) - >>> reader.decorate_paddle_reader( - >>> paddle.reader.shuffle(paddle.batch(mnist.train()))) - >>> - >>> img, label = fluid.layers.read_file(reader) - >>> loss = network(img, label) # some network definition - >>> - >>> fluid.Executor(fluid.CUDAPlace(0)).run(fluid.default_startup_program()) - >>> - >>> exe = fluid.ParallelExecutor(use_cuda=True, loss_name=loss.name) - >>> for epoch_id in range(10): - >>> reader.start() - >>> try: - >>> while True: - >>> exe.run(fetch_list=[loss.name]) - >>> except fluid.core.EOFException: - >>> reader.reset() + import paddle + import paddle.fluid as fluid + import paddle.dataset.mnist as mnist + + def network(img, label): + # User defined network. 
Here a simple regression as example + predict = fluid.layers.fc(input=img, size=10, act='softmax') + loss = fluid.layers.cross_entropy(input=predict, label=label) + return fluid.layers.mean(loss) + + image = fluid.layers.data(name='image', shape=[1, 28, 28], dtype='float32') + label = fluid.layers.data(name='label', shape=[1], dtype='int64') + reader = fluid.layers.create_py_reader_by_data(capacity=64, + feed_list=[image, label]) + reader.decorate_paddle_reader( + paddle.reader.shuffle(paddle.batch(mnist.train(), batch_size=5), + buf_size=500)) + + img, label = fluid.layers.read_file(reader) + loss = network(img, label) # some network definition + + fluid.Executor(fluid.CUDAPlace(0)).run(fluid.default_startup_program()) + + exe = fluid.ParallelExecutor(use_cuda=True, loss_name=loss.name) + for epoch_id in range(10): + reader.start() + try: + while True: + exe.run(fetch_list=[loss.name]) + except fluid.core.EOFException: + reader.reset() """ return _py_reader( capacity=capacity, @@ -874,7 +896,7 @@ def open_files(filenames, reader = fluid.layers.io.open_files(filenames=['./data1.recordio', './data2.recordio'], - shapes=[(3,224,224), (1)], + shapes=[(3,224,224), (1,)], lod_levels=[0, 0], dtypes=['float32', 'int64']) @@ -993,7 +1015,7 @@ def batch(reader, batch_size): raw_reader = fluid.layers.io.open_files(filenames=['./data1.recordio', './data2.recordio'], - shapes=[(3,224,224), (1)], + shapes=[(3,224,224), (1,)], lod_levels=[0, 0], dtypes=['float32', 'int64'], thread_num=2, @@ -1102,6 +1124,12 @@ class Preprocessor(object): Examples: .. code-block:: python + reader = fluid.layers.io.open_files( + filenames=['./data1.recordio', './data2.recordio'], + shapes=[(3, 224, 224), (1, )], + lod_levels=[0, 0], + dtypes=['float32', 'int64']) + preprocessor = fluid.layers.io.Preprocessor(reader=reader) with preprocessor.block(): img, lbl = preprocessor.inputs() diff --git a/python/paddle/fluid/layers/layer_function_generator.py b/python/paddle/fluid/layers/layer_function_generator.py index da6c24100452ba26896c8e7c06a76d874b3f51a2..7391974b14f21129d10a909400a44e4ab0fd778e 100644 --- a/python/paddle/fluid/layers/layer_function_generator.py +++ b/python/paddle/fluid/layers/layer_function_generator.py @@ -249,7 +249,13 @@ def generate_activation_fn(op_type): func.__name__ = op_type func.__doc__ = _generate_doc_string_(op_proto) + func.__doc__ = func.__doc__ + """ +Examples: + .. code-block:: python + data = fluid.layers.data(name="input", shape=[32, 784]) + result = fluid.layers.%s(data) +""" % op_type return func diff --git a/python/paddle/fluid/layers/learning_rate_scheduler.py b/python/paddle/fluid/layers/learning_rate_scheduler.py index a67c8058f2c42713738420e81316452e15acb697..a9fdb10ae017b2b639153e1819b1275b6589624c 100644 --- a/python/paddle/fluid/layers/learning_rate_scheduler.py +++ b/python/paddle/fluid/layers/learning_rate_scheduler.py @@ -52,10 +52,17 @@ def noam_decay(d_model, warmup_steps): """ Noam decay method. The numpy implementation of noam decay as follows. - >>> import numpy as np - >>> lr_value = np.power(d_model, -0.5) * np.min([ - >>> np.power(current_steps, -0.5), - >>> np.power(warmup_steps, -1.5) * current_steps]) + .. code-block:: python + + import numpy as np + # set hyper parameters + d_model = 2 + current_steps = 20 + warmup_steps = 200 + # compute + lr_value = np.power(d_model, -0.5) * np.min([ + np.power(current_steps, -0.5), + np.power(warmup_steps, -1.5) * current_steps]) Please reference `attention is all you need `_. 
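Review note: a runnable NumPy restatement of the noam decay schedule documented above, useful for sanity-checking the docstring; the `d_model` and `warmup_steps` values are illustrative, not library defaults:

.. code-block:: python

    import numpy as np

    d_model, warmup_steps = 512, 4000  # illustrative hyperparameters
    steps = np.arange(1, 20001, dtype=np.float64)
    lr = np.power(d_model, -0.5) * np.minimum(
        np.power(steps, -0.5), np.power(warmup_steps, -1.5) * steps)

    # The rate grows linearly for the first warmup_steps steps, then decays
    # proportionally to 1/sqrt(step); the peak sits at step == warmup_steps.
    assert lr.argmax() + 1 == warmup_steps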
@@ -67,6 +74,15 @@ Returns: The decayed learning rate. + Examples: + .. code-block:: python + + import paddle.fluid as fluid + warmup_steps = 100 + learning_rate = 0.01 + lr = fluid.layers.learning_rate_scheduler.noam_decay( + 1 / (warmup_steps * (learning_rate ** 2)), + warmup_steps) """ with default_main_program()._lr_schedule_guard(): if imperative_base.enabled(): @@ -228,7 +244,7 @@ def polynomial_decay(learning_rate, """ Applies polynomial decay to the initial learning rate. - .. code-block:: python + .. code-block:: text if cycle: decay_steps = decay_steps * ceil(global_step / decay_steps) @@ -247,6 +263,17 @@ Returns: Variable: The decayed learning rate + + Examples: + .. code-block:: python + + import paddle.fluid as fluid + start_lr = 0.01 + total_step = 5000 + end_lr = 0 + lr = fluid.layers.polynomial_decay( + start_lr, total_step, end_lr, power=1) + """ with default_main_program()._lr_schedule_guard(): if imperative_base.enabled(): @@ -281,18 +308,18 @@ def polynomial_decay(learning_rate, def piecewise_decay(boundaries, values): """Applies piecewise decay to the initial learning rate. - The algorithm can be described as the code below. + The algorithm can be described as the code below. - .. code-block:: python + .. code-block:: text - boundaries = [10000, 20000] - values = [1.0, 0.5, 0.1] - if step < 10000: - learning_rate = 1.0 - elif 10000 <= step < 20000: - learning_rate = 0.5 - else: - learning_rate = 0.1 + boundaries = [10000, 20000] + values = [1.0, 0.5, 0.1] + if step < 10000: + learning_rate = 1.0 + elif 10000 <= step < 20000: + learning_rate = 0.5 + else: + learning_rate = 0.1 Args: boundaries: A list of steps numbers. values: A list of learning rate values that will be picked during @@ -301,6 +328,17 @@ Returns: The decayed learning rate. + Examples: + .. code-block:: python + + import paddle.fluid as fluid + boundaries = [10000, 20000] + values = [1.0, 0.5, 0.1] + optimizer = fluid.optimizer.Momentum( + momentum=0.9, + learning_rate=fluid.layers.piecewise_decay(boundaries=boundaries, values=values), + regularization=fluid.regularizer.L2Decay(1e-4)) + """ with default_main_program()._lr_schedule_guard(): diff --git a/python/paddle/fluid/layers/nn.py b/python/paddle/fluid/layers/nn.py index 428692cc63a9a6a75891b74b6581b4fc34388e86..d179f56c6ca3fb482561fcda2b27316670c99696 100644 --- a/python/paddle/fluid/layers/nn.py +++ b/python/paddle/fluid/layers/nn.py @@ -18,6 +18,7 @@ All layers just related to the neural network. from __future__ import print_function import numpy as np +import warnings import six import os import inspect @@ -154,6 +155,8 @@ __all__ = [ 'elementwise_max', 'elementwise_min', 'elementwise_pow', + 'elementwise_mod', + 'elementwise_floordiv', 'uniform_random_batch_size_like', 'gaussian_random', 'sampling_id', @@ -197,6 +200,7 @@ __all__ = [ 'pixel_shuffle', 'fsp_matrix', 'continuous_value_model', + 'where', ] kIgnoreIndex = -100 @@ -1321,6 +1325,13 @@ def cos_sim(X, Y): Returns: Variable: the output of cosine(X, Y). + + Examples: + ..
code-block:: python + + x = fluid.layers.data(name='x', shape=[3, 7], dtype='float32', append_batch_size=False) + y = fluid.layers.data(name='y', shape=[1, 7], dtype='float32', append_batch_size=False) + out = fluid.layers.cos_sim(x, y) """ helper = LayerHelper('cos_sim', **locals()) out = helper.create_variable_for_type_inference(dtype=X.dtype) @@ -1481,7 +1492,10 @@ def cross_entropy(input, label, soft_label=False, ignore_index=kIgnoreIndex): Examples: .. code-block:: python - predict = fluid.layers.fc(input=net, size=classdim, act='softmax') + classdim = 7 + x = fluid.layers.data(name='x', shape=[3, 7], dtype='float32', append_batch_size=False) + label = fluid.layers.data(name='label', shape=[3, 1], dtype='float32', append_batch_size=False) + predict = fluid.layers.fc(input=x, size=classdim, act='softmax') cost = fluid.layers.cross_entropy(input=predict, label=label) """ if not soft_label: @@ -3032,6 +3046,7 @@ def batch_norm(input, .. code-block:: python + x = fluid.layers.data(name='x', shape=[3, 7, 3, 7], dtype='float32', append_batch_size=False) hidden1 = fluid.layers.fc(input=x, size=200, param_attr='fc1.w') hidden2 = fluid.layers.batch_norm(input=hidden1) """ @@ -5291,8 +5306,8 @@ def ctc_greedy_decoder(input, blank, name=None): Examples: .. code-block:: python + import paddle.fluid as fluid x = fluid.layers.data(name='x', shape=[8], dtype='float32') - cost = fluid.layers.ctc_greedy_decoder(input=x, blank=0) """ helper = LayerHelper("ctc_greedy_decoder", **locals()) @@ -6099,22 +6114,24 @@ def softmax_with_cross_entropy(logits, soft_label=False, ignore_index=kIgnoreIndex, numeric_stable_mode=True, - return_softmax=False): + return_softmax=False, + axis=-1): """ **Softmax With Cross Entropy Operator.** Cross entropy loss with softmax is used as the output layer extensively. This - operator computes the softmax normalized values for each row of the input - tensor, after which cross-entropy loss is computed. This provides a more - numerically stable gradient. + operator computes the softmax normalized values for dimension :attr:`axis` of + the input tensor, after which cross-entropy loss is computed. This provides + a more numerically stable gradient. Because this operator performs a softmax on logits internally, it expects unscaled logits. This operator should not be used with the output of softmax operator since that would produce incorrect results. - When the attribute soft_label is set false, this operators expects mutually - exclusive hard labels, each sample in a batch is in exactly one class with a - probability of 1.0. Each sample in the batch will have a single label. + When the attribute :attr:`soft_label` is set :attr:`False`, this operators + expects mutually exclusive hard labels, each sample in a batch is in exactly + one class with a probability of 1.0. Each sample in the batch will have a + single label. The equation is as follows: @@ -6133,7 +6150,8 @@ def softmax_with_cross_entropy(logits, \\left(\\text{logit}_i - \\log\\left(\\sum_{i=0}^{K} \\exp(\\text{logit}_i)\\right)\\right), j = 1,...,K - 3) If numeric_stable_mode is True, softmax is calculated first by: + 3) If :attr:`numeric_stable_mode` is :attr:`True`, softmax is calculated + first by: .. math:: @@ -6146,32 +6164,39 @@ def softmax_with_cross_entropy(logits, and then cross entropy loss is calculated by softmax and label. Args: - logits (Variable): The unscaled log probabilities, which is a 2-D tensor - with shape [N x K]. N is the batch_size, and K is the class number. 
- label (Variable): The ground truth which is a 2-D tensor. If soft_label - is set to false, Label is a Tensor with shape [N x 1]. If - soft_label is set to true, Label is a Tensor with + logits (Variable): The input tensor of unscaled log probabilities. + label (Variable): The ground truth tensor. If :attr:`soft_label` + is set to :attr:`True`, Label is a Tensor in the + same shape with :attr:`logits`. If :attr:`soft_label` is set to + :attr:`False`, Label is a Tensor in the same shape with + :attr:`logits` except shape in dimension :attr:`axis` as 1. soft_label (bool): A flag to indicate whether to interpretate the given - labels as soft labels. By default, `soft_label` is set to False. + labels as soft labels. Default False. ignore_index (int): Specifies a target value that is ignored and does not contribute to the input gradient. Only valid - if soft_label is set to False. Default: kIgnoreIndex + if :attr:`soft_label` is set to :attr:`False`. + Default: kIgnoreIndex numeric_stable_mode (bool): A flag to indicate whether to use a more numerically stable algorithm. Only valid - when soft_label is False and GPU is used. - When soft_label is True or CPU is used, - the algorithm is always numerically stable. + when :attr:`soft_label` is :attr:`False` + and GPU is used. When :attr:`soft_label` + is :attr:`True` or CPU is used, the + algorithm is always numerically stable. Note that the speed may be slower when use stable algorithm. Default: True return_softmax (bool): A flag indicating whether to return the softmax along with the cross entropy loss. Default: False + axis (int): The index of dimension to perform softmax calculations. It + should be in range :math:`[-1, rank - 1]`, while :math:`rank` + is the rank of input :attr:`logits`. Default: -1. Returns: Variable or Tuple of two Variables: Return the cross entropy loss if \ `return_softmax` is False, otherwise the tuple \ - (loss, softmax), where the cross entropy loss is \ - a 2-D tensor with shape [N x 1], and softmax is a \ - 2-D tensor with shape [N x K]. + (loss, softmax), softmax is in the same shape \ + with input logits and cross entropy loss is in \ + the same shape with input logits except shape \ + in dimension :attr:`axis` as 1. Examples: .. code-block:: python @@ -6194,7 +6219,8 @@ attrs={ 'soft_label': soft_label, 'ignore_index': ignore_index, - 'numeric_stable_mode': numeric_stable_mode + 'numeric_stable_mode': numeric_stable_mode, + 'axis': axis }) if return_softmax: @@ -6769,7 +6795,7 @@ def lrn(input, n=5, k=1.0, alpha=1e-4, beta=0.75, name=None): .. math:: - Output(i, x, y) = Input(i, x, y) / \\left(k + \\alpha \\sum\\limits^{\\min(C, c + n/2)}_{j = \\max(0, c - n/2)}(Input(j, x, y))^2\\right)^{\\beta} + Output(i, x, y) = Input(i, x, y) / \\left(k + \\alpha \\sum\\limits^{\\min(C-1, i + n/2)}_{j = \\max(0, i - n/2)}(Input(j, x, y))^2\\right)^{\\beta} In the above equation: @@ -6874,6 +6900,8 @@ def pad(x, paddings, pad_value=0., name=None): .. code-block:: python # x is a rank 2 tensor variable. + import paddle.fluid as fluid + x = fluid.layers.data(name='data', shape=[224], dtype='float32') out = fluid.layers.pad( x=x, paddings=[0, 1, 1, 2], pad_value=0.)
""" @@ -6953,6 +6981,9 @@ def pad_constant_like(x, y, pad_value=0., name=None): # x is a rank 4 tensor variable, x.shape = (2, 3, 2, 3) # y is a rank 4 tensor variable, y.shape = (1, 3, 1, 3) + import paddle.fluid as fluid + x = fluid.layers.data(name='x', shape=[2,3,2,3], dtype='float32') + y = fluid.layers.data(name='y', shape=[1,3,1,3], dtype='float32') out = fluid.layers.pad_constant_like(x=x, y=y, pad_value=0.) # out is a rank 4 tensor variable, and out.shape = [2, 3 ,2 , 3] """ @@ -7151,8 +7182,11 @@ def dice_loss(input, label, epsilon=0.00001): Examples: .. code-block:: python + import paddle.fluid as fluid + x = fluid.layers.data(name='data', shape = [3, 224, 224, 2], dtype='float32') + label = fluid.layers.data(name='label', shape=[3, 224, 224, 1], dtype='float32') predictions = fluid.layers.softmax(x) - loss = fluid.layers.dice_loss(input=predictions, label=label, 2) + loss = fluid.layers.dice_loss(input=predictions, label=label) """ label = one_hot(label, depth=input.shape[-1]) reduce_dim = list(range(1, len(input.shape))) @@ -7877,6 +7911,7 @@ def relu(x, name=None): .. code-block:: python + x = fluid.layers.data(name="x", shape=[3, 4], dtype="float32") output = fluid.layers.relu(x) """ helper = LayerHelper('relu', **locals()) @@ -8022,9 +8057,9 @@ def crop(x, shape=None, offsets=None, name=None): is suitable for the case that the output shape may be changed each iteration. If a list/tupe of integer, it's length must be the same as the rank of `x` - offsets (Variable|list/tuple of integer|None): Specifies the copping + offsets (Variable|list/tuple of integer|None): Specifies the cropping offsets at each dimension. It can be a Variable or or a list/tupe - of integer. If a tensor Variable, it's rank must be the same as `x`. + of integers. If a tensor Variable, it's rank must be the same as `x`. This way is suitable for the case that the offsets may be changed each iteration. If a list/tupe of integer, it's length must be the same as the rank of `x`. If None, the offsets are 0 at each @@ -8042,6 +8077,7 @@ def crop(x, shape=None, offsets=None, name=None): .. code-block:: python + import paddle.fluid as fluid x = fluid.layers.data(name="x", shape=[3, 5], dtype="float32") y = fluid.layers.data(name="y", shape=[2, 3], dtype="float32") crop = fluid.layers.crop(x, shape=y) @@ -8166,6 +8202,7 @@ def affine_grid(theta, out_shape, name=None): .. code-block:: python + import paddle.fluid as fluid theta = fluid.layers.data(name="x", shape=[2, 3], dtype="float32") out_shape = fluid.layers.data(name="y", shape=[-1], dtype="float32") data = fluid.layers.affine_grid(theta, out_shape) @@ -9672,6 +9709,7 @@ def clip(x, min, max, name=None): Examples: .. code-block:: python + import paddle.fluid as fluid input = fluid.layers.data( name='data', shape=[1], dtype='float32') reward = fluid.layers.clip(x=input, min=-1.0, max=1.0) @@ -10219,8 +10257,7 @@ def hash(input, hash_size, num_hash=1, name=None): Examples: .. code-block:: python - word_dict = paddle.dataset.imdb.word_dict() - x = fluid.layers.data(shape[1], dtype='int32', lod_level=1) + x = fluid.layers.data(name="x", shape=[1], dtype='int32', lod_level=1) out = fluid.layers.hash(input=x, num_hash=4, hash_size=1000) """ helper = LayerHelper('hash', **locals()) @@ -10900,7 +10937,11 @@ def psroi_pool(input, Args: input (Variable): ${x_comment} - rois (Variable): ROIs (Regions of Interest) to pool over. + rois (Variable): ROIs (Regions of Interest) to pool over.It should be + a 2-D LoDTensor of shape (num_rois, 4), the lod level + is 1. 
+                         Given as [[x1, y1, x2, y2], ...], (x1, y1) is
+                         the top-left coordinate, and (x2, y2) is the
+                         bottom-right coordinate.
        output_channels (integer): ${output_channels_comment}
        spatial_scale (float): ${spatial_scale_comment} Default: 1.0
        pooled_height (integer): ${pooled_height_comment} Default: 1
@@ -10913,7 +10954,10 @@ def psroi_pool(input,
    Examples:
        .. code-block:: python

-            pool_out = fluid.layers.psroi_pool(input=x, rois=rois, 490, 1.0, 7, 7)
+            import paddle.fluid as fluid
+            x = fluid.layers.data(name='x', shape=[490, 28, 28], dtype='float32')
+            rois = fluid.layers.data(name='rois', shape=[4], lod_level=1, dtype='float32')
+            pool_out = fluid.layers.psroi_pool(x, rois, 10, 1.0, 7, 7)
    """
    helper = LayerHelper('psroi_pool', **locals())
    # check attrs
@@ -11298,3 +11342,38 @@ def continuous_value_model(input, cvm, use_cvm=True):
        outputs={'Y': [out]},
        attrs={"use_cvm": use_cvm})
    return out
+
+
+def where(condition):
+    """
+    Return an int64 tensor with rank 2, specifying the coordinates of the true elements in `condition`.
+
+    The output's first dimension is the number of true elements; its second dimension is the rank (number of dimensions) of `condition`.
+    If there are no true elements, an empty tensor is generated.
+
+    Args:
+        condition(Variable): A bool tensor with rank at least 1.
+
+    Returns:
+        Variable: The tensor variable storing a 2-D tensor.
+
+    Examples:
+        .. code-block:: python
+
+             # condition is a tensor [True, False, True]
+             out = fluid.layers.where(condition) # [[0], [2]]
+
+             # condition is a tensor [[True, False], [False, True]]
+             out = fluid.layers.where(condition) # [[0, 0], [1, 1]]
+
+             # condition is a tensor [False, False, False]
+             out = fluid.layers.where(condition) # [[]]
+    """
+    helper = LayerHelper("where", **locals())
+
+    out = helper.create_variable_for_type_inference(
+        dtype=core.VarDesc.VarType.INT64)
+
+    helper.append_op(
+        type='where', inputs={'Condition': condition}, outputs={'Out': [out]})
+    return out
diff --git a/python/paddle/fluid/layers/tensor.py b/python/paddle/fluid/layers/tensor.py
index d1681580bebc454d26be518180b649bfb3c76e4e..9a0afcd4516ed20a5a723109256ab8b8ba204922 100644
--- a/python/paddle/fluid/layers/tensor.py
+++ b/python/paddle/fluid/layers/tensor.py
@@ -28,7 +28,7 @@ __all__ = [
     'tensor_array_to_tensor', 'concat', 'sums', 'assign',
     'fill_constant_batch_size_like', 'fill_constant', 'argmin', 'argmax',
     'argsort', 'ones', 'zeros', 'reverse', 'has_inf', 'has_nan', 'isfinite',
-    'range', 'linspace', 'zeros_like'
+    'range', 'linspace', 'zeros_like', 'diag'
 ]
@@ -188,7 +188,11 @@ def concat(input, axis=0, name=None):
     Examples:
        .. code-block:: python

-            out = fluid.layers.concat(input=[Efirst, Esecond, Ethird, Efourth])
+            a = fluid.layers.data(name='a', shape=[2, 13], dtype='float32')
+            b = fluid.layers.data(name='b', shape=[2, 3], dtype='float32')
+            c = fluid.layers.data(name='c', shape=[2, 2], dtype='float32')
+            d = fluid.layers.data(name='d', shape=[2, 5], dtype='float32')
+            out = fluid.layers.concat(input=[a, b, c, d], axis=2)
    """
    helper = LayerHelper('concat', **locals())
    out = helper.create_variable_for_type_inference(dtype=helper.input_dtype())
@@ -460,8 +464,9 @@ def argmin(x, axis=0):
     Examples:
        .. code-block:: python

-            out = fluid.layers.argmin(x=in, axis=0)
-            out = fluid.layers.argmin(x=in, axis=-1)
+            x = fluid.layers.data(name="x", shape=[3, 4], dtype="float32")
+            out = fluid.layers.argmin(x, axis=0)
+            out = fluid.layers.argmin(x, axis=-1)
    """
    helper = LayerHelper("arg_min", **locals())
    out = helper.create_variable_for_type_inference(VarDesc.VarType.INT64)
@@ -491,8 +496,9 @@ def argmax(x, axis=0):
     Examples:
        .. code-block:: python

-            out = fluid.layers.argmax(x=in, axis=0)
-            out = fluid.layers.argmax(x=in, axis=-1)
+            x = fluid.layers.data(name="x", shape=[3, 4], dtype="float32")
+            out = fluid.layers.argmax(x, axis=0)
+            out = fluid.layers.argmax(x, axis=-1)
    """
    helper = LayerHelper("arg_max", **locals())
    out = helper.create_variable_for_type_inference(VarDesc.VarType.INT64)
@@ -541,8 +547,8 @@ def argsort(input, axis=-1, name=None):
     Examples:
        .. code-block:: python

-            input = fluid.layers.data(data=[2, 3])
-            out, indices = fluid.layers.argsort(input, axis=0)
+            x = fluid.layers.data(name="x", shape=[3, 4], dtype="float32")
+            out, indices = fluid.layers.argsort(input=x, axis=0)
    """
    helper = LayerHelper("argsort", **locals())
    out = helper.create_variable_for_type_inference(
@@ -884,3 +890,39 @@ def zeros_like(x, out=None):
        type='fill_zeros_like', inputs={'X': [x]}, outputs={'Out': [out]})
    out.stop_gradient = True
    return out
+
+
+def diag(diagonal):
+    """
+    **diag**
+
+    This function creates a square matrix which has diagonal values specified by `diagonal`.
+
+    Args:
+        diagonal(Variable|numpy.ndarray): The input tensor specifying diagonal values, should be of rank 1.
+
+    Returns:
+        Variable: The tensor variable storing the square matrix.
+
+    Examples:
+        .. code-block:: python
+
+          import numpy as np
+
+          # [[3, 0, 0],
+          #  [0, 4, 0],
+          #  [0, 0, 5]]
+          data = fluid.layers.diag(np.arange(3, 6))
+
+    """
+
+    helper = LayerHelper("diag", **locals())
+
+    if not isinstance(diagonal, Variable):
+        diagonal = assign(diagonal)
+
+    out = helper.create_variable_for_type_inference(dtype=diagonal.dtype)
+
+    helper.append_op(
+        type='diag', inputs={'Diagonal': [diagonal]}, outputs={'Out': [out]})
+
+    out.stop_gradient = True
+    return out
diff --git a/python/paddle/fluid/lod_tensor.py b/python/paddle/fluid/lod_tensor.py
index b91566fa6fb2449a8becc694b978c30039bf30ed..160b28d69c795b912fd2346ba4b557f393890543 100644
--- a/python/paddle/fluid/lod_tensor.py
+++ b/python/paddle/fluid/lod_tensor.py
@@ -47,6 +47,13 @@ def create_lod_tensor(data, recursive_seq_lens, place):
     sentence. This length-based :code:`recursive_seq_lens` [[2, 3]] will be
     converted to offset-based LoD [[0, 2, 5]] inside the function call.

+    .. code-block:: python
+
+        import paddle.fluid as fluid
+        import numpy as np
+
+        t = fluid.create_lod_tensor(np.ndarray([5, 30]), [[2, 3]], fluid.CPUPlace())
+
     Please reference :ref:`api_guide_low_level_lod_tensor` for more details
     regarding LoD.

@@ -127,6 +134,14 @@ def create_random_int_lodtensor(recursive_seq_lens, base_shape, place, low,

     Returns:
         A fluid LoDTensor object with tensor data and recursive_seq_lens info.
+
+    Examples:
+        .. code-block:: python
+
+            import paddle.fluid as fluid
+
+            t = fluid.create_random_int_lodtensor(recursive_seq_lens=[[2, 3]],
+                    base_shape=[30], place=fluid.CPUPlace(), low=0, high=10)
    """
    assert isinstance(base_shape, list), "base_shape should be a list"
    # append the total number of basic elements to the front of its shape
diff --git a/python/paddle/fluid/nets.py b/python/paddle/fluid/nets.py
index c961a5c36ed164fe96bc8edb334cfc9099182156..5e511ed2eb9ffaeada45046dbe6c2b7c15ae6d16 100644
--- a/python/paddle/fluid/nets.py
+++ b/python/paddle/fluid/nets.py
@@ -100,6 +100,7 @@ def simple_img_conv_pool(input,
     Examples:
        .. code-block:: python

+            import paddle.fluid as fluid
             img = fluid.layers.data(name='img', shape=[1, 28, 28], dtype='float32')
             conv_pool = fluid.nets.simple_img_conv_pool(input=img,
                                                         filter_size=5,
@@ -191,7 +192,6 @@ def img_conv_group(input,
             img = fluid.layers.data(name='img', shape=[1, 28, 28], dtype='float32')
             conv_pool = fluid.nets.img_conv_group(input=img,
-                                                  num_channels=3,
                                                   conv_padding=1,
                                                   conv_num_filter=[3, 3],
                                                   conv_filter_size=3,
@@ -279,10 +279,11 @@ def sequence_conv_pool(input,
     Examples:
        .. code-block:: python

-            input_dim = len(word_dict)
+            import paddle.fluid as fluid
+            input_dim = 100  # len(word_dict)
             emb_dim = 128
             hid_dim = 512
-            data = fluid.layers.data( ame="words", shape=[1], dtype="int64", lod_level=1)
+            data = fluid.layers.data(name="words", shape=[1], dtype="int64", lod_level=1)
             emb = fluid.layers.embedding(input=data, size=[input_dim, emb_dim], is_sparse=True)
             seq_conv = fluid.nets.sequence_conv_pool(input=emb,
                                                      num_filters=hid_dim,
diff --git a/python/paddle/fluid/optimizer.py b/python/paddle/fluid/optimizer.py
index 28126b72a429714dfe66ae709e31d99d843fab74..69bbef77f85bd25674235fbe0a54e9a70d43e714 100644
--- a/python/paddle/fluid/optimizer.py
+++ b/python/paddle/fluid/optimizer.py
@@ -679,12 +679,11 @@ class DGCMomentumOptimizer(MomentumOptimizer):
        .. code-block:: python

            optimizer = fluid.optimizer.DGCMomentumOptimizer(
-                learning_rate=fluid.layers.piecewise_decay(
-                    boundaries=bd, values=lr),
-                momentum=0.9,
-                rampup_begin_step=1252,
-                regularization=fluid.regularizer.L2Decay(1e-4))
-            optimizer.minimize(cost)
+                learning_rate=0.0001,
+                momentum=0.9,
+                rampup_step=1000,
+                rampup_begin_step=1252,
+                sparsity=[0.999, 0.999])

    """
diff --git a/python/paddle/fluid/parallel_executor.py b/python/paddle/fluid/parallel_executor.py
index cf10f590ce2c90450047ff046ee3ed206b38322e..a2c6537effafcc2134d05a3f972f88ea3ec985b5 100644
--- a/python/paddle/fluid/parallel_executor.py
+++ b/python/paddle/fluid/parallel_executor.py
@@ -37,6 +37,53 @@ class ParallelExecutor(object):
     is not found, ParallelExecutor will call `multiprocessing.cpu_count` to get
     the number of CPUs in the system.

+    Examples:
+        .. code-block:: python
+
+          import paddle.fluid as fluid
+          import numpy
+          import os
+
+          use_cuda = True
+          place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
+
+          # NOTE: If you run the program on CPU, you need to specify
+          # CPU_NUM; otherwise fluid will use all of the logical cores
+          # as CPU_NUM. In that case, the input batch size should be
+          # greater than CPU_NUM, or the process will fail with an
+          # exception.
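+          # (CPU_NUM is read when the ParallelExecutor below is
+          # constructed, so it must be set beforehand.)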
+          if not use_cuda:
+              os.environ['CPU_NUM'] = str(2)
+
+          exe = fluid.Executor(place)
+
+          train_program = fluid.Program()
+          startup_program = fluid.Program()
+          with fluid.program_guard(train_program, startup_program):
+              data = fluid.layers.data(name='X', shape=[1], dtype='float32')
+              hidden = fluid.layers.fc(input=data, size=10)
+              loss = fluid.layers.mean(hidden)
+              test_program = fluid.default_main_program().clone(for_test=True)
+              fluid.optimizer.SGD(learning_rate=0.01).minimize(loss)
+
+          startup_program.random_seed=1
+          exe.run(startup_program)
+
+          train_exe = fluid.ParallelExecutor(use_cuda=use_cuda,
+                                             main_program=train_program,
+                                             loss_name=loss.name)
+          test_exe = fluid.ParallelExecutor(use_cuda=use_cuda,
+                                            main_program=test_program,
+                                            share_vars_from=train_exe)
+
+          x = numpy.random.random(size=(10, 1)).astype('float32')
+          loss_data, = train_exe.run(feed={"X": x},
+                                     fetch_list=[loss.name])
+
+          loss_data, = test_exe.run(feed={"X": x},
+                                    fetch_list=[loss.name])
+
     Args:
         use_cuda (bool): Whether to use CUDA or not.
         loss_name (str): The loss name must be set in training. Default None.
@@ -66,16 +113,6 @@ class ParallelExecutor(object):
     Raises:
         TypeError: If share_vars_from is provided, but not ParallelExecutor
             object.
-
-    Examples:
-        .. code-block:: python
-
-          train_exe = fluid.ParallelExecutor(use_cuda=True, loss_name=loss.name)
-          test_exe = fluid.ParallelExecutor(use_cuda=True,
-                                            main_program=test_program,
-                                            share_vars_from=train_exe)
-
-          train_loss, = train_exe.run([loss.name], feed=feed_dict)
-          test_loss, = test_exe.run([loss.name], feed=feed_dict)
     """

    def __init__(self,
@@ -152,24 +189,58 @@ class ParallelExecutor(object):
            assumes the data has been split into multiple devices; each
            element in the list will be copied to each device directly.

-        For example, if the feed is a dict:
-
-        >>> exe = ParallelExecutor()
-        >>> # the image will be splitted into devices. If there is two devices
-        >>> # each device will process an image with shape (24, 1, 28, 28)
-        >>> exe.run(feed={'image': numpy.random.random(size=(48, 1, 28, 28))})
-
-        For example, if the feed is a list:
+        Examples:
+            .. code-block:: python

-        >>> exe = ParallelExecutor()
-        >>> # each device will process each element in the list.
-        >>> # the 1st device will process an image with shape (48, 1, 28, 28)
-        >>> # the 2nd device will process an image with shape (32, 1, 28, 28)
-        >>> #
-        >>> # you can use exe.device_count to get the device number.
-        >>> exe.run(feed=[{"image": numpy.random.random(size=(48, 1, 28, 28))},
-        >>>               {"image": numpy.random.random(size=(32, 1, 28, 28))},
-        >>>              ])
+              import paddle.fluid as fluid
+              import numpy
+              import os
+
+              use_cuda = True
+              place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
+
+              # NOTE: If you run the program on CPU, you need to specify
+              # CPU_NUM; otherwise fluid will use all of the logical cores
+              # as CPU_NUM. In that case, the input batch size should be
+              # greater than CPU_NUM, or the process will fail with an
+              # exception.
+              if not use_cuda:
+                  os.environ['CPU_NUM'] = str(2)
+
+              exe = fluid.Executor(place)
+
+              train_program = fluid.Program()
+              startup_program = fluid.Program()
+              with fluid.program_guard(train_program, startup_program):
+                  data = fluid.layers.data(name='X', shape=[1], dtype='float32')
+                  hidden = fluid.layers.fc(input=data, size=10)
+                  loss = fluid.layers.mean(hidden)
+                  fluid.optimizer.SGD(learning_rate=0.01).minimize(loss)
+
+              startup_program.random_seed=1
+              exe.run(startup_program)
+
+              train_exe = fluid.ParallelExecutor(use_cuda=use_cuda,
+                                                 main_program=train_program,
+                                                 loss_name=loss.name)
+
+              # If the feed is a dict:
+              # the image will be split across the devices. If there are two
+              # devices, each device will process an image with shape (5, 1)
+              x = numpy.random.random(size=(10, 1)).astype('float32')
+              loss_data, = train_exe.run(feed={"X": x},
+                                         fetch_list=[loss.name])
+
+              # If the feed is a list:
+              # each device will process each element in the list.
+              # the 1st device will process an image with shape (10, 1)
+              # the 2nd device will process an image with shape (9, 1)
+              #
+              # you can use exe.device_count to get the device number.
+              x2 = numpy.random.random(size=(9, 1)).astype('float32')
+              loss_data, = train_exe.run(feed=[{"X": x}, {"X": x2}],
+                                         fetch_list=[loss.name])

        Args:
            fetch_list(list): The fetched variable names
@@ -217,3 +288,68 @@ class ParallelExecutor(object):
    @property
    def device_count(self):
        return len(self._places)
+
+    def drop_local_exe_scopes(self):
+        """
+        Drop the local execution scope immediately.
+
+        During the execution of the Program, the generated intermediate
+        results are placed in the local execution scope; in some models the
+        creation and deletion of those intermediate results are time-consuming.
+        To resolve that problem, ParallelExecutor provides an option in
+        ExecutionStrategy, i.e. num_iteration_per_drop_scope, which
+        indicates how many iterations to run before dropping the local execution
+        scope. But in some situations each iteration generates different
+        intermediate results, so the memory needed by the local execution
+        scope gradually increases. If you want to run another program at this
+        time, there may be insufficient storage; at this point you should drop
+        the local execution scopes of the other Programs.
+
+        Examples:
+            .. code-block:: python
+
+              import paddle.fluid as fluid
+              import numpy
+              import os
+
+              use_cuda = True
+              # NOTE: If you run the program on CPU, you need to specify
+              # CPU_NUM; otherwise fluid will use all of the logical cores
+              # as CPU_NUM. In that case, the input batch size should be
+              # greater than CPU_NUM, or the process will fail with an
+              # exception.
+              if not use_cuda:
+                  os.environ['CPU_NUM'] = str(2)
+
+              train_program = fluid.Program()
+              startup_program = fluid.Program()
+              with fluid.program_guard(train_program, startup_program):
+                  data = fluid.layers.data(name='X', shape=[1], dtype='float32')
+                  hidden = fluid.layers.fc(input=data, size=10)
+                  loss = fluid.layers.mean(hidden)
+
+              place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
+              exe = fluid.Executor(place)
+              exe.run(startup_program)
+
+              parallel_exe = fluid.ParallelExecutor(use_cuda=use_cuda,
+                                                    main_program=train_program,
+                                                    loss_name=loss.name)
+
+              x = numpy.random.random(size=(10, 1)).astype('float32')
+              loss_data, = parallel_exe.run(feed={"X": x},
+                                            fetch_list=[loss.name])
+
+              parallel_exe.drop_local_exe_scopes()
+        """
+        assert isinstance(
+            self._compiled_program._executor,
+            core.ParallelExecutor), "The Executor should be ParallelExecutor."
+        self._compiled_program._executor.drop_local_exe_scopes()
+
+    # This API is used to check whether DropLocalExeScopes can work.
+    def _need_create_local_exe_scopes(self):
+        assert isinstance(
+            self._compiled_program._executor,
+            core.ParallelExecutor), "The Executor should be ParallelExecutor."
+        return self._compiled_program._executor._need_create_local_exe_scopes()
diff --git a/python/paddle/fluid/param_attr.py b/python/paddle/fluid/param_attr.py
index 38ddf93198d7c58382e36a5b7af488f56e6f9878..b7ce1c0e4f59af6be2dfaa7db79b49f72de65b16 100644
--- a/python/paddle/fluid/param_attr.py
+++ b/python/paddle/fluid/param_attr.py
@@ -48,11 +48,13 @@ class ParamAttr(object):
     Examples:
        .. code-block:: python

+            import paddle.fluid as fluid
+
             w_param_attrs = fluid.ParamAttr(name="fc_weight",
                                             learning_rate=0.5,
                                             regularizer=fluid.regularizer.L2Decay(1.0),
                                             trainable=True)
-            x = fluid.layers.data(name='X', shape=[1], dtype='float32')
+            x = fluid.layers.data(name='X', shape=[1], dtype='float32')
             y_predict = fluid.layers.fc(input=x, size=10, param_attr=w_param_attrs)
    """
diff --git a/python/paddle/fluid/reader.py b/python/paddle/fluid/reader.py
index 74ee2828deb6ecd51ff36b878e97254a62ad1cb6..c2322ec763475ab7aa5780b77b767a6d7550fc39 100644
--- a/python/paddle/fluid/reader.py
+++ b/python/paddle/fluid/reader.py
@@ -67,26 +67,44 @@ class PyReader(object):
          the reader manually.

          .. code-block:: python
-
-             image = fluid.layers.data(
-                 name='image', shape=[784], dtype='float32')
-             label = fluid.layers.data(
-                 name='label', shape=[1], dtype='int64')
-
-             reader = fluid.io.PyReader(feed_list=[image, label],
-                                        capacity=4, iterable=False)
-             reader.decorate_sample_list_generator(user_defined_reader)
-             ... # definition of network is omitted
-             executor.run(fluid.default_main_program())
-             for _ in range(EPOCH_NUM):
-                 reader.start()
-                 while True:
-                     try:
-                         executor.run(feed=None, ...)
-                     except fluid.core.EOFException:
-                         reader.reset()
-                         break
-
+
+             import paddle
+             import paddle.fluid as fluid
+             import numpy as np
+
+             EPOCH_NUM = 3
+             ITER_NUM = 5
+             BATCH_SIZE = 3
+
+             def reader_creator_random_image_and_label(height, width):
+                 def reader():
+                     for i in range(ITER_NUM):
+                         fake_image = np.random.uniform(low=0,
+                                                        high=255,
+                                                        size=[height, width])
+                         fake_label = np.ones([1])
+                         yield fake_image, fake_label
+                 return reader
+
+             image = fluid.layers.data(name='image', shape=[784, 784], dtype='float32')
+             label = fluid.layers.data(name='label', shape=[1], dtype='int64')
+
+             reader = fluid.io.PyReader(feed_list=[image, label],
+                                        capacity=4,
+                                        iterable=False)
+
+             user_defined_reader = reader_creator_random_image_and_label(784, 784)
+             reader.decorate_sample_list_generator(
+                 paddle.batch(user_defined_reader, batch_size=BATCH_SIZE))
+             # definition of network is omitted
+             executor = fluid.Executor(fluid.CUDAPlace(0))
+             executor.run(fluid.default_startup_program())
+             for i in range(EPOCH_NUM):
+                 reader.start()
+                 while True:
+                     try:
+                         executor.run(feed=None)
+                     except fluid.core.EOFException:
+                         reader.reset()
+                         break
+
       2. If iterable=True, the created PyReader object is decoupled from
          the program. No operator would be inserted into the program.
          In this case, the created reader is a Python generator, which
@@ -95,20 +113,31 @@ class PyReader(object):

          .. code-block:: python

-             image = fluid.layers.data(
-                 name='image', shape=[784], dtype='float32')
-             label = fluid.layers.data(
-                 name='label', shape=[1], dtype='int64')
-
-             reader = fluid.io.PyReader(feed_list=[image, label],
-                                        capacity=4, iterable=True)
-             reader.decorate_sample_list_generator(user_defined_reader,
-                                                   places=fluid.cuda_places())
-             ... # definition of network is omitted
-             executor.run(fluid.default_main_program())
-             for _ in range(EPOCH_NUM):
-                 for data in reader():
-                     executor.run(feed=data, ...)
+             import paddle
+             import paddle.fluid as fluid
+             import numpy as np
+
+             EPOCH_NUM = 3
+             ITER_NUM = 5
+             BATCH_SIZE = 10
+
+             def reader_creator_random_image(height, width):
+                 def reader():
+                     for i in range(ITER_NUM):
+                         yield np.random.uniform(low=0, high=255, size=[height, width]),
+                 return reader
+
+             image = fluid.layers.data(name='image', shape=[784, 784], dtype='float32')
+             reader = fluid.io.PyReader(feed_list=[image], capacity=4, iterable=True)
+
+             user_defined_reader = reader_creator_random_image(784, 784)
+             reader.decorate_sample_list_generator(
+                 paddle.batch(user_defined_reader, batch_size=BATCH_SIZE),
+                 fluid.core.CUDAPlace(0))
+             # definition of network is omitted
+             executor = fluid.Executor(fluid.CUDAPlace(0))
+             executor.run(fluid.default_startup_program())
+
+             for _ in range(EPOCH_NUM):
+                 for data in reader():
+                     executor.run(feed=data)
+
    """

    unique_name_generator = UniqueNameGenerator()

@@ -237,7 +266,33 @@ class PyReader(object):
        '''
        Start the data feeding thread.
        Can only be called when the reader object is not iterable.
-        '''
+
+        Example:
+            .. code-block:: python
+
+               import paddle
+               import paddle.fluid as fluid
+               import numpy as np
+
+               BATCH_SIZE = 10
+
+               def generator():
+                   for i in range(5):
+                       yield np.random.uniform(low=0, high=255, size=[784, 784]),
+
+               image = fluid.layers.data(name='image', shape=[784, 784], dtype='float32')
+               reader = fluid.io.PyReader(feed_list=[image], capacity=4, iterable=False)
+               reader.decorate_sample_list_generator(
+                   paddle.batch(generator, batch_size=BATCH_SIZE))
+
+               executor = fluid.Executor(fluid.CUDAPlace(0))
+               executor.run(fluid.default_startup_program())
+               for i in range(3):
+                   reader.start()
+                   while True:
+                       try:
+                           executor.run(feed=None)
+                       except fluid.core.EOFException:
+                           reader.reset()
+                           break
+
+        '''
        assert not self._iterable, "start() cannot be called when PyReader is iterable"
        self._start()

@@ -245,6 +300,32 @@ class PyReader(object):
        '''
        Reset the reader object when :code:`fluid.core.EOFException` is raised.
        Can only be called when the reader object is not iterable.
+
+        Example:
+            .. code-block:: python
+
+               import paddle
+               import paddle.fluid as fluid
+               import numpy as np
+
+               BATCH_SIZE = 10
+
+               def generator():
+                   for i in range(5):
+                       yield np.random.uniform(low=0, high=255, size=[784, 784]),
+
+               image = fluid.layers.data(name='image', shape=[784, 784], dtype='float32')
+               reader = fluid.io.PyReader(feed_list=[image], capacity=4, iterable=False)
+               reader.decorate_sample_list_generator(
+                   paddle.batch(generator, batch_size=BATCH_SIZE))
+
+               executor = fluid.Executor(fluid.CUDAPlace(0))
+               executor.run(fluid.default_startup_program())
+               for i in range(3):
+                   reader.start()
+                   while True:
+                       try:
+                           executor.run(feed=None)
+                       except fluid.core.EOFException:
+                           reader.reset()
+                           break
+
        '''
        assert not self._iterable, "reset() cannot be called when PyReader is iterable"
        self._reset()

@@ -283,7 +364,7 @@ class PyReader(object):
        Set the data source of the PyReader object.

        The provided :code:`sample_generator` should be a Python generator,
-        which yields numpy.ndarray typed data of each sample.
+        which yields list(numpy.ndarray)-typed data for each sample.

        :code:`places` must be set when the PyReader object is iterable.

@@ -292,12 +373,46 @@ class PyReader(object):
        Args:
            sample_generator (generator): Python generator that yields
-                numpy.ndarray-typed sample data.
+                list(numpy.ndarray)-typed sample data.
            batch_size (int): batch size. Must be larger than 0.
            drop_last (bool): Whether to drop the last batch when sample number
                is less than batch_size.
            places (None|list(CUDAPlace)|list(CPUPlace)): place list. Must
                be provided when PyReader is iterable.
+
+        Example:
+            .. code-block:: python
+
+                import paddle.fluid as fluid
+                import numpy as np
+
+                EPOCH_NUM = 3
+                ITER_NUM = 15
+                BATCH_SIZE = 3
+
+                def random_image_and_label_generator(height, width):
+                    def generator():
+                        for i in range(ITER_NUM):
+                            fake_image = np.random.uniform(low=0,
+                                                           high=255,
+                                                           size=[height, width])
+                            fake_label = np.array([1])
+                            yield fake_image, fake_label
+                    return generator
+
+                image = fluid.layers.data(name='image', shape=[784, 784], dtype='float32')
+                label = fluid.layers.data(name='label', shape=[1], dtype='int32')
+                reader = fluid.io.PyReader(feed_list=[image, label], capacity=4, iterable=True)
+
+                user_defined_generator = random_image_and_label_generator(784, 784)
+                reader.decorate_sample_generator(user_defined_generator,
+                                                 batch_size=BATCH_SIZE,
+                                                 places=[fluid.CUDAPlace(0)])
+                # definition of network is omitted
+                executor = fluid.Executor(fluid.CUDAPlace(0))
+                executor.run(fluid.default_startup_program())
+
+                for _ in range(EPOCH_NUM):
+                    for data in reader():
+                        executor.run(feed=data)
+
        '''
        assert batch_size > 0, "batch_size must be larger than 0"
        has_lod = False
@@ -336,6 +451,40 @@ class PyReader(object):
                list(numpy.ndarray)-typed batched data.
            places (None|list(CUDAPlace)|list(CPUPlace)): place list. Must
                be provided when PyReader is iterable.
+
+        Example:
+            .. code-block:: python
+
+                import paddle
+                import paddle.fluid as fluid
+                import numpy as np
+
+                EPOCH_NUM = 3
+                ITER_NUM = 15
+                BATCH_SIZE = 3
+
+                def random_image_and_label_generator(height, width):
+                    def generator():
+                        for i in range(ITER_NUM):
+                            fake_image = np.random.uniform(low=0,
+                                                           high=255,
+                                                           size=[height, width])
+                            fake_label = np.ones([1])
+                            yield fake_image, fake_label
+                    return generator
+
+                image = fluid.layers.data(name='image', shape=[784, 784], dtype='float32')
+                label = fluid.layers.data(name='label', shape=[1], dtype='int32')
+                reader = fluid.io.PyReader(feed_list=[image, label], capacity=4, iterable=True)
+
+                user_defined_generator = random_image_and_label_generator(784, 784)
+                reader.decorate_sample_list_generator(
+                    paddle.batch(user_defined_generator, batch_size=BATCH_SIZE),
+                    fluid.core.CUDAPlace(0))
+                # definition of network is omitted
+                executor = fluid.Executor(fluid.core.CUDAPlace(0))
+                executor.run(fluid.default_startup_program())
+
+                for _ in range(EPOCH_NUM):
+                    for data in reader():
+                        executor.run(feed=data)
+
        '''
        assert self._tensor_reader is None, \
            "Cannot reset the data source of PyReader"
@@ -364,6 +513,38 @@ class PyReader(object):
                batched data.
            places (None|list(CUDAPlace)|list(CPUPlace)): place list. Must
                be provided when PyReader is iterable.
+
+        Example:
+            .. code-block:: python
+
+                import paddle.fluid as fluid
+                import numpy as np
+
+                EPOCH_NUM = 3
+                ITER_NUM = 15
+                BATCH_SIZE = 3
+
+                def random_image_and_label_generator(height, width):
+                    def generator():
+                        for i in range(ITER_NUM):
+                            batch_image = np.random.uniform(low=0,
+                                                            high=255,
+                                                            size=[BATCH_SIZE, height, width])
+                            batch_label = np.ones([BATCH_SIZE, 1])
+                            yield batch_image, batch_label
+                    return generator
+
+                image = fluid.layers.data(name='image', shape=[784, 784], dtype='float32')
+                label = fluid.layers.data(name='label', shape=[1], dtype='int32')
+                reader = fluid.io.PyReader(feed_list=[image, label], capacity=4, iterable=True)
+
+                user_defined_generator = random_image_and_label_generator(784, 784)
+                reader.decorate_batch_generator(user_defined_generator, fluid.CUDAPlace(0))
+                # definition of network is omitted
+                executor = fluid.Executor(fluid.CUDAPlace(0))
+                executor.run(fluid.default_startup_program())
+
+                for _ in range(EPOCH_NUM):
+                    for data in reader():
+                        executor.run(feed=data)
+
        '''
        assert self._tensor_reader is None, \
            "Cannot reset the data source of PyReader"
diff --git a/python/paddle/fluid/tests/unittests/CMakeLists.txt b/python/paddle/fluid/tests/unittests/CMakeLists.txt
index 46664ea33d7b805f3a0bc97db1b36e1eb172a083..aa4fc5ceb905034183ff235e503a70017cb27bce 100644
--- a/python/paddle/fluid/tests/unittests/CMakeLists.txt
+++ b/python/paddle/fluid/tests/unittests/CMakeLists.txt
@@ -124,7 +124,7 @@ foreach(TEST_OP ${TEST_OPS})
   py_test_modules(${TEST_OP} MODULES ${TEST_OP})
 endforeach(TEST_OP)
 py_test_modules(test_adam_op_multi_thread MODULES test_adam_op ENVS FLAGS_inner_op_parallelism=4)
-py_test_modules(test_warpctc_op MODULES test_warpctc_op ENVS FLAGS_warpctc_dir=${WARPCTC_LIB_DIR} SERIAL)
+py_test_modules(test_warpctc_op MODULES test_warpctc_op SERIAL)
 py_test_modules(test_bilinear_interp_op MODULES test_bilinear_interp_op ENVS ${GC_ENVS} SERIAL)
 py_test_modules(test_nearest_interp_op MODULES test_nearest_interp_op ENVS ${GC_ENVS} SERIAL)
 py_test_modules(test_imperative_resnet MODULES test_imperative_resnet ENVS
diff --git a/python/paddle/fluid/tests/unittests/gradient_checker.py b/python/paddle/fluid/tests/unittests/gradient_checker.py
index 14a828f28ee8141140b15afdfa7aa6f894a11b1a..87c917873cd97f7512621d45f64b2ae9e76bd33b 100644
--- a/python/paddle/fluid/tests/unittests/gradient_checker.py
+++ b/python/paddle/fluid/tests/unittests/gradient_checker.py
@@ -82,6 +82,10 @@ def set_var_in_scope(scope, place, name, value, recursive_seq_len=None):
     return t


+def var_to_np_array_in_scope(scope, place, name):
+    return np.array(scope.var(name).get_tensor())
+
+
 def make_jacobian(x, y_size, np_dtype):
     if isinstance(x, fluid.framework.Variable):
         return np.zeros((_product(x.shape), y_size), dtype=np_dtype)
@@ -192,14 +196,18 @@ def _compute_analytical_jacobian(program, x, y, place, scope):
     x = _as_list(x)
     jacobian = make_jacobian(x, y_size, np_type)

-    dx = _as_list(dx)
     for i in six.moves.xrange(y_size):
         _set_item(dy_t, i, 1, np_type)

         dx_res = exe.run(program, scope=scope, fetch_list=dx)

         for j in six.moves.xrange(len(x)):
-            jacobian[j][:, i] = dx_res[j].flatten()
+            if dx_res[j] is not None:
+                jacobian[j][:, i] = dx_res[j].flatten()
+            else:
+                jacobian[j][:, i] = np.zeros(
+                    dx[j].shape, dtype=np_type).flatten()
+
         _set_item(dy_t, i, 0, np_type)

     return jacobian
@@ -242,6 +250,7 @@ def grad_check(x,
     # check input arguments
     x = _as_list(x)
     y = _as_list(y)
+
     for v in x:
         v.stop_gradient = False
         v.persistable = True
@@ -274,9 +283,24 @@ def grad_check(x,
     ]

     # [y_idx, x_idx]
-    analytical = [
-        _compute_analytical_jacobian(program, x, yi, place, scope) for yi in y
-    ]
+    analytical = []
+    for yi in y:
+        prog = program.clone()
+
+        clone_x = []
+        clone_y = None
+        for b in prog.blocks:
+            if b.has_var(yi.name):
+                clone_y = b.var(yi.name)
+                break
+        for xi in x:
+            for b in prog.blocks:
+                if b.has_var(xi.name):
+                    clone_x.append(b.var(xi.name))
+                    break
+
+        analytical.append(
+            _compute_analytical_jacobian(prog, clone_x, clone_y, place, scope))

     for i, (x_idx,
             y_idx) in enumerate(product(*[range(len(x)), range(len(y))])):
@@ -334,6 +358,7 @@ def double_grad_check(x,
     if y_grads is None:
         scope = fluid.executor.global_scope()
         y_grads = []
+        y_grads_init = []
         for yi in y:
             dyi_name = _append_grad_suffix_(yi.name)
             np_type = dtype_to_np_dtype(yi.dtype)
@@ -343,9 +368,20 @@ def double_grad_check(x,
             v = np.random.random(size=yi.shape).astype(np_type)
             set_var_in_scope(scope, place, dyi_name, v)
             y_grads.append(dy)
+            y_grads_init.append(v)
     else:
         y_grads = _as_list(y_grads)
+        y_grads_init = [
+            var_to_np_array_in_scope(scope, place, v.name) for v in y_grads
+        ]

     # append first order grads
     target_grads = calc_gradient(y, x, y_grads)
+
+    # y_grads are an input of the first-order backward pass,
+    # so they are also an input of the second-order backward pass.
+    x += y_grads
+    x_init = _as_list(x_init)
+    x_init += y_grads_init
+
     grad_check(x, target_grads, x_init, place, program, eps, atol, rtol)
diff --git a/python/paddle/fluid/tests/unittests/ngraph/test_lrn_ngraph_op.py b/python/paddle/fluid/tests/unittests/ngraph/test_lrn_ngraph_op.py
new file mode 100644
index 0000000000000000000000000000000000000000..4c998c6ca2ec3bba8d7c3257ed3fe4fddc70a46a
--- /dev/null
+++ b/python/paddle/fluid/tests/unittests/ngraph/test_lrn_ngraph_op.py
@@ -0,0 +1,29 @@
+# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
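+
+# TestLRNOp is imported only to be subclassed with an nGraph-specific
+# tolerance; the imported name is deleted again at module level below so
+# that the plain (non-nGraph) test is not discovered and run twice here.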
+
+from __future__ import print_function
+
+import unittest
+from paddle.fluid.tests.unittests.test_lrn_op import TestLRNOp
+
+
+class TestLRNNGRAPHOp(TestLRNOp):
+    def test_check_output(self):
+        self.check_output(atol=0.002)
+
+
+del TestLRNOp
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/python/paddle/fluid/tests/unittests/ngraph/test_softmax_ngraph_op.py b/python/paddle/fluid/tests/unittests/ngraph/test_softmax_ngraph_op.py
index 0cb08842df0797952c47a63ba2bbb8614c0e8a22..09c52e2b1084fc5f716a6d1abfb4968d2c5460da 100644
--- a/python/paddle/fluid/tests/unittests/ngraph/test_softmax_ngraph_op.py
+++ b/python/paddle/fluid/tests/unittests/ngraph/test_softmax_ngraph_op.py
@@ -14,7 +14,7 @@
 from __future__ import print_function

 import unittest
-from paddle.fluid.tests.unittests.test_softmax_op import TestSoftmaxOp
+from paddle.fluid.tests.unittests.test_softmax_op import TestSoftmaxOp, TestSoftmaxOp2, TestSoftmaxOp3, TestSoftmaxOp4, TestSoftmaxOp5

 if __name__ == "__main__":
     unittest.main()
diff --git a/python/paddle/fluid/tests/unittests/test_diag.py b/python/paddle/fluid/tests/unittests/test_diag.py
new file mode 100644
index 0000000000000000000000000000000000000000..eed8b91f0e3dc5a0552e9d912e2b63d724c4d6d9
--- /dev/null
+++ b/python/paddle/fluid/tests/unittests/test_diag.py
@@ -0,0 +1,43 @@
+# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
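+
+# The expected outputs below are generated with numpy.diag, which serves
+# as the reference implementation for the new diag op.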
+
+from __future__ import print_function
+
+import unittest
+import numpy as np
+from op_test import OpTest
+
+
+class TestDiagOp(OpTest):
+    def setUp(self):
+        self.op_type = "diag"
+        self.init_config()
+        self.inputs = {'Diagonal': self.case}
+
+        self.outputs = {'Out': np.diag(self.inputs['Diagonal'])}
+
+    def test_check_output(self):
+        self.check_output()
+
+    def init_config(self):
+        self.case = np.arange(3, 6)
+
+
+class TestDiagOpCase1(TestDiagOp):
+    def init_config(self):
+        self.case = np.array([3], dtype='int32')
+
+
+if __name__ == "__main__":
+    unittest.main()
diff --git a/python/paddle/fluid/tests/unittests/test_fake_quantize_op.py b/python/paddle/fluid/tests/unittests/test_fake_quantize_op.py
index 07038b0441d0dc37a42cbf2058c1b5f41b47a5da..8d82438c15c7853f39566937e4192ef88a4f79ae 100644
--- a/python/paddle/fluid/tests/unittests/test_fake_quantize_op.py
+++ b/python/paddle/fluid/tests/unittests/test_fake_quantize_op.py
@@ -130,6 +130,38 @@ class TestFakeQuantizeMovingOp(OpTest):
         self.check_output()


+class TestMovingAverageAbsMaxScaleOp(OpTest):
+    def setUp(self):
+        self.op_type = "moving_average_abs_max_scale"
+        self.attrs = {'moving_rate': float(0.9), 'is_test': False}
+        accum = np.zeros(1).astype("float32")
+        accum[0] = 1
+        state = np.zeros(1).astype("float32")
+        state[0] = 1
+        self.inputs = {
+            'X': np.random.random((8, 16, 7, 7)).astype("float32"),
+            'InAccum': accum,
+            'InState': state,
+        }
+
+        out_accum = np.zeros(1).astype("float32")
+        out_state = np.zeros(1).astype("float32")
+        out_scale = np.zeros(1).astype("float32")
+        out_accum[0] = self.attrs['moving_rate'] * accum[0] + np.max(
+            np.abs(self.inputs['X'])).astype("float32")
+        out_state[0] = self.attrs['moving_rate'] * state[0] + 1
+        out_scale = out_accum / out_state
+        self.outputs = {
+            'Out': self.inputs['X'],
+            'OutAccum': out_accum,
+            'OutState': out_state,
+            'OutScale': out_scale,
+        }
+
+    def test_check_output(self):
+        self.check_output()
+
+
 class TestFakeQuantizeRangeAbsMaxOp2(OpTest):
     def setUp(self):
         self.op_type = "fake_quantize_range_abs_max"
diff --git a/python/paddle/fluid/tests/unittests/test_inplace_softmax_with_cross_entropy.py b/python/paddle/fluid/tests/unittests/test_inplace_softmax_with_cross_entropy.py
index a19626297a677359d622dddfb484baba2e110c0c..d4e514fa24c5efe6c0253ce3689f87dea4566f8d 100644
--- a/python/paddle/fluid/tests/unittests/test_inplace_softmax_with_cross_entropy.py
+++ b/python/paddle/fluid/tests/unittests/test_inplace_softmax_with_cross_entropy.py
@@ -21,25 +21,39 @@ import unittest

 class TestSoftmaxWithXe(unittest.TestCase):
     def setUp(self):
+        self.initParameter()
         self.m, self.n = np.random.random_integers(
             low=100, high=2000, size=[2]).astype('int64')

-    def softmax_with_xe(self, x, y, place, inplace=True):
+    def initParameter(self):
+        self.dtype = 'float32'
+        self.soft_label = False
+
+    def softmax_with_xe(self,
+                        x,
+                        y,
+                        place,
+                        inplace=True,
+                        numeric_stable_mode=True):
         m, n = x.shape
         with fluid.program_guard(fluid.Program(), fluid.Program()):
             with fluid.scope_guard(fluid.Scope()):
                 x_d = fluid.layers.data(
                     name='x',
                     shape=[m, n],
-                    dtype='float32',
+                    dtype=self.dtype,
                     append_batch_size=False)
                 y_d = fluid.layers.data(
                     name='y',
-                    shape=[m, 1],
-                    dtype='int64',
+                    shape=[m, 1] if not self.soft_label else [m, n],
+                    dtype='int64' if not self.soft_label else self.dtype,
                     append_batch_size=False)
                 z_d, s_d = fluid.layers.softmax_with_cross_entropy(
-                    x_d, y_d, return_softmax=True)
+                    x_d,
+                    y_d,
+                    soft_label=self.soft_label,
+                    return_softmax=True,
+                    numeric_stable_mode=numeric_stable_mode)

                 exe = fluid.Executor(place)
@@ -51,7 +65,7 @@ class TestSoftmaxWithXe(unittest.TestCase):
                 )).with_data_parallel(
                     build_strategy=build_strategy, places=place)

-                if inplace and isinstance(place, fluid.CUDAPlace):
+                if inplace:
                     fetch_list = [z_d.name, x_d.name]
                 else:
                     fetch_list = [z_d.name, s_d.name]
@@ -63,16 +77,33 @@ class TestSoftmaxWithXe(unittest.TestCase):
                 return z, s

     def main_with_place(self, place):
-        x = np.random.random(size=[self.m, self.n]).astype('float32')
+        x = np.random.random(size=[self.m, self.n]).astype(self.dtype)
         x_range = [(-30, 30), (10, 20), (-1, 1), (2, 3), (0, 0.3),
                    (-200, -100)]

         for a, b in x_range:
-            x = ((b - a) * x + a).astype('float32')
-            y = np.random.random_integers(
-                size=[self.m, 1], low=0, high=self.n - 1).astype('int64')
-            z1, s1 = self.softmax_with_xe(x, y, place, False)
-            z2, s2 = self.softmax_with_xe(x, y, place, True)
+            x = ((b - a) * x + a).astype(self.dtype)
+            if not self.soft_label:
+                y = np.random.random_integers(
+                    size=[self.m, 1], low=0, high=self.n - 1).astype('int64')
+            else:
+                y = np.random.random(size=[self.m, self.n]).astype(self.dtype)
+                norm_y = np.broadcast_to(
+                    np.reshape(
+                        np.sum(y, axis=1), [-1, 1]), y.shape)
+                y = y / norm_y
+
+            z1, s1 = self.softmax_with_xe(
+                x, y, place, inplace=False, numeric_stable_mode=False)
+            z2, s2 = self.softmax_with_xe(
+                x, y, place, inplace=True, numeric_stable_mode=False)
+
+            self.assertTrue((z1 == z2).all())
+            self.assertTrue((s1 == s2).all())

+            z1, s1 = self.softmax_with_xe(
+                x, y, place, inplace=False, numeric_stable_mode=True)
+            z2, s2 = self.softmax_with_xe(
+                x, y, place, inplace=True, numeric_stable_mode=True)
             self.assertTrue((z1 == z2).all())
             self.assertTrue((s1 == s2).all())
@@ -82,5 +113,23 @@ class TestSoftmaxWithXe(unittest.TestCase):
             self.main_with_place(fluid.CUDAPlace(0))


+class TestSoftmaxWithXe1(TestSoftmaxWithXe):
+    def initParameter(self):
+        self.dtype = 'float32'
+        self.soft_label = True
+
+
+class TestSoftmaxWithXe2(TestSoftmaxWithXe):
+    def initParameter(self):
+        self.dtype = 'float64'
+        self.soft_label = False
+
+
+class TestSoftmaxWithXe3(TestSoftmaxWithXe):
+    def initParameter(self):
+        self.dtype = 'float64'
+        self.soft_label = True
+
+
 if __name__ == '__main__':
     unittest.main()
diff --git a/python/paddle/fluid/tests/unittests/test_layers.py b/python/paddle/fluid/tests/unittests/test_layers.py
index 46f025c33bc9cc3a7197a4e87475b4d9c132b4ed..2474125835fbf54316e26d272eec940fc380a448 100644
--- a/python/paddle/fluid/tests/unittests/test_layers.py
+++ b/python/paddle/fluid/tests/unittests/test_layers.py
@@ -1221,10 +1221,25 @@ class TestBook(LayerTest):
             y = self._get_data(name='label', shape=[1], dtype='int64')
             loss, softmax = layers.softmax_with_cross_entropy(
                 x, y, return_softmax=True)
-            return (loss)
-            return (softmax)
+            self.assertIsNotNone(loss)
+            self.assertIsNotNone(softmax)

             loss = layers.softmax_with_cross_entropy(x, y)
-            return (loss)
+            self.assertIsNotNone(loss)
+
+            x1 = self._get_data(name='x1', shape=[16, 32, 64], dtype='float32')
+            y1 = self._get_data(name='label1', shape=[1, 32, 64], dtype='int64')
+            y2 = self._get_data(name='label2', shape=[16, 1, 64], dtype='int64')
+            y3 = self._get_data(name='label3', shape=[16, 32, 1], dtype='int64')
+            loss1 = layers.softmax_with_cross_entropy(x1, y1, axis=1)
+            loss2 = layers.softmax_with_cross_entropy(x1, y2, axis=2)
+            loss3 = layers.softmax_with_cross_entropy(x1, y3, axis=3)
+            loss4 = layers.softmax_with_cross_entropy(x1, y3, axis=-1)
+            self.assertIsNotNone(loss1)
+            self.assertIsNotNone(loss2)
+            self.assertIsNotNone(loss3)
+            self.assertIsNotNone(loss4)
+            return (loss4)

     def make_smooth_l1(self):
         with program_guard(fluid.default_main_program(),
diff --git a/python/paddle/fluid/tests/unittests/test_nn_grad.py b/python/paddle/fluid/tests/unittests/test_nn_grad.py
index e2d540fea558a997eb0570dee79a91881f4dac0c..df0d8e0345cf497f264b59049a4e01ba9aa85d85 100644
--- a/python/paddle/fluid/tests/unittests/test_nn_grad.py
+++ b/python/paddle/fluid/tests/unittests/test_nn_grad.py
@@ -46,7 +46,6 @@ class TestMulGradCheck(unittest.TestCase):
 class TestReluDoubleGradCheck(unittest.TestCase):
     @prog_scope()
     def func(self, place):
-        # the shape of input variable shoule be clearly specified, not inlcude -1.
         shape = [2, 8]
         eps = 0.005
         dtype = np.float64
@@ -71,7 +70,6 @@ class TestReluDoubleGradCheck(unittest.TestCase):
 class TestLeakyReluDoubleGradCheck(unittest.TestCase):
     @prog_scope()
     def func(self, place):
-        # the shape of input variable shoule be clearly specified, not inlcude -1.
         shape = [3, 7]
         eps = 0.005
         alpha = 0.2
@@ -79,6 +77,7 @@ class TestLeakyReluDoubleGradCheck(unittest.TestCase):
         x = layers.data('x', shape, False, dtype)
         x.persistable = True
+
         y = layers.leaky_relu(x, alpha=alpha)
         x_arr = np.random.uniform(-1, 1, shape).astype(dtype)
         x_arr[np.abs(x_arr) < 0.005] = 0.02
@@ -90,8 +89,30 @@ class TestLeakyReluDoubleGradCheck(unittest.TestCase):
         places = [fluid.CPUPlace()]
         if core.is_compiled_with_cuda():
             places.append(fluid.CUDAPlace(0))
         for p in places:
             self.func(p)
+
+
+class TestConvDoubleGradCheck(unittest.TestCase):
+    @prog_scope()
+    def func(self, place):
+        shape = [2, 4, 14, 16]
+        eps = 0.005
+        dtype = np.float64
+        x = layers.data('x', shape, False, dtype)
+        y = layers.conv2d(x, 4, 1, bias_attr=False)
+        x_arr = np.random.uniform(-1, 1, shape).astype(dtype)
+
+        w = fluid.default_main_program().global_block().all_parameters()
+        w_arr = []
+        for p in w:
+            w_arr.append(np.random.uniform(-1, 1, p.shape).astype(dtype))
+        gradient_checker.double_grad_check(
+            [x] + w, y, x_init=[x_arr] + w_arr, place=place, eps=eps)
+
+    def test_grad(self):
+        if core.is_compiled_with_cuda():
+            places = [fluid.CUDAPlace(0)]
+            for p in places:
+                self.func(p)


 if __name__ == "__main__":
diff --git a/python/paddle/fluid/tests/unittests/test_parallel_executor_drop_scope.py b/python/paddle/fluid/tests/unittests/test_parallel_executor_drop_scope.py
new file mode 100644
index 0000000000000000000000000000000000000000..e0bae089829b330e1a2dba34782f096f24279368
--- /dev/null
+++ b/python/paddle/fluid/tests/unittests/test_parallel_executor_drop_scope.py
@@ -0,0 +1,77 @@
+# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
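+
+# Runs a few iterations with num_iteration_per_drop_scope=10 (so the local
+# scopes would normally be kept), then checks that drop_local_exe_scopes()
+# forces the local scopes to be recreated on the next run.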
+
+from __future__ import print_function
+
+import unittest
+import paddle.fluid as fluid
+import numpy
+import os
+
+
+class TestParallelExecutorDropExeScope(unittest.TestCase):
+    def check_drop_scope(self, use_cuda=True):
+        place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
+
+        if not use_cuda:
+            os.environ['CPU_NUM'] = str(2)
+
+        train_program = fluid.Program()
+        startup_program = fluid.Program()
+        with fluid.program_guard(train_program, startup_program):
+            data = fluid.layers.data(name='X', shape=[1], dtype='float32')
+            hidden = fluid.layers.fc(input=data, size=10)
+            loss = fluid.layers.mean(hidden)
+            test_program = fluid.default_main_program().clone(for_test=True)
+            fluid.optimizer.SGD(learning_rate=0.01).minimize(loss)
+
+        exe = fluid.Executor(place)
+        exe.run(startup_program)
+
+        exec_strategy = fluid.ExecutionStrategy()
+        exec_strategy.num_iteration_per_drop_scope = 10
+
+        train_exe = fluid.ParallelExecutor(
+            use_cuda=use_cuda,
+            main_program=train_program,
+            loss_name=loss.name,
+            exec_strategy=exec_strategy)
+        test_exe = fluid.ParallelExecutor(
+            use_cuda=use_cuda,
+            main_program=test_program,
+            share_vars_from=train_exe,
+            exec_strategy=exec_strategy)
+
+        x = numpy.random.random(size=(10, 1)).astype('float32')
+        train_exe.run(feed={"X": x}, fetch_list=[loss.name])
+        test_exe.run(feed={"X": x}, fetch_list=[loss.name])
+
+        assert train_exe._need_create_local_exe_scopes() == False
+        assert test_exe._need_create_local_exe_scopes() == False
+
+        # drop the local execution scope immediately
+        train_exe.drop_local_exe_scopes()
+        test_exe.drop_local_exe_scopes()
+
+        assert train_exe._need_create_local_exe_scopes()
+        assert test_exe._need_create_local_exe_scopes()
+
+    def test_drop_scope(self):
+        self.check_drop_scope(use_cuda=False)
+        if fluid.core.is_compiled_with_cuda():
+            self.check_drop_scope(use_cuda=True)
+
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/python/paddle/fluid/tests/unittests/test_softmax_with_cross_entropy_op.py b/python/paddle/fluid/tests/unittests/test_softmax_with_cross_entropy_op.py
index b06b52f75d21a720e2473feba6ba2e1dccc2db89..d37731146d9c431bb6a0c333149ac62a0c4efd3b 100644
--- a/python/paddle/fluid/tests/unittests/test_softmax_with_cross_entropy_op.py
+++ b/python/paddle/fluid/tests/unittests/test_softmax_with_cross_entropy_op.py
@@ -21,37 +21,70 @@ from op_test import OpTest
 from test_softmax_op import stable_softmax


+def cross_entropy(softmax, label, soft_label, axis, ignore_index=-1):
+    if soft_label:
+        return (-label * np.log(softmax)).sum(axis=axis, keepdims=True)
+
+    shape = softmax.shape
+    axis %= len(shape)
+    n = int(np.prod(shape[:axis]))
+    axis_dim = shape[axis]
+    remain = int(np.prod(shape[axis + 1:]))
+    softmax_reshape = softmax.reshape((n, axis_dim, remain))
+    label_reshape = label.reshape((n, 1, remain))
+    result = np.zeros_like(label_reshape, dtype=softmax.dtype)
+    for i in range(n):
+        for j in range(remain):
+            lbl = label_reshape[i, 0, j]
+            if lbl != ignore_index:
+                result[i, 0, j] -= np.log(softmax_reshape[i, lbl, j])
+    return result.reshape(label.shape)
+
+
 class TestSoftmaxWithCrossEntropyOp(OpTest):
     """
     Test softmax with cross entropy operator with discrete one-hot labels.
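    Labels here are integer class indices along the softmax axis; the
    expected loss is produced by the `cross_entropy` reference helper above.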
""" def initParams(self): + self.op_type = "softmax_with_cross_entropy" self.numeric_stable_mode = False + self.soft_label = False self.dtype = np.float64 + self.axis = -1 + self.ignore_index = -1 + self.shape = [41, 37] def setUp(self): self.initParams() - self.op_type = "softmax_with_cross_entropy" - batch_size = 41 - class_num = 37 - logits = np.random.uniform(0.1, 1.0, - [batch_size, class_num]).astype(self.dtype) - softmax = np.apply_along_axis(stable_softmax, 1, logits) - labels = np.random.randint(0, class_num, [batch_size, 1], dtype="int64") + logits = np.random.uniform(0.1, 1.0, self.shape).astype(self.dtype) + softmax = np.apply_along_axis(stable_softmax, self.axis, logits) + + if self.soft_label: + labels = np.random.uniform(0.1, 1.0, self.shape).astype(self.dtype) + labels /= np.sum(labels, axis=self.axis, keepdims=True) + else: + axis_dim = self.shape[self.axis] + self.shape[self.axis] = 1 + labels = np.random.randint(0, axis_dim, self.shape, dtype="int64") - cross_entropy = np.asmatrix( - [[-np.log(softmax[i][labels[i][0]])] - for i in range(softmax.shape[0])], - dtype=self.dtype) + loss = cross_entropy(softmax, labels, self.soft_label, self.axis, + self.ignore_index) self.inputs = {"Logits": logits, "Label": labels} self.outputs = { "Softmax": softmax.astype(self.dtype), - "Loss": cross_entropy.astype(self.dtype) + "Loss": loss.astype(self.dtype) } - self.attrs = {"numeric_stable_mode": self.numeric_stable_mode} + self.attrs = { + "numeric_stable_mode": self.numeric_stable_mode, + "soft_label": self.soft_label, + } + if self.ignore_index >= 0: + self.attrs['ignore_index'] = self.ignore_index + if self.axis != -1: + self.attrs['axis'] = self.axis def test_check_output(self): self.check_output() @@ -62,30 +95,38 @@ class TestSoftmaxWithCrossEntropyOp(OpTest): class TestSoftmaxWithCrossEntropyOpNoCudnn(TestSoftmaxWithCrossEntropyOp): def initParams(self): + self.op_type = "softmax_with_cross_entropy" self.numeric_stable_mode = True + self.soft_label = False + self.shape = [3, 5, 7, 11] + self.axis = -1 + self.ignore_index = -1 + self.dtype = np.float64 class TestSoftmaxWithCrossEntropyOpFp16(TestSoftmaxWithCrossEntropyOp): def initParams(self): + self.op_type = "softmax_with_cross_entropy" self.numeric_stable_mode = False + self.soft_label = False + self.shape = [3, 5, 7, 11] + self.axis = -1 + self.ignore_index = -1 self.dtype = np.float16 def setUp(self): self.initParams() self.op_type = "softmax_with_cross_entropy" - batch_size = 41 - class_num = 37 # NOTE: numpy float16 have very low accuracy, use float32 for numpy check. 
-        logits = np.random.uniform(0.1, 1.0,
-                                   [batch_size, class_num]).astype(np.float32)
-        softmax = np.apply_along_axis(stable_softmax, 1, logits)
-        labels = np.random.randint(0, class_num, [batch_size, 1], dtype="int64")
+        logits = np.random.uniform(0.1, 1.0, self.shape).astype(np.float32)
+        softmax = np.apply_along_axis(stable_softmax, self.axis, logits)

-        cross_entropy = np.asmatrix(
-            [[-np.log(softmax[i][labels[i][0]])]
-             for i in range(softmax.shape[0])],
-            dtype=np.float32)
+        axis_dim = self.shape[self.axis]
+        self.shape[self.axis] = 1
+        labels = np.random.randint(0, axis_dim, self.shape, dtype="int64")
+
+        loss = cross_entropy(softmax, labels, self.soft_label, self.axis)

        self.inputs = {
            "Logits": logits.astype(self.dtype).view(np.uint16),
@@ -93,9 +134,14 @@ class TestSoftmaxWithCrossEntropyOpFp16(TestSoftmaxWithCrossEntropyOp):
        }
        self.outputs = {
            "Softmax": softmax.astype(self.dtype),
-            "Loss": cross_entropy.astype(self.dtype)
+            "Loss": loss.astype(self.dtype)
+        }
+        self.attrs = {
+            "numeric_stable_mode": self.numeric_stable_mode,
+            "soft_label": self.soft_label,
        }
-        self.attrs = {"numeric_stable_mode": self.numeric_stable_mode}
+        if self.axis != -1:
+            self.attrs['axis'] = self.axis

    def test_check_output(self):
        self.check_output(atol=1e-2)
@@ -107,39 +153,31 @@ class TestSoftmaxWithCrossEntropyOpFp16(TestSoftmaxWithCrossEntropyOp):
 class TestSoftmaxWithCrossEntropyOpNoCudnnFp16(
         TestSoftmaxWithCrossEntropyOpFp16):
     def initParams(self):
+        self.op_type = "softmax_with_cross_entropy"
         self.numeric_stable_mode = True
+        self.soft_label = False
+        self.shape = [3, 5, 7, 11]
+        self.axis = -1
+        self.ignore_index = -1
         self.dtype = np.float16

     def test_check_grad(self):
         self.check_grad(["Logits"], "Loss", max_relative_error=0.1)


-class TestSoftmaxWithCrossEntropyOp2(OpTest):
+class TestSoftmaxWithCrossEntropyOp2(TestSoftmaxWithCrossEntropyOp):
     """
     Test softmax with cross entropy operator with soft labels.
     """

-    def setUp(self):
+    def initParams(self):
         self.op_type = "softmax_with_cross_entropy"
-        batch_size = 41
-        class_num = 37
-
-        logits = np.random.uniform(0.1, 1.0,
-                                   [batch_size, class_num]).astype("float64")
-        softmax = np.apply_along_axis(stable_softmax, 1, logits)
-        labels = np.random.uniform(0.1, 1.0,
-                                   [batch_size, class_num]).astype("float64")
-        labels /= np.sum(labels, axis=1, keepdims=True)
-
-        cross_entropy = (-labels * np.log(softmax)).sum(
-            axis=1, keepdims=True).astype("float64")
-
-        self.inputs = {"Logits": logits, "Label": labels}
-        self.outputs = {
-            "Softmax": softmax.astype("float64"),
-            "Loss": cross_entropy.astype("float64")
-        }
-        self.attrs = {"soft_label": True}
+        self.numeric_stable_mode = True
+        self.soft_label = True
+        self.dtype = np.float64
+        self.axis = -1
+        self.ignore_index = -1
+        self.shape = [41, 37]

     def test_check_output(self):
         self.check_output()
@@ -148,190 +186,226 @@ class TestSoftmaxWithCrossEntropyOp2(OpTest):
         self.check_grad(["Logits"], "Loss")


-class TestSoftmaxWithCrossEntropyOp3(OpTest):
+class TestSoftmaxWithCrossEntropyOp3(TestSoftmaxWithCrossEntropyOp):
     """
     Test softmax with cross entropy operator with ignore_index.
""" def initParams(self): + self.op_type = "softmax_with_cross_entropy" self.numeric_stable_mode = False + self.soft_label = False + self.shape = [41, 37] + self.ignore_index = 5 + self.axis = -1 + self.dtype = np.float64 - def setUp(self): - self.initParams() + +class TestSoftmaxWithCrossEntropyOp3NoCudnn(TestSoftmaxWithCrossEntropyOp3): + def initParams(self): self.op_type = "softmax_with_cross_entropy" - batch_size = 41 - class_num = 37 - - logits = np.random.uniform(0.1, 1.0, - [batch_size, class_num]).astype("float64") - softmax = np.apply_along_axis(stable_softmax, 1, logits) - labels = np.random.randint(0, class_num, [batch_size, 1], dtype="int64") - ignore_index = 7 - cross_entropy = np.asmatrix( - [[-np.log(softmax[i][labels[i][0]])] - if labels[i] != ignore_index else [0] - for i in range(softmax.shape[0])], - dtype="float64") + self.numeric_stable_mode = True + self.soft_label = False + self.shape = [3, 5, 7, 11] + self.ignore_index = 4 + self.axis = -1 + self.dtype = np.float64 - self.inputs = {"Logits": logits, "Label": labels} - self.outputs = { - "Softmax": softmax.astype("float64"), - "Loss": cross_entropy.astype("float64") - } - self.attrs = { - "ignore_index": ignore_index, - "numeric_stable_mode": self.numeric_stable_mode - } - def test_check_output(self): - self.check_output() +class TestSoftmaxWithCrossEntropyOpAxis1(TestSoftmaxWithCrossEntropyOp): + """ + Test softmax with cross entropy operator with discreate one-hot labels. + Given axis != -1 + """ - def test_check_grad(self): - self.check_grad(["Logits"], "Loss") + def initParams(self): + self.op_type = "softmax_with_cross_entropy" + self.numeric_stable_mode = True + self.soft_label = False + self.dtype = np.float64 + self.axis = 0 + self.ignore_index = -1 + self.shape = [3, 5, 7, 11] -class TestSoftmaxWithCrossEntropyOp3NoCudnn(TestSoftmaxWithCrossEntropyOp3): +class TestSoftmaxWithCrossEntropyOpAxis2(TestSoftmaxWithCrossEntropyOp): + """ + Test softmax with cross entropy operator with discreate one-hot labels. + Given axis != -1 + """ + def initParams(self): + self.op_type = "softmax_with_cross_entropy" self.numeric_stable_mode = True + self.soft_label = False + self.dtype = np.float64 + self.axis = 1 + self.ignore_index = -1 + self.shape = [3, 5, 7, 11] -class TestSoftmaxWithCrossEntropyOp5(OpTest): +class TestSoftmaxWithCrossEntropyOpAxis3(TestSoftmaxWithCrossEntropyOp): """ - Test softmax with cross entropy operator with ignore_index. + Test softmax with cross entropy operator with discreate one-hot labels. + Given axis != -1 """ def initParams(self): - self.numeric_stable_mode = False + self.op_type = "softmax_with_cross_entropy" + self.numeric_stable_mode = True + self.soft_label = False + self.dtype = np.float64 + self.axis = 2 + self.ignore_index = -1 + self.shape = [3, 5, 7, 11] - def setUp(self): - self.initParams() + +class TestSoftmaxWithCrossEntropyOpAxis4(TestSoftmaxWithCrossEntropyOp): + """ + Test softmax with cross entropy operator with discreate one-hot labels. 

-class TestSoftmaxWithCrossEntropyOp3(OpTest):
+class TestSoftmaxWithCrossEntropyOp3(TestSoftmaxWithCrossEntropyOp):
     """
     Test softmax with cross entropy operator with ignore_index.
     """

     def initParams(self):
+        self.op_type = "softmax_with_cross_entropy"
         self.numeric_stable_mode = False
+        self.soft_label = False
+        self.shape = [41, 37]
+        self.ignore_index = 5
+        self.axis = -1
+        self.dtype = np.float64

-    def setUp(self):
-        self.initParams()
+
+class TestSoftmaxWithCrossEntropyOp3NoCudnn(TestSoftmaxWithCrossEntropyOp3):
+    def initParams(self):
         self.op_type = "softmax_with_cross_entropy"
-        batch_size = 41
-        class_num = 37
-
-        logits = np.random.uniform(0.1, 1.0,
-                                   [batch_size, class_num]).astype("float64")
-        softmax = np.apply_along_axis(stable_softmax, 1, logits)
-        labels = np.random.randint(0, class_num, [batch_size, 1], dtype="int64")
-        ignore_index = 7
-        cross_entropy = np.asmatrix(
-            [[-np.log(softmax[i][labels[i][0]])]
-             if labels[i] != ignore_index else [0]
-             for i in range(softmax.shape[0])],
-            dtype="float64")
+        self.numeric_stable_mode = True
+        self.soft_label = False
+        self.shape = [3, 5, 7, 11]
+        self.ignore_index = 4
+        self.axis = -1
+        self.dtype = np.float64

-        self.inputs = {"Logits": logits, "Label": labels}
-        self.outputs = {
-            "Softmax": softmax.astype("float64"),
-            "Loss": cross_entropy.astype("float64")
-        }
-        self.attrs = {
-            "ignore_index": ignore_index,
-            "numeric_stable_mode": self.numeric_stable_mode
-        }

-    def test_check_output(self):
-        self.check_output()
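Both ignore_index configurations above (5 for the 2-D shape, 4 for the 4-D shape) rely on the convention that positions whose label equals ignore_index contribute zero loss. A hedged sketch of that masking for hard labels, with take_along_axis standing in for the per-row gather the removed np.asmatrix loop performed:

    import numpy as np

    def hard_label_loss(softmax, labels, ignore_index, axis=-1):
        # -log(prob of the target class), zeroed where the label is
        # ignore_index; a sketch, not the test suite's actual helper.
        loss = -np.log(np.take_along_axis(softmax, labels, axis=axis))
        loss[labels == ignore_index] = 0.0
        return loss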
- """ +class TestSoftmaxWithCrossEntropyOpNoCudnnFp16Axis3( + TestSoftmaxWithCrossEntropyOpNoCudnnFp16): + def initParams(self): + self.op_type = "softmax_with_cross_entropy" + self.numeric_stable_mode = True + self.soft_label = False + self.shape = [3, 5, 7, 11] + self.axis = 2 + self.ignore_index = -1 + self.dtype = np.float16 - def setUp(self): + +class TestSoftmaxWithCrossEntropyOpSoftLabelAxis1( + TestSoftmaxWithCrossEntropyOp2): + def initParams(self): self.op_type = "softmax_with_cross_entropy" - batch_size = [6, 10] - class_num = 37 + self.numeric_stable_mode = True + self.soft_label = True + self.shape = [3, 5, 7, 11] + self.axis = 0 + self.ignore_index = -1 + self.dtype = np.float64 - logits = np.random.uniform( - 0.1, 1.0, tuple(batch_size + [class_num])).astype("float64") - softmax = np.apply_along_axis(stable_softmax, 2, logits) - labels = np.random.uniform( - 0.1, 1.0, tuple(batch_size + [class_num])).astype("float64") - labels /= np.sum(labels, axis=2, keepdims=True) - cross_entropy = (-labels * np.log(softmax)).sum( - axis=2, keepdims=True).astype("float64") +class TestSoftmaxWithCrossEntropyOpSoftLabelAxis2( + TestSoftmaxWithCrossEntropyOp2): + def initParams(self): + self.op_type = "softmax_with_cross_entropy" + self.numeric_stable_mode = True + self.soft_label = True + self.shape = [3, 5, 7, 11] + self.axis = 1 + self.ignore_index = -1 + self.dtype = np.float64 - self.inputs = {"Logits": logits, "Label": labels} - self.outputs = { - "Softmax": softmax.astype("float64"), - "Loss": cross_entropy.astype("float64") - } - self.attrs = {"soft_label": True} - def test_check_output(self): - self.check_output() +class TestSoftmaxWithCrossEntropyOpSoftLabelAxis3( + TestSoftmaxWithCrossEntropyOp2): + def initParams(self): + self.op_type = "softmax_with_cross_entropy" + self.numeric_stable_mode = True + self.soft_label = True + self.shape = [3, 5, 7, 11] + self.axis = 2 + self.ignore_index = -1 + self.dtype = np.float64 - def test_check_grad(self): - self.check_grad(["Logits"], "Loss") + +class TestSoftmaxWithCrossEntropyOpSoftLabelAxis4( + TestSoftmaxWithCrossEntropyOp2): + def initParams(self): + self.op_type = "softmax_with_cross_entropy" + self.numeric_stable_mode = True + self.soft_label = True + self.shape = [3, 5, 7, 11] + self.axis = 3 + self.ignore_index = -1 + self.dtype = np.float64 -class TestSoftmaxWithCrossEntropyOpFp16_2(TestSoftmaxWithCrossEntropyOp): +class TestSoftmaxWithCrossEntropyOpIgnoreIndexNoCudnnAxis1( + TestSoftmaxWithCrossEntropyOp3): def initParams(self): - self.numeric_stable_mode = False - self.dtype = np.float16 + self.op_type = "softmax_with_cross_entropy" + self.numeric_stable_mode = True + self.soft_label = False + self.shape = [3, 5, 7, 11] + self.ignore_index = 1 + self.axis = 0 + self.dtype = np.float64 - def setUp(self): - self.initParams() + +class TestSoftmaxWithCrossEntropyOpIgnoreIndexNoCudnnAxis2( + TestSoftmaxWithCrossEntropyOp3): + def initParams(self): self.op_type = "softmax_with_cross_entropy" - batch_size = [64, 10] - class_num = 37 + self.numeric_stable_mode = True + self.soft_label = False + self.shape = [3, 5, 7, 11] + self.ignore_index = 0 + self.axis = 1 + self.dtype = np.float64 - # NOTE: numpy float16 have very low accuracy, use float32 for numpy check. 
-    def setUp(self):
+
+class TestSoftmaxWithCrossEntropyOpSoftLabelAxis1(
+        TestSoftmaxWithCrossEntropyOp2):
+    def initParams(self):
         self.op_type = "softmax_with_cross_entropy"
-        batch_size = [6, 10]
-        class_num = 37
+        self.numeric_stable_mode = True
+        self.soft_label = True
+        self.shape = [3, 5, 7, 11]
+        self.axis = 0
+        self.ignore_index = -1
+        self.dtype = np.float64

-        logits = np.random.uniform(
-            0.1, 1.0, tuple(batch_size + [class_num])).astype("float64")
-        softmax = np.apply_along_axis(stable_softmax, 2, logits)
-        labels = np.random.uniform(
-            0.1, 1.0, tuple(batch_size + [class_num])).astype("float64")
-        labels /= np.sum(labels, axis=2, keepdims=True)
-
-        cross_entropy = (-labels * np.log(softmax)).sum(
-            axis=2, keepdims=True).astype("float64")

+class TestSoftmaxWithCrossEntropyOpSoftLabelAxis2(
+        TestSoftmaxWithCrossEntropyOp2):
+    def initParams(self):
+        self.op_type = "softmax_with_cross_entropy"
+        self.numeric_stable_mode = True
+        self.soft_label = True
+        self.shape = [3, 5, 7, 11]
+        self.axis = 1
+        self.ignore_index = -1
+        self.dtype = np.float64

-        self.inputs = {"Logits": logits, "Label": labels}
-        self.outputs = {
-            "Softmax": softmax.astype("float64"),
-            "Loss": cross_entropy.astype("float64")
-        }
-        self.attrs = {"soft_label": True}

-    def test_check_output(self):
-        self.check_output()
+class TestSoftmaxWithCrossEntropyOpSoftLabelAxis3(
+        TestSoftmaxWithCrossEntropyOp2):
+    def initParams(self):
+        self.op_type = "softmax_with_cross_entropy"
+        self.numeric_stable_mode = True
+        self.soft_label = True
+        self.shape = [3, 5, 7, 11]
+        self.axis = 2
+        self.ignore_index = -1
+        self.dtype = np.float64

-    def test_check_grad(self):
-        self.check_grad(["Logits"], "Loss")
+
+class TestSoftmaxWithCrossEntropyOpSoftLabelAxis4(
+        TestSoftmaxWithCrossEntropyOp2):
+    def initParams(self):
+        self.op_type = "softmax_with_cross_entropy"
+        self.numeric_stable_mode = True
+        self.soft_label = True
+        self.shape = [3, 5, 7, 11]
+        self.axis = 3
+        self.ignore_index = -1
+        self.dtype = np.float64


-class TestSoftmaxWithCrossEntropyOpFp16_2(TestSoftmaxWithCrossEntropyOp):
+class TestSoftmaxWithCrossEntropyOpIgnoreIndexNoCudnnAxis1(
+        TestSoftmaxWithCrossEntropyOp3):
     def initParams(self):
-        self.numeric_stable_mode = False
-        self.dtype = np.float16
+        self.op_type = "softmax_with_cross_entropy"
+        self.numeric_stable_mode = True
+        self.soft_label = False
+        self.shape = [3, 5, 7, 11]
+        self.ignore_index = 1
+        self.axis = 0
+        self.dtype = np.float64

-    def setUp(self):
-        self.initParams()
+
+class TestSoftmaxWithCrossEntropyOpIgnoreIndexNoCudnnAxis2(
+        TestSoftmaxWithCrossEntropyOp3):
+    def initParams(self):
         self.op_type = "softmax_with_cross_entropy"
-        batch_size = [64, 10]
-        class_num = 37
+        self.numeric_stable_mode = True
+        self.soft_label = False
+        self.shape = [3, 5, 7, 11]
+        self.ignore_index = 0
+        self.axis = 1
+        self.dtype = np.float64

-        # NOTE: numpy float16 have very low accuracy, use float32 for numpy check.
-        logits = np.random.uniform(
-            0.1, 1.0, tuple(batch_size + [class_num])).astype(np.float32)
-        softmax = np.apply_along_axis(stable_softmax, 2, logits)
-        labels = np.random.randint(
-            0, class_num, tuple(batch_size + [1]), dtype="int64")
-
-        softmax_2d = np.reshape(softmax, [-1, class_num])
-        labels_2d = np.reshape(labels, [-1, 1])
-
-        cross_entropy = np.asmatrix(
-            [[-np.log(softmax_2d[i][labels_2d[i][0]])]
-             for i in range(softmax_2d.shape[0])],
-            dtype=np.float32)
-
-        cross_entropy = np.reshape(cross_entropy, batch_size)
-        output_shape = tuple(batch_size + [1])
-        output_res = cross_entropy.astype(self.dtype)
-        output_res = np.expand_dims(output_res, axis=2)
-        self.inputs = {"Logits": logits, "Label": labels}
-        self.inputs = {
-            "Logits": logits.astype(self.dtype).view(np.uint16),
-            "Label": labels
-        }
-        self.outputs = {
-            "Softmax": softmax.astype(self.dtype),
-            "Loss": output_res,
-        }
-        self.attrs = {"numeric_stable_mode": self.numeric_stable_mode}
+class TestSoftmaxWithCrossEntropyOpIgnoreIndexNoCudnnAxis3(
+        TestSoftmaxWithCrossEntropyOp3):
+    def initParams(self):
+        self.op_type = "softmax_with_cross_entropy"
+        self.numeric_stable_mode = True
+        self.soft_label = False
+        self.shape = [3, 5, 7, 11]
+        self.ignore_index = 3
+        self.axis = 2
+        self.dtype = np.float64

-    def test_check_output(self):
-        self.check_output(atol=1e-2)

-    def test_check_grad(self):
-        self.check_grad(["Logits"], "Loss", max_relative_error=0.1)
+
+class TestSoftmaxWithCrossEntropyOpIgnoreIndexNoCudnnAxis4(
+        TestSoftmaxWithCrossEntropyOp3):
+    def initParams(self):
+        self.op_type = "softmax_with_cross_entropy"
+        self.numeric_stable_mode = True
+        self.soft_label = False
+        self.shape = [3, 5, 7, 11]
+        self.ignore_index = 3
+        self.axis = 3
+        self.dtype = np.float64


 if __name__ == "__main__":
diff --git a/python/paddle/fluid/tests/unittests/test_version.py b/python/paddle/fluid/tests/unittests/test_version.py
index 42a0e5c802c53ed0e6aad38fb9ab0f64122e87f5..a3927ef11d3d3a0340f8400e8c540efd38104f32 100644
--- a/python/paddle/fluid/tests/unittests/test_version.py
+++ b/python/paddle/fluid/tests/unittests/test_version.py
@@ -30,18 +30,14 @@ class VersionTest(unittest.TestCase):
         self._commit_regex = "[0-9a-f]{5,49}"

     def test_check_output(self):
-        # check commit format
-        self.assertTrue(re.match(self._commit_regex, fluid_version.commit))
         self.assertTrue(isinstance(fluid_version.istaged, bool))

         # check version format
         if fluid_version.istaged:
-            self.assertEqual(fluid_version.major, 0)
-            self.assertEqual(fluid_version.minor, 0)
-            self.assertEqual(fluid_version.patch, "0")
-            self.assertEqual(fluid_version.rc, 0)
-            self.assertEqual(fluid_version.full_version, "0.0.0")
+            self.assertEqual(fluid_version.full_version, "latest")
         else:
+            # check commit format
+            self.assertTrue(re.match(self._commit_regex, fluid_version.commit))
             self.assertTrue(re.match(self._major_regex, fluid_version.major))
             self.assertTrue(re.match(self._minor_regex, fluid_version.minor))
             self.assertTrue(re.match(self._patch_regex, fluid_version.patch))
diff --git a/python/paddle/fluid/tests/unittests/test_where.py b/python/paddle/fluid/tests/unittests/test_where.py
new file mode 100644
index 0000000000000000000000000000000000000000..ee0fa1613093c982320337aaa453114cfb187db4
--- /dev/null
+++ b/python/paddle/fluid/tests/unittests/test_where.py
@@ -0,0 +1,92 @@
+# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import print_function
+
+import unittest
+import numpy as np
+from op_test import OpTest
+import paddle.fluid.core as core
+from paddle.fluid.op import Operator
+
+
+class TestWhereOp(OpTest):
+    def setUp(self):
+        self.op_type = "where"
+        self.init_config()
+
+    def test_check_output(self):
+        self.check_output()
+
+    def init_config(self):
+        self.inputs = {'Condition': np.array([True, False, True]), }
+
+        self.outputs = {'Out': np.array([[0], [2]], dtype='int64')}
+
+
+class TestAllFalse(unittest.TestCase):
+    def setUp(self):
+        self.op_type = "where"
+        self.init_config()
+
+    def check_with_place(self, place):
+        scope = core.Scope()
+        condition = scope.var('Condition').get_tensor()
+        condition.set(self.cond_data, place)
+
+        out = scope.var("Out").get_tensor()
+        out.set(np.full(self.shape, 0).astype('int64'), place)
+
+        op = Operator("where", Condition="Condition", Out="Out")
+        op.run(scope, place)
+
+        out_array = np.array(out)
+        self.assertTrue((out_array == self.out_data).all())
+
+    def init_config(self):
+        self.cond_data = np.array([False, False, False])
+        self.shape = (3, 1)
+        self.out_data = np.array([], dtype='int64')
+
+    def test_all_false(self):
+        self.check_with_place(core.CPUPlace())
+
+        if core.is_compiled_with_cuda():
+            self.check_with_place(core.CUDAPlace(0))
+
+
+class TestRank2(TestWhereOp):
+    def init_config(self):
+        self.inputs = {'Condition': np.array([[True, False], [False, True]]), }
+
+        self.outputs = {'Out': np.array([[0, 0], [1, 1]], dtype='int64')}
+
+
+class TestRank3(TestWhereOp):
+    def init_config(self):
+        self.inputs = {
+            'Condition': np.array([[[True, False], [False, True]],
+                                   [[False, True], [True, False]],
+                                   [[False, False], [False, True]]]),
+        }
+
+        self.outputs = {
+            'Out': np.array(
+                [[0, 0, 0], [0, 1, 1], [1, 0, 1], [1, 1, 0], [2, 1, 1]],
+                dtype='int64')
+        }
+
+
+if __name__ == "__main__":
+    unittest.main()
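The new where operator returns the coordinates of true elements, so the expected "Out" tensors in test_where.py can be cross-checked against numpy's equivalent:

    import numpy as np

    print(np.argwhere(np.array([True, False, True])))
    # [[0]
    #  [2]]
    print(np.argwhere(np.array([[True, False], [False, True]])))
    # [[0 0]
    #  [1 1]]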
diff --git a/tools/document_preview.sh b/tools/document_preview.sh
new file mode 100755
index 0000000000000000000000000000000000000000..d0e9b3178a66477b5a6015f67bfa93e7e3ca9fcd
--- /dev/null
+++ b/tools/document_preview.sh
@@ -0,0 +1,13 @@
+#!/bin/bash
+PADDLE_ROOT=/paddle
+cd ${PADDLE_ROOT}
+git clone https://github.com/PaddlePaddle/FluidDoc
+git clone https://github.com/tianshuo78520a/PaddlePaddle.org.git
+sh ${PADDLE_ROOT}/FluidDoc/doc/fluid/api/gen_doc.sh
+pip install ${PADDLE_ROOT}/build/opt/paddle/share/wheels/*.whl
+apt-get update && apt-get install -y python-dev build-essential
+cd ${PADDLE_ROOT}/PaddlePaddle.org/portal
+pip install -r requirements.txt
+# The default port 8000 can be used if it is free; on CI, replace it with a random port passed as the first argument.
+sed -i "s#8000#$1#g" runserver
+nohup ./runserver --paddle ${PADDLE_ROOT}/FluidDoc &
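The preview script takes the port to serve on as its first argument; the sed line substitutes it for the default 8000 baked into runserver. A typical CI invocation would be something like `bash tools/document_preview.sh 8237` (the port number here is illustrative, not part of the patch).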