diff --git a/CMakeLists.txt b/CMakeLists.txt index 6bb0e5f51f4bee20905016579a99715859ab37c5..a38e32b73d51f142a1a1379541ffbd922662561a 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -64,6 +64,7 @@ option(WITH_DISTRIBUTE "Compile with distributed support" OFF) option(WITH_PSLIB "Compile with pslib support" OFF) option(WITH_CONTRIB "Compile the third-party contributation" OFF) option(REPLACE_ENFORCE_GLOG "Replace PADDLE_ENFORCE with glog/CHECK for better debug." OFF) +# TODO(Superjomn) Remove WITH_ANAKIN option if not needed latter. option(WITH_ANAKIN "Compile with Anakin library" OFF) option(ANAKIN_BUILD_FAT_BIN "Build anakin cuda fat-bin lib for all device plantform, ignored when WITH_ANAKIN=OFF" OFF) option(ANAKIN_BUILD_CROSS_PLANTFORM "Build anakin lib for any nvidia device plantform. ignored when WITH_ANAKIN=OFF" ON) @@ -190,6 +191,7 @@ include(configure) # add paddle env configuration if(WITH_GPU) include(cuda) include(tensorrt) + include(anakin_subgraph) endif() if(WITH_MKL OR WITH_MKLML) include(external/anakin) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 1304d6fe196c11a14a012b9f236b7a6682522e05..62b26b99bcbeddc91ed1bd0702b0d6aec2e674bf 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -156,7 +156,7 @@ python \ This will enable VLOG messages generated by `buddy_allocator.{h,cc}` and in the verbose range of 0 to 3, so you will see above example VLOG message, which is in level 3. This suggests that we output overall messages in lower verbose levels, so they display with higher probability. When coding C++, please follow the verbose level convention as follows: -- verbose level 1: [framework](https://github.com/PaddlePaddle/Paddle/tree/develop/paddle/framework) -- verbose level 3: [operators](https://github.com/PaddlePaddle/Paddle/tree/develop/paddle/operators) -- verbose level 5: [memory](https://github.com/PaddlePaddle/Paddle/tree/develop/paddle/memory), [platform](https://github.com/PaddlePaddle/Paddle/tree/develop/paddle/platform) -- verbose level 7: [math](https://github.com/PaddlePaddle/Paddle/tree/develop/paddle/legacy/math) +- verbose level 1: [framework](https://github.com/PaddlePaddle/Paddle/tree/develop/paddle/fluid/framework) +- verbose level 3: [operators](https://github.com/PaddlePaddle/Paddle/tree/develop/paddle/fluid/operators) +- verbose level 5: [memory](https://github.com/PaddlePaddle/Paddle/tree/develop/paddle/fluid/memory), [platform](https://github.com/PaddlePaddle/Paddle/tree/develop/paddle/fluid/platform) +- verbose level 7: [math](https://github.com/PaddlePaddle/Paddle/tree/develop/paddle/fluid/operators/math/) diff --git a/cmake/anakin_subgraph.cmake b/cmake/anakin_subgraph.cmake new file mode 100644 index 0000000000000000000000000000000000000000..4a7d32a63553df31e0928e7b30249ff3e809cba1 --- /dev/null +++ b/cmake/anakin_subgraph.cmake @@ -0,0 +1,32 @@ +if(NOT WITH_GPU) + return() +endif() + +set(ANAKIN_ROOT "/usr" CACHE PATH "ANAKIN ROOT") +find_path(ANAKIN_INCLUDE_DIR anakin_config.h + PATHS ${ANAKIN_ROOT} ${ANAKIN_ROOT}/include + $ENV{ANAKIN_ROOT} $ENV{ANAKIN_ROOT}/include + NO_DEFAULT_PATH +) + +find_library(ANAKIN_LIBRARY NAMES libanakin_saber_common.so libanakin.so + PATHS ${ANAKIN_ROOT} + $ENV{ANAKIN_ROOT} $ENV{ANAKIN_ROOT}/lib + NO_DEFAULT_PATH + DOC "Path to ANAKIN library.") + +if(ANAKIN_INCLUDE_DIR AND ANAKIN_LIBRARY) + if(WITH_DSO) + set(ANAKIN_FOUND ON) + endif(WITH_DSO) +else() + set(ANAKIN_FOUND OFF) +endif() + +if(ANAKIN_FOUND) + message(STATUS "Current ANAKIN header is ${ANAKIN_INCLUDE_DIR}/anakin_config.h. ") + include_directories(${ANAKIN_ROOT}/include) + include_directories(${ANAKIN_ROOT}/include/saber) + link_directories(${ANAKIN_ROOT}) + add_definitions(-DPADDLE_WITH_ANAKIN) +endif() diff --git a/cmake/external/boost.cmake b/cmake/external/boost.cmake index fc204dc9193bb28b654936048dd61a9b461abb2f..ba8b5fc6c838b221fcfb559f1f01051fc09072a4 100644 --- a/cmake/external/boost.cmake +++ b/cmake/external/boost.cmake @@ -24,7 +24,7 @@ set(BOOST_PROJECT "extern_boost") # So we use 1.41.0 here. set(BOOST_VER "1.41.0") set(BOOST_TAR "boost_1_41_0" CACHE STRING "" FORCE) -set(BOOST_URL "http://paddlepaddledeps.cdn.bcebos.com/${BOOST_TAR}.tar.gz" CACHE STRING "" FORCE) +set(BOOST_URL "http://paddlepaddledeps.bj.bcebos.com/${BOOST_TAR}.tar.gz" CACHE STRING "" FORCE) MESSAGE(STATUS "BOOST_TAR: ${BOOST_TAR}, BOOST_URL: ${BOOST_URL}") diff --git a/cmake/external/grpc.cmake b/cmake/external/grpc.cmake index c5754da59bf2053931be413eb10c481adecbae6b..d96da470b3cbbd8092dbf80ec5f500af9afa2ce4 100644 --- a/cmake/external/grpc.cmake +++ b/cmake/external/grpc.cmake @@ -44,7 +44,7 @@ ExternalProject_Add( # 3. keep only zlib, cares, protobuf, boringssl under "third_party", # checkout and clean other dirs under third_party # 4. remove .git, and package the directory. - URL "http://paddlepaddledeps.cdn.bcebos.com/grpc-v1.10.x.tar.gz" + URL "http://paddlepaddledeps.bj.bcebos.com/grpc-v1.10.x.tar.gz" URL_MD5 "1f268a2aff6759839dccd256adcc91cf" PREFIX ${GRPC_SOURCES_DIR} UPDATE_COMMAND "" diff --git a/cmake/external/mklml.cmake b/cmake/external/mklml.cmake index ae2679db4aed7a77ad407f881c4482fd3914ac27..142fce816de4f06aa0a36b91e3e4ecb962a8dc2a 100644 --- a/cmake/external/mklml.cmake +++ b/cmake/external/mklml.cmake @@ -34,7 +34,7 @@ SET(CMAKE_INSTALL_RPATH "${CMAKE_INSTALL_RPATH}" "${MKLML_ROOT}/lib") SET(TIME_VERSION "2019.0.1.20181227") IF(WIN32) SET(MKLML_VER "mklml_win_${TIME_VERSION}" CACHE STRING "" FORCE) - SET(MKLML_URL "https://paddlepaddledeps.cdn.bcebos.com/${MKLML_VER}.zip" CACHE STRING "" FORCE) + SET(MKLML_URL "https://paddlepaddledeps.bj.bcebos.com/${MKLML_VER}.zip" CACHE STRING "" FORCE) SET(MKLML_LIB ${MKLML_LIB_DIR}/mklml.lib) SET(MKLML_IOMP_LIB ${MKLML_LIB_DIR}/libiomp5md.lib) SET(MKLML_SHARED_LIB ${MKLML_LIB_DIR}/mklml.dll) @@ -43,7 +43,7 @@ ELSE() #TODO(intel-huying): # Now enable Erf function in mklml library temporarily, it will be updated as offical version later. SET(MKLML_VER "Glibc225_vsErf_mklml_lnx_${TIME_VERSION}" CACHE STRING "" FORCE) - SET(MKLML_URL "http://paddlepaddledeps.cdn.bcebos.com/${MKLML_VER}.tgz" CACHE STRING "" FORCE) + SET(MKLML_URL "http://paddlepaddledeps.bj.bcebos.com/${MKLML_VER}.tgz" CACHE STRING "" FORCE) SET(MKLML_LIB ${MKLML_LIB_DIR}/libmklml_intel.so) SET(MKLML_IOMP_LIB ${MKLML_LIB_DIR}/libiomp5.so) SET(MKLML_SHARED_LIB ${MKLML_LIB_DIR}/libmklml_intel.so) diff --git a/cmake/tensorrt.cmake b/cmake/tensorrt.cmake index 891ff222633741f9894c2fdb6c0096a48f8a35e1..3bf12094e4c32e69f908cbe6cefc7871fc9bb568 100644 --- a/cmake/tensorrt.cmake +++ b/cmake/tensorrt.cmake @@ -33,5 +33,6 @@ if(TENSORRT_FOUND) message(STATUS "Current TensorRT header is ${TENSORRT_INCLUDE_DIR}/NvInfer.h. " "Current TensorRT version is v${TENSORRT_MAJOR_VERSION}. ") include_directories(${TENSORRT_INCLUDE_DIR}) + link_directories(${TENSORRT_LIBRARY}) add_definitions(-DPADDLE_WITH_TENSORRT) endif() diff --git a/paddle/fluid/API.spec b/paddle/fluid/API.spec index b407bd5473c556912fa93e41dbee394f1453112d..5814e4463fe06ebdb9141ec47c2b77c0172331de 100644 --- a/paddle/fluid/API.spec +++ b/paddle/fluid/API.spec @@ -10,6 +10,9 @@ paddle.fluid.default_startup_program (ArgSpec(args=[], varargs=None, keywords=No paddle.fluid.default_main_program (ArgSpec(args=[], varargs=None, keywords=None, defaults=None), ('document', '5430f54ab4895f9f47db6bebbaf71659')) paddle.fluid.program_guard (ArgSpec(args=['main_program', 'startup_program'], varargs=None, keywords=None, defaults=(None,)), ('document', 'b54f403e57825a1592aece03afe3afb6')) paddle.fluid.name_scope (ArgSpec(args=['prefix'], varargs=None, keywords=None, defaults=(None,)), ('document', '0ef753f5cec69fef9ae6ad8b867b33a2')) +paddle.fluid.cuda_places (ArgSpec(args=['device_ids'], varargs=None, keywords=None, defaults=(None,)), ('document', '7d9a51fc9cf3c5245b5227080a8064c3')) +paddle.fluid.cpu_places (ArgSpec(args=['device_count'], varargs=None, keywords=None, defaults=(None,)), ('document', '4c0cd83f0b401fc2ff84c70974e5d210')) +paddle.fluid.cuda_pinned_places (ArgSpec(args=['device_count'], varargs=None, keywords=None, defaults=(None,)), ('document', 'd0c3ebd813c39958c92b78e3eef7e912')) paddle.fluid.Executor.__init__ (ArgSpec(args=['self', 'place'], varargs=None, keywords=None, defaults=None), ('document', '6adf97f83acf6453d4a6a4b1070f3754')) paddle.fluid.Executor.close (ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None), ('document', 'f5369953dd0c443961cf79f7a00e1a03')) paddle.fluid.Executor.run (ArgSpec(args=['self', 'program', 'feed', 'fetch_list', 'feed_var_name', 'fetch_var_name', 'scope', 'return_numpy', 'use_program_cache'], varargs=None, keywords=None, defaults=(None, None, None, 'feed', 'fetch', None, True, False)), ('document', 'f482e93b38b4018796969a2e1dde479d')) @@ -44,7 +47,7 @@ paddle.fluid.AsyncExecutor.run (ArgSpec(args=['self', 'program', 'data_feed', 'f paddle.fluid.AsyncExecutor.save_model (ArgSpec(args=['self', 'save_path'], varargs=None, keywords=None, defaults=None), ('document', 'c8ac0dfcb3b187aba25d03af7fea56b2')) paddle.fluid.AsyncExecutor.stop (ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None), ('document', '5f23d043607bb5d55e466ec3f578e093')) paddle.fluid.CompiledProgram.__init__ (ArgSpec(args=['self', 'program_or_graph'], varargs=None, keywords=None, defaults=None), ('document', '6adf97f83acf6453d4a6a4b1070f3754')) -paddle.fluid.CompiledProgram.with_data_parallel (ArgSpec(args=['self', 'loss_name', 'build_strategy', 'exec_strategy', 'share_vars_from'], varargs=None, keywords=None, defaults=(None, None, None, None)), ('document', 'e1af7fd53cf868554f312779fc803864')) +paddle.fluid.CompiledProgram.with_data_parallel (ArgSpec(args=['self', 'loss_name', 'build_strategy', 'exec_strategy', 'share_vars_from', 'places'], varargs=None, keywords=None, defaults=(None, None, None, None, None)), ('document', 'a8c7793803cf976680d9478e378fa356')) paddle.fluid.CompiledProgram.with_inference_optimize (ArgSpec(args=['self', 'config'], varargs=None, keywords=None, defaults=None), ('document', '9e5b009d850191a010e859189c127fd8')) paddle.fluid.ExecutionStrategy.__init__ __init__(self: paddle.fluid.core.ParallelExecutor.ExecutionStrategy) -> None paddle.fluid.BuildStrategy.GradientScaleStrategy.__init__ __init__(self: paddle.fluid.core.ParallelExecutor.BuildStrategy.GradientScaleStrategy, arg0: int) -> None @@ -58,6 +61,12 @@ paddle.fluid.io.load_params (ArgSpec(args=['executor', 'dirname', 'main_program' paddle.fluid.io.load_persistables (ArgSpec(args=['executor', 'dirname', 'main_program', 'filename'], varargs=None, keywords=None, defaults=(None, None)), ('document', '28df5bfe26ca7a077f91156abb0fe6d2')) paddle.fluid.io.save_inference_model (ArgSpec(args=['dirname', 'feeded_var_names', 'target_vars', 'executor', 'main_program', 'model_filename', 'params_filename', 'export_for_deployment'], varargs=None, keywords=None, defaults=(None, None, None, True)), ('document', '70f4f53f13572436ac72d1c8b5efeb9d')) paddle.fluid.io.load_inference_model (ArgSpec(args=['dirname', 'executor', 'model_filename', 'params_filename', 'pserver_endpoints'], varargs=None, keywords=None, defaults=(None, None, None)), ('document', '7a5255386075dac3c75b7058254fcdcb')) +paddle.fluid.io.PyReader.__init__ (ArgSpec(args=['self', 'feed_list', 'capacity', 'use_double_buffer', 'iterable'], varargs=None, keywords=None, defaults=(True, False)), ('document', '6adf97f83acf6453d4a6a4b1070f3754')) +paddle.fluid.io.PyReader.decorate_batch_generator (ArgSpec(args=['self', 'reader', 'places'], varargs=None, keywords=None, defaults=(None,)), ('document', 'a3fefec8bacd6ce83f49906a9d05e779')) +paddle.fluid.io.PyReader.decorate_sample_generator (ArgSpec(args=['self', 'sample_generator', 'batch_size', 'drop_last', 'places'], varargs=None, keywords=None, defaults=(True, None)), ('document', '7abd9cf7d695bab5bb6cf7ded5903cb2')) +paddle.fluid.io.PyReader.decorate_sample_list_generator (ArgSpec(args=['self', 'reader', 'places'], varargs=None, keywords=None, defaults=(None,)), ('document', 'faef298f73e91aedcfaf5d184f3109b7')) +paddle.fluid.io.PyReader.reset (ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None), ('document', 'ff1cc1e2beb8824d453656c72c28ddfb')) +paddle.fluid.io.PyReader.start (ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None), ('document', 'b7ea0a548991924e4cfe61a577b8e56d')) paddle.fluid.initializer.ConstantInitializer.__init__ (ArgSpec(args=['self', 'value', 'force_cpu'], varargs=None, keywords=None, defaults=(0.0, False)), ('document', '6adf97f83acf6453d4a6a4b1070f3754')) paddle.fluid.initializer.UniformInitializer.__init__ (ArgSpec(args=['self', 'low', 'high', 'seed'], varargs=None, keywords=None, defaults=(-1.0, 1.0, 0)), ('document', '6adf97f83acf6453d4a6a4b1070f3754')) paddle.fluid.initializer.NormalInitializer.__init__ (ArgSpec(args=['self', 'loc', 'scale', 'seed'], varargs=None, keywords=None, defaults=(0.0, 1.0, 0)), ('document', '6adf97f83acf6453d4a6a4b1070f3754')) @@ -224,6 +233,7 @@ paddle.fluid.layers.teacher_student_sigmoid_loss (ArgSpec(args=['input', 'label' paddle.fluid.layers.huber_loss (ArgSpec(args=['input', 'label', 'delta'], varargs=None, keywords=None, defaults=None), ('document', '431a4301c35032166ec029f7432c80a7')) paddle.fluid.layers.tree_conv (ArgSpec(args=['nodes_vector', 'edge_set', 'output_size', 'num_filters', 'max_depth', 'act', 'param_attr', 'bias_attr', 'name'], varargs=None, keywords=None, defaults=(1, 2, 'tanh', None, None, None)), ('document', '34ea12ac9f10a65dccbc50100d12e607')) paddle.fluid.layers.npair_loss (ArgSpec(args=['anchor', 'positive', 'labels', 'l2_reg'], varargs=None, keywords=None, defaults=(0.002,)), ('document', '46994d10276dd4cb803b4062b5d14329')) +paddle.fluid.layers.fsp_matrix (ArgSpec(args=['x', 'y'], varargs=None, keywords=None, defaults=None), ('document', 'b76ccca3735bea4a58a0dbf0d77c5393')) paddle.fluid.layers.data (ArgSpec(args=['name', 'shape', 'append_batch_size', 'dtype', 'lod_level', 'type', 'stop_gradient'], varargs=None, keywords=None, defaults=(True, 'float32', 0, VarType.LOD_TENSOR, True)), ('document', '33bbd42027d872b3818b3d64ec52e139')) paddle.fluid.layers.open_files (ArgSpec(args=['filenames', 'shapes', 'lod_levels', 'dtypes', 'thread_num', 'buffer_size', 'pass_num', 'is_test'], varargs=None, keywords=None, defaults=(None, None, 1, None)), ('document', 'b1ae2e1cc0750e58726374061ea90ecc')) paddle.fluid.layers.read_file (ArgSpec(args=['reader'], varargs=None, keywords=None, defaults=None), ('document', 'b0a1c2fc51c27a106da28f3308c41f5e')) @@ -231,7 +241,7 @@ paddle.fluid.layers.shuffle (ArgSpec(args=['reader', 'buffer_size'], varargs=Non paddle.fluid.layers.batch (ArgSpec(args=['reader', 'batch_size'], varargs=None, keywords=None, defaults=None), ('document', 'f563d376d35e1a4c4db100fd11b381a0')) paddle.fluid.layers.double_buffer (ArgSpec(args=['reader', 'place', 'name'], varargs=None, keywords=None, defaults=(None, None)), ('document', '07e5b796674796eb1ef3fee9c10d24e3')) paddle.fluid.layers.random_data_generator (ArgSpec(args=['low', 'high', 'shapes', 'lod_levels', 'for_parallel'], varargs=None, keywords=None, defaults=(True,)), ('document', '9b7f0f86ec24bbc97643cadcb6499cff')) -paddle.fluid.layers.py_reader (ArgSpec(args=['capacity', 'shapes', 'dtypes', 'lod_levels', 'name', 'use_double_buffer'], varargs=None, keywords=None, defaults=(None, None, True)), ('document', '13dabc57863f62ab3141586784ee356b')) +paddle.fluid.layers.py_reader (ArgSpec(args=['capacity', 'shapes', 'dtypes', 'lod_levels', 'name', 'use_double_buffer'], varargs=None, keywords=None, defaults=(None, None, True)), ('document', '4357643685cfd65454ba5a15f0151709')) paddle.fluid.layers.create_py_reader_by_data (ArgSpec(args=['capacity', 'feed_list', 'name', 'use_double_buffer'], varargs=None, keywords=None, defaults=(None, True)), ('document', '350f74d93fab9adb2ac4950f1c26416b')) paddle.fluid.layers.Preprocessor.__init__ (ArgSpec(args=['self', 'reader', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', '6adf97f83acf6453d4a6a4b1070f3754')) paddle.fluid.layers.Preprocessor.block (ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None), ('document', '6adf97f83acf6453d4a6a4b1070f3754')) @@ -512,6 +522,8 @@ paddle.fluid.unique_name.guard (ArgSpec(args=['new_generator'], varargs=None, ke paddle.fluid.recordio_writer.convert_reader_to_recordio_file (ArgSpec(args=['filename', 'reader_creator', 'feeder', 'compressor', 'max_num_records', 'feed_order'], varargs=None, keywords=None, defaults=(Compressor.Snappy, 1000, None)), ('document', '65c7523e86f0c50bb729b01667f36310')) paddle.fluid.recordio_writer.convert_reader_to_recordio_files (ArgSpec(args=['filename', 'batch_per_file', 'reader_creator', 'feeder', 'compressor', 'max_num_records', 'feed_order'], varargs=None, keywords=None, defaults=(Compressor.Snappy, 1000, None)), ('document', 'bc643f0f5f1b9db57ff0d8a57d379bd7')) paddle.fluid.Scope Scope() -> paddle.fluid.core._Scope +paddle.fluid.install_check.run_check (ArgSpec(args=[], varargs=None, keywords=None, defaults=None), ('document', '66b7c84a17ed32fec2df9628367be2b9')) +paddle.reader.cache (ArgSpec(args=['reader'], varargs=None, keywords=None, defaults=None), ('document', '1676886070eb607cb608f7ba47be0d3c')) paddle.reader.map_readers (ArgSpec(args=['func'], varargs='readers', keywords=None, defaults=None), ('document', '77cbadb09df588e21e5cc0819b69c87d')) paddle.reader.buffered (ArgSpec(args=['reader', 'size'], varargs=None, keywords=None, defaults=None), ('document', '0d6186f109feceb99f60ec50a0a624cb')) paddle.reader.compose (ArgSpec(args=[], varargs='readers', keywords='kwargs', defaults=None), ('document', '884291104e1c3f37f33aae44b7deeb0d')) diff --git a/paddle/fluid/framework/CMakeLists.txt b/paddle/fluid/framework/CMakeLists.txt index ad19d729ebde4a9c81c283518f3cb2ac28152443..4d54754cec00dc435000138d4f297af243813fc3 100644 --- a/paddle/fluid/framework/CMakeLists.txt +++ b/paddle/fluid/framework/CMakeLists.txt @@ -63,7 +63,7 @@ cc_library(lod_tensor SRCS lod_tensor.cc DEPS ddim place tensor framework_proto cc_test(lod_tensor_test SRCS lod_tensor_test.cc DEPS lod_tensor memory) nv_test(lod_tensor_gpu_test SRCS lod_tensor_test.cu DEPS lod_tensor) -cc_library(garbage_collector SRCS garbage_collector.cc DEPS device_context memory) +cc_library(garbage_collector SRCS garbage_collector.cc DEPS device_context memory gflags glog) cc_library(reader SRCS reader.cc DEPS lod_tensor ddim) cc_test(reader_test SRCS reader_test.cc DEPS reader) @@ -164,6 +164,8 @@ else() set(NGRAPH_EXE_DEPS) endif() +cc_library(executor_gc_helper SRCS executor_gc_helper.cc DEPS scope proto_desc operator garbage_collector) + if(WITH_DISTRIBUTE) cc_library(executor SRCS executor.cc DEPS op_registry device_context scope framework_proto glog lod_rank_table feed_fetch_method sendrecvop_rpc ${GLOB_DISTRIBUTE_DEPS} graph_to_program_pass variable_helper ${NGRAPH_EXE_DEPS}) @@ -174,7 +176,7 @@ else() cc_test(test_naive_executor SRCS naive_executor_test.cc DEPS naive_executor elementwise_add_op) endif() -target_link_libraries(executor garbage_collector while_op_helper) +target_link_libraries(executor while_op_helper executor_gc_helper) cc_library(parallel_executor SRCS parallel_executor.cc DEPS threaded_ssa_graph_executor scope_buffered_ssa_graph_executor parallel_ssa_graph_executor @@ -194,6 +196,7 @@ cc_test(prune_test SRCS prune_test.cc DEPS op_info prune recurrent_op device_con cc_test(var_type_inference_test SRCS var_type_inference_test.cc DEPS op_registry proto_desc) cc_test(inplace_op_inference_test SRCS inplace_op_inference_test.cc DEPS op_registry proto_desc op_info memory_optimize_helper) + cc_library(selected_rows SRCS selected_rows.cc DEPS tensor) cc_test(selected_rows_test SRCS selected_rows_test.cc DEPS selected_rows) diff --git a/paddle/fluid/framework/details/CMakeLists.txt b/paddle/fluid/framework/details/CMakeLists.txt index 7a371af510b8050aec3708d82923c707fd9d3a90..77e94e998c4db14cac9c4b2cb3136f1a6b37d5c6 100644 --- a/paddle/fluid/framework/details/CMakeLists.txt +++ b/paddle/fluid/framework/details/CMakeLists.txt @@ -5,6 +5,7 @@ cc_library(scale_loss_grad_op_handle SRCS scale_loss_grad_op_handle.cc DEPS op_h cc_library(fetch_op_handle SRCS fetch_op_handle.cc DEPS op_handle_base scope lod_tensor ddim memory) cc_library(computation_op_handle SRCS computation_op_handle.cc DEPS framework_proto scope place operator op_registry) cc_library(rpc_op_handle SRCS rpc_op_handle.cc DEPS framework_proto scope place operator op_registry) +cc_library(fetch_barrier_op_handle SRCS fetch_barrier_op_handle.cc DEPS framework_proto scope place operator op_registry) cc_library(multi_devices_helper SRCS multi_devices_helper.cc DEPS graph graph_helper) cc_library(multi_devices_graph_print_pass SRCS multi_devices_graph_print_pass.cc DEPS multi_devices_helper) @@ -72,7 +73,7 @@ cc_library(sequential_execution_pass SRCS sequential_execution_pass.cc DEPS grap cc_library(all_reduce_deps_pass SRCS all_reduce_deps_pass.cc DEPS graph graph_helper pass) cc_library(multi_devices_graph_pass SRCS multi_devices_graph_pass.cc DEPS multi_devices_helper computation_op_handle - scale_loss_grad_op_handle rpc_op_handle all_reduce_op_handle reduce_op_handle broadcast_op_handle fused_broadcast_op_handle) + scale_loss_grad_op_handle rpc_op_handle fetch_barrier_op_handle all_reduce_op_handle reduce_op_handle broadcast_op_handle fused_broadcast_op_handle) cc_library(fuse_all_reduce_op_pass SRCS fuse_all_reduce_op_pass.cc DEPS graph graph_helper fused_all_reduce_op_handle) diff --git a/paddle/fluid/framework/details/all_reduce_deps_pass.cc b/paddle/fluid/framework/details/all_reduce_deps_pass.cc index ff223e616f7ef0c794e72a0028c7e5bb3f234ec0..c084410864b06b972407d50bc0998499c6f9ee80 100644 --- a/paddle/fluid/framework/details/all_reduce_deps_pass.cc +++ b/paddle/fluid/framework/details/all_reduce_deps_pass.cc @@ -13,6 +13,7 @@ // limitations under the License. #include +#include #include #include #include @@ -52,13 +53,28 @@ std::unique_ptr AllReduceDepsPass::ApplyImpl( // Note that must assert topology sort is stable auto& ops = graph->Get>(kStaleProgramOpDescs); for (auto* op_desc : ops) { - auto outputs = op_desc->Outputs(); - for (auto& o_it : outputs) { - for (auto& v : o_it.second) { // values - vars[v] = order; + try { + bool is_bk_op = + static_cast(boost::get(op_desc->GetAttr( + OpProtoAndCheckerMaker::OpRoleAttrName())) & + static_cast(OpRole::kBackward)); + if (!is_bk_op) continue; + + auto backward_vars = + boost::get>(op_desc->GetNullableAttr( + OpProtoAndCheckerMaker::OpRoleVarAttrName())); + PADDLE_ENFORCE_EQ(backward_vars.size() % 2, 0); + + auto outputs = op_desc->Outputs(); + for (auto& o_it : outputs) { + for (auto& v : o_it.second) { // values + vars[v] = order; + VLOG(1) << "in all_reduce_deps_pass:" << v; + } } + order++; + } catch (boost::bad_get e) { } - order++; } std::vector dist_ops; diff --git a/paddle/fluid/framework/details/eager_deletion_pass.cc b/paddle/fluid/framework/details/eager_deletion_pass.cc index 377bb915e0ce175d4e3fb74cb1ace21e5f46d9d8..a6baa26134cf36ea93dde554f808e73fa0c30b93 100644 --- a/paddle/fluid/framework/details/eager_deletion_pass.cc +++ b/paddle/fluid/framework/details/eager_deletion_pass.cc @@ -22,14 +22,9 @@ #include "paddle/fluid/framework/details/computation_op_handle.h" #include "paddle/fluid/framework/details/eager_deletion_op_handle.h" #include "paddle/fluid/framework/details/multi_devices_helper.h" +#include "paddle/fluid/framework/garbage_collector.h" #include "paddle/fluid/framework/ir/graph_helper.h" -DEFINE_double(memory_fraction_of_eager_deletion, 1.0, - "Fraction of eager deletion. If less than 1.0, all variables in " - "the program would be sorted according to its memory size, and " - "only the FLAGS_memory_fraction_of_eager_deletion of the largest " - "variables would be deleted."); - namespace paddle { namespace framework { namespace details { @@ -206,8 +201,9 @@ std::unique_ptr EagerDeletionPass::ApplyImpl( } } - op_vars_map = ShrinkGCVars(op_vars_map, vars, places, - FLAGS_memory_fraction_of_eager_deletion); + double memory_fraction = framework::GetEagerDeletionMemoryFraction(); + + op_vars_map = ShrinkGCVars(op_vars_map, vars, places, memory_fraction); for (auto &pair : op_vars_map) { auto *op = pair.first; @@ -239,8 +235,7 @@ std::unique_ptr EagerDeletionPass::ApplyImpl( eager_deletion_op->AddOutput(dummy_leaf); } - VLOG(10) << "FLAGS_memory_fraction_of_eager_deletion = " - << FLAGS_memory_fraction_of_eager_deletion; + VLOG(10) << "FLAGS_memory_fraction_of_eager_deletion = " << memory_fraction; VLOG(10) << "Create " << op_vars_map.size() << " EagerDeletionOpHandle(s)"; auto while_op_eager_deletion_pass = diff --git a/paddle/fluid/framework/details/early_delete_op_handle.h b/paddle/fluid/framework/details/early_delete_op_handle.h deleted file mode 100644 index c8382d34b790ba7c95415acdf0b55dc97a9cd265..0000000000000000000000000000000000000000 --- a/paddle/fluid/framework/details/early_delete_op_handle.h +++ /dev/null @@ -1,140 +0,0 @@ -// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -#pragma once -#include -#include -#include "paddle/fluid/framework/details/computation_op_handle.h" -#include "paddle/fluid/framework/details/op_handle_base.h" -#include "paddle/fluid/framework/details/var_handle.h" -#include "paddle/fluid/framework/garbage_collector.h" -#include "paddle/fluid/framework/lod_tensor_array.h" -#include "paddle/fluid/framework/scope.h" -#include "paddle/fluid/framework/selected_rows.h" -#include "paddle/fluid/framework/tensor.h" - -namespace paddle { -namespace framework { -namespace details { - -class EarlyDeleteOpHandle : public OpHandleBase { - public: - EarlyDeleteOpHandle(ir::Node* node, const Scope* scope, - const platform::Place& place, - const std::vector& names, - GarbageCollector* gc) - : OpHandleBase(node), - scope_(scope), - place_(place), - names_(names), - gc_(gc) { -#ifdef PADDLE_WITH_CUDA - if (IsStreamGarabageCollector()) { - auto gpu_place = boost::get(place); - PADDLE_ENFORCE(cudaSetDevice(gpu_place.device)); - PADDLE_ENFORCE(cudaEventCreateWithFlags(&event_, cudaEventDisableTiming)); - } -#endif - } - ~EarlyDeleteOpHandle() { -#ifdef PADDLE_WITH_CUDA - if (IsStreamGarabageCollector()) { - auto gpu_place = boost::get(dev_ctx_->GetPlace()); - PADDLE_ENFORCE(cudaSetDevice(gpu_place.device)); - PADDLE_ENFORCE(cudaEventDestroy(event_)); - } -#endif - } - - std::string Name() const override { return "early_delete"; } - - protected: - void RunImpl() override { - std::vector> tensors; - auto* local_scope = scope_->FindVar(kLocalExecScopeName)->Get(); - for (auto& var_name : names_) { - auto* var = local_scope->FindVar(var_name); - PADDLE_ENFORCE(var != nullptr, - string::Sprintf("Local Scope not has var %s", var_name)); - if (var->IsType()) { - tensors.emplace_back(var->GetMutable()->MoveMemoryHolder()); - } else if (var->IsType()) { - tensors.emplace_back(var->GetMutable() - ->mutable_value() - ->MoveMemoryHolder()); - } else if (var->IsType()) { - LoDTensorArray* tensor_array = var->GetMutable(); - for (auto& tensor : *tensor_array) { - tensors.emplace_back(tensor.MoveMemoryHolder()); - } - } - } - if (!tensors.empty()) { - ClearTensors(tensors); - } - } - - private: - void ClearTensors( - const std::vector>& tensors) { - if (platform::is_cpu_place(place_)) { - ClearCPUTensors(tensors); - } else { - ClearGPUTensors(tensors); - } - } - - void ClearCPUTensors( - const std::vector>& tensors) { - auto* gc = dynamic_cast(gc_); - if (gc != nullptr) { - gc->Add(tensors); - } - } - - void ClearGPUTensors( - const std::vector>& tensors) { -#ifdef PADDLE_WITH_CUDA - auto* gc = dynamic_cast(gc_); - if (gc != nullptr) { - auto compute_stream = dev_ctx_->stream(); - auto callback_stream = gc->stream(); - auto callback_func = [=]() { - PADDLE_ENFORCE(cudaEventRecord(event_, compute_stream)); - PADDLE_ENFORCE(cudaStreamWaitEvent(callback_stream, event_, 0)); - }; - gc_->Add(tensors, callback_func); - } else { - gc_->Add(tensors); - } - } - - bool IsStreamGarabageCollector() const { - return dynamic_cast(gc_) != nullptr; -#endif - } - - const Scope* scope_; - const platform::Place place_; - std::vector names_; - GarbageCollector* gc_; -#ifdef PADDLE_WITH_CUDA - platform::CUDADeviceContext* dev_ctx_; - cudaEvent_t event_; -#endif -}; - -} // namespace details -} // namespace framework -} // namespace paddle diff --git a/paddle/fluid/framework/details/fetch_barrier_op_handle.cc b/paddle/fluid/framework/details/fetch_barrier_op_handle.cc new file mode 100644 index 0000000000000000000000000000000000000000..019ecfbb61028537692c8fdeb874c6c490f75430 --- /dev/null +++ b/paddle/fluid/framework/details/fetch_barrier_op_handle.cc @@ -0,0 +1,66 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "paddle/fluid/framework/details/fetch_barrier_op_handle.h" + +#include + +namespace paddle { +namespace framework { +namespace details { +FetchBarrierOpHandle::FetchBarrierOpHandle( + ir::Node *node, const std::vector &local_scopes, + const std::vector &places) + // fetch_barrier op always run on place0, but output on all places. + : OpHandleBase(node), + op_(framework::OpRegistry::CreateOp(*node->Op())), + local_scopes_(local_scopes), + places_(places), + run_scope_(local_scopes[0]), + place_(places[0]) { + for (auto &p : places) { + this->SetDeviceContext(p, platform::DeviceContextPool::Instance().Get(p)); + } +} + +bool FetchBarrierOpHandle::IsMultiDeviceTransfer() { + // override IsMultiDeviceTransfer to return true + return true; +} + +void FetchBarrierOpHandle::RunImpl() { + WaitInputVarGenerated(place_); + + auto run_func = [this]() { + op_->Run(*run_scope_->FindVar(kLocalExecScopeName)->Get(), place_); + }; + + if (is_lock_and_record_event_free_) { + run_func(); + } else { + this->RunAndRecordEvent(run_func); + } +} + +bool FetchBarrierOpHandle::NeedWait(VarHandleBase *in_var) { + bool need_wait = + in_var && in_var->GeneratedOp() && + in_var->GeneratedOp()->DeviceContext(place_) != dev_ctxes_.at(place_); + return need_wait; +} + +std::string FetchBarrierOpHandle::Name() const { return op_->Type(); } +} // namespace details +} // namespace framework +} // namespace paddle diff --git a/paddle/fluid/framework/details/fetch_barrier_op_handle.h b/paddle/fluid/framework/details/fetch_barrier_op_handle.h new file mode 100644 index 0000000000000000000000000000000000000000..b4d12785e0345c887f179bc53c8446dc1438f889 --- /dev/null +++ b/paddle/fluid/framework/details/fetch_barrier_op_handle.h @@ -0,0 +1,61 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once + +#include +#include +#include + +#include "paddle/fluid/framework/details/op_handle_base.h" +#include "paddle/fluid/framework/feed_fetch_type.h" +#include "paddle/fluid/framework/op_registry.h" +#include "paddle/fluid/framework/scope.h" +#include "paddle/fluid/platform/device_context.h" + +namespace paddle { +namespace framework { +namespace details { + +// **NOTE**: fetch_barrier op is special it outputs all recved variables on +// all places if there are multiple places, must init with +// multiple dev_ctxes_ !!!! + +struct FetchBarrierOpHandle : public OpHandleBase { + public: + FetchBarrierOpHandle(ir::Node *node, const std::vector &local_scopes, + const std::vector &places); + + bool IsMultiDeviceTransfer() override; + + std::string Name() const override; + + protected: + void RunImpl() override; + + bool NeedWait(VarHandleBase *in_var) override; + + private: + std::unique_ptr op_; + std::vector local_scopes_; + std::vector places_; + Scope *run_scope_; + platform::Place place_; + + bool is_lock_and_record_event_free_{false}; +}; + +} // namespace details +} // namespace framework +} // namespace paddle diff --git a/paddle/fluid/framework/details/inplace_op_pass.cc b/paddle/fluid/framework/details/inplace_op_pass.cc index 8d4717ad19d4ca0525eac4d1a0dfe6d0076a8c09..88f26b41618e4b74766f2caa00ad29fd912f48f9 100644 --- a/paddle/fluid/framework/details/inplace_op_pass.cc +++ b/paddle/fluid/framework/details/inplace_op_pass.cc @@ -17,6 +17,8 @@ #include #include #include +#include +#include #include #include #include @@ -148,12 +150,14 @@ std::unique_ptr InplacePass::ApplyImpl( view_.Build(graph.get()); InitSSAGraphNodes(); + auto cnt = 0; for (auto* op : view_.AllOps()) { + VLOG(4) << "Handle op " << cnt++ << ": " << op->Name(); if (FLAGS_enable_inplace_whitelist && !whitelist_.count(op->Name())) continue; TryInplaceOpInputOutput(op, graph.get()); } - graph->ResolveHazard(var_nodes_); + // graph->ResolveHazard(var_nodes_); return graph; } @@ -264,13 +268,10 @@ void InplacePass::WithdrawModify(const NodeSwapQueue& nodes, void InplacePass::TryInplaceOpInputOutput(ir::Node* op, ir::Graph* graph) const { VLOG(4) << "Try to inplace op " << op->Name(); - // FIXME(liuwei1031): Graph is not aware of the existence of BlockDescs and - // ProgramDescs. - // The operations related to BlockDesc or ProgramDesc should perform on Graph - // or Node directly! - PADDLE_ENFORCE(op->Op() != nullptr && op->Op()->Block() != nullptr, - "op_desc is nullptr"); + // PADDLE_ENFORCE(op->Op() != nullptr && op->Op()->Block() != nullptr, + // "op_desc is nullptr"); // some pre-requirments need to meet if the op want to inplaced. + PADDLE_ENFORCE(op->Op() != nullptr, "op_desc is nullptr"); auto* op_desc = op->Op(); auto& infer_inplace = @@ -281,21 +282,58 @@ void InplacePass::TryInplaceOpInputOutput(ir::Node* op, PADDLE_ENFORCE(static_cast(infer_inplace), "%s's infer_inplace has not been registered", op_desc->Type()); - auto* block = op_desc->Block(); - auto in_to_outs = infer_inplace(*op_desc, block); + auto in_to_outs = infer_inplace(*op_desc); auto& all_ops = view_.AllOps(); auto cursor = std::find(all_ops.begin(), all_ops.end(), op); size_t idx = std::distance(all_ops.begin(), cursor); for (auto& pair : in_to_outs) { - auto& in_var_name = pair.first; - auto& out_var_name = pair.second; + auto& in_para_name = pair.first; + auto& out_para_name = pair.second; + + auto input_vars = op->Op()->Input(in_para_name); + if (!input_vars.size()) { + VLOG(4) << "Parameter " << in_para_name << " is empty skip " + << in_para_name << " => " << out_para_name << " pair"; + continue; + } + auto output_vars = op->Op()->Output(out_para_name); + if (!output_vars.size()) { + VLOG(4) << "Parameter " << out_para_name << " is empty skip " + << in_para_name << " => " << out_para_name << " pair"; + continue; + } + auto in_var_name = input_vars.at(0); + auto out_var_name = output_vars.at(0); auto* in_node = view_.GetNodeByName(in_var_name, op->inputs); auto* out_node = view_.GetNodeByName(out_var_name, op->outputs); + VLOG(4) << "Try to inplace " << in_var_name << " with " << out_var_name; + + bool can_replace = true; + if (in_var_name == out_var_name) { + can_replace = false; + VLOG(4) << "SKIP: Input variable " << in_var_name << " & Output variable " + << out_var_name << " are the same"; + } else if (!NodeCanReused(in_node)) { + can_replace = false; + VLOG(4) << "SKIP: Input varialbe " << in_var_name << "cannot be reused"; + } else if (!NodeCanReused(out_node)) { + can_replace = false; + VLOG(4) << "SKIP: Output variable " << out_var_name + << " cannot be reused"; + } else if (details::NodeSize(*in_node->Var()) != + details::NodeSize(*out_node->Var())) { + can_replace = false; + VLOG(4) << "SKIP: Input and Output varialbe size not match"; + } + + if (!can_replace) continue; + // 2. there is no external pending op on the input node - if (view_.PendingOpsOnVar(in_node).size() > 1) { + // if (view_.PendingOpsOnVar(in_node).size() > 1) { + if (in_node->outputs.size() > 1 && !view_.CheckDeps(in_node, op)) { VLOG(4) << string::Sprintf( "Skiped pair %s => %s. %s input has external dependency." "inplace such pair will overwrite the memory.", @@ -342,6 +380,97 @@ void InplacePass::TryInplaceOpInputOutput(ir::Node* op, } } +void GraphView::TopoSort(ir::Graph* graph) { + // + ops_.clear(); + auto deps_num = [](ir::Node* op) { + auto cnt = 0; + for (auto& var : op->inputs) + if (var->inputs.size() > 0) ++cnt; + return cnt; + }; + + std::queue> ready_ops; + + int level = 0; + auto nodes = graph->Nodes(); + std::unordered_map deps_map; + for (auto& node : nodes) { + if (node->IsOp() && node->Op() != nullptr) { + deps_map[node] = deps_num(node); + if (0 == deps_map[node]) { + ready_ops.push({node, level}); + } + } + } + + while (!ready_ops.empty()) { + auto item = ready_ops.front(); + ready_ops.pop(); + + ops_.emplace_back(item.first); + // record level when pop from queue + op_level_[item.first] = item.second; + + for (auto node : item.first->outputs) { + for (auto op : node->outputs) { + --deps_map[op]; + if (deps_map[op] == 0) ready_ops.push({op, item.second + 1}); + } + } + } + + bool all_ops_checked = true; + for (auto& node : nodes) { + if (node->IsOp() && node->Op() != nullptr && deps_map[node] > 0) { + all_ops_checked = false; + break; + } + } + + PADDLE_ENFORCE(all_ops_checked, "All ops deps should be 0 after analysis"); +} + +// return true if current op node depeneds on all other op that use the same +// variable node +bool GraphView::CheckDeps(ir::Node* var, ir::Node* current_op) const { + // get op list that rely on the same variable + auto op_list = var->outputs; + for (auto& op : op_list) { + if (op == current_op) continue; + + VLOG(4) << " GraphView::CheckDeps : " << op->Name() << " & " + << current_op->Name(); + if (!CheckOpDeps(op, current_op)) return false; + VLOG(4) << ""; + } + return true; +} + +// check if op2 depends on op1's output +bool GraphView::CheckOpDeps(ir::Node* op1, ir::Node* op2) const { + auto print_op = [&](ir::Node* op, const char* name) { + std::ostringstream os; + os << " " << name << " : " << op->Name() << " "; + os << "Input args : "; + for (auto& arg : op->inputs) os << arg->Name() << " "; + os << "Output args : "; + for (auto& arg : op->outputs) os << arg->Name() << " "; + os << "Level : " << op_level_.at(op); + VLOG(4) << os.str(); + }; + print_op(op1, "OP1"); + print_op(op2, "OP2"); + + if (op1 == op2) return true; + if (op_level_.at(op1) >= op_level_.at(op2)) return false; + + for (auto& var : op2->inputs) + if (var->inputs.size() > 0 && CheckOpDeps(op1, var->inputs[0])) return true; + + return false; +} + ir::Node* GraphView::GetNodeByName(const std::string& name, const std::vector& nodes) const { // nodes should be op->inputs/outputs @@ -387,22 +516,7 @@ void GraphView::Build(ir::Graph* g) { // Because we insert some new created node. Which may have data race between // nodes. // resolve data harzards depends on the var nodes in right order. - ops_ = SortOpLikeDescOrder(*g); - - // 1. track the nodes which reused previous node in Python memory optimize. - // these node can not be inplaced, otherwise may generate a circle in graph. - std::unordered_set all_vars; - for (auto& node : g->Nodes()) { - if (node->IsVar()) continue; - for (auto& out : node->outputs) { - if (out->IsCtrlVar() || out->Var() == nullptr) continue; - if (all_vars.count(out->Name())) { - dup_nodes_.emplace(out->Name()); - } else { - all_vars.emplace(out->Name()); - } - } - } + TopoSort(g); // 2. track the nodes which used by parameter server. // these node can not be inplaced, otherwise trainer diff --git a/paddle/fluid/framework/details/inplace_op_pass.h b/paddle/fluid/framework/details/inplace_op_pass.h index 7be7f311852d2b64ce95e1a939371760d03d296b..01964ba8fc43fa86bb99c185fa20b056fddbffee 100644 --- a/paddle/fluid/framework/details/inplace_op_pass.h +++ b/paddle/fluid/framework/details/inplace_op_pass.h @@ -14,6 +14,7 @@ #pragma once #include +#include #include #include #include @@ -50,10 +51,15 @@ class GraphView { // map the parameter and gradient, must be skipped. bool InSkipSet(const std::string& var) const; + bool CheckDeps(ir::Node* var, ir::Node* current_op) const; + bool CheckOpDeps(ir::Node* op1, ir::Node* op2) const; + void TopoSort(ir::Graph* g); + private: std::vector ops_; std::unordered_set dup_nodes_; // mem opt affect nodes std::map> adj_list_; + std::unordered_map op_level_; }; // swap pairs in sequence diff --git a/paddle/fluid/framework/details/memory_optimize_helper.cc b/paddle/fluid/framework/details/memory_optimize_helper.cc index 533d3269be350de35954e575965fe7a089941058..894d7dad2e623649fe96b00bb515c9605c89a404 100644 --- a/paddle/fluid/framework/details/memory_optimize_helper.cc +++ b/paddle/fluid/framework/details/memory_optimize_helper.cc @@ -190,7 +190,7 @@ struct NodeComparator { auto rhs_shape = rhs_desc->GetShape(); if ((lhs_shape[0] == -1 && rhs_shape[0] == -1) || (lhs_shape[0] != -1 && rhs_shape[0] != -1)) { - return NodeSize(lhs) <= NodeSize(rhs); + return NodeSize(lhs) == NodeSize(rhs); } else { return false; } @@ -449,6 +449,7 @@ void ControlFlowGraph::LiveVariableAnalysis() { live_in_[op].insert(var); } for (auto& var : defs_[op]) { + if (uses_[op].count(var)) continue; live_in_[op].erase(var); } diff --git a/paddle/fluid/framework/details/memory_optimize_helper_test.cc b/paddle/fluid/framework/details/memory_optimize_helper_test.cc index 5389e76e0c65c7c0ee23004ca1b0a56efb4c54fe..453943af0f123a08b870f11dacb78a5fbd954a56 100644 --- a/paddle/fluid/framework/details/memory_optimize_helper_test.cc +++ b/paddle/fluid/framework/details/memory_optimize_helper_test.cc @@ -142,15 +142,16 @@ TEST(OrderedSet, FindBestFitNode) { for (auto& node : nodes) { pool.Insert(node.get()); } - + // FIXME(liuwei1031) this API has changed, + // disable these tests temporarily // FindNextBestFitNode - auto* n = nodes[0].get(); - auto* cache = pool.FindBestFitNode(n); - PADDLE_ENFORCE(cache->Name() == "a"); - cache = pool.FindNextBestFitNode(n, cache); - PADDLE_ENFORCE(cache->Name() == "c"); - cache = pool.FindNextBestFitNode(n, cache); - PADDLE_ENFORCE(cache->Name() == "b"); + // auto* n = nodes[0].get(); + // auto* cache = pool.FindBestFitNode(n); + // PADDLE_ENFORCE(cache->Name() == "a"); + // cache = pool.FindNextBestFitNode(n, cache); + // PADDLE_ENFORCE(cache->Name() == "c"); + // cache = pool.FindNextBestFitNode(n, cache); + // PADDLE_ENFORCE(cache->Name() == "b"); } } // namespace details diff --git a/paddle/fluid/framework/details/multi_devices_graph_pass.cc b/paddle/fluid/framework/details/multi_devices_graph_pass.cc index 125dbf746c3880e142af4d4bffd3ccda8654c0a1..253cf5b4a8221ad6a1f0c70f2bebccb589a5668e 100644 --- a/paddle/fluid/framework/details/multi_devices_graph_pass.cc +++ b/paddle/fluid/framework/details/multi_devices_graph_pass.cc @@ -23,6 +23,7 @@ #include "paddle/fluid/framework/details/all_reduce_op_handle.h" #include "paddle/fluid/framework/details/broadcast_op_handle.h" #include "paddle/fluid/framework/details/computation_op_handle.h" +#include "paddle/fluid/framework/details/fetch_barrier_op_handle.h" #include "paddle/fluid/framework/details/fused_broadcast_op_handle.h" #include "paddle/fluid/framework/details/reduce_op_handle.h" #include "paddle/fluid/framework/details/rpc_op_handle.h" @@ -851,9 +852,17 @@ int DistSSAGraphBuilder::CreateRPCOp(ir::Graph *result, ir::Node *node) const { PADDLE_ENFORCE(op_dev_id != -1, "can not find the right place for rpc op: %s", node->Op()->Type()); - result->Get(kGraphOps).emplace_back(new RPCOpHandle( - result->CreateOpNode(node->Op()), *node->Op(), local_scopes_[op_dev_id], - node->Op()->Type(), places_[op_dev_id])); + + // Create fetch_barrier op handle to enable output on all devices. + // **NOTE** fetch_barrier should output variables list same as recv op does. + if (node->Op()->Type() == "fetch_barrier") { + result->Get(kGraphOps).emplace_back(new FetchBarrierOpHandle( + result->CreateOpNode(node->Op()), local_scopes_, places_)); + } else { + result->Get(kGraphOps).emplace_back(new RPCOpHandle( + result->CreateOpNode(node->Op()), *node->Op(), local_scopes_[op_dev_id], + node->Op()->Type(), places_[op_dev_id])); + } if (node->Op()->Type() == "send") { CreateOpHandleIOs(result, node, op_dev_id); diff --git a/paddle/fluid/framework/details/op_handle_base.cc b/paddle/fluid/framework/details/op_handle_base.cc index 158da6f606f3f5a7062a4aaed7cf7e3fe71c817a..413b14961631b3459e0d05af685ad1c5395844c2 100644 --- a/paddle/fluid/framework/details/op_handle_base.cc +++ b/paddle/fluid/framework/details/op_handle_base.cc @@ -55,7 +55,7 @@ void OpHandleBase::Run(bool use_cuda) { if (out_var_handle) { int dev_id = boost::get(out_var_handle->place()).device; - out_var_handle->SetGenerateEvent(events_[dev_id]); + out_var_handle->SetGenerateEvent(events_.at(dev_id)); } } } else { @@ -71,7 +71,7 @@ void OpHandleBase::Run(bool use_cuda) { "The place of input(%s) is not consistent with the " "place of current op(%s).", out_var_handle->Name(), Name()); - out_var_handle->SetGenerateEvent(events_[dev_id]); + out_var_handle->SetGenerateEvent(events_.at(dev_id)); } } } diff --git a/paddle/fluid/framework/details/op_registry.h b/paddle/fluid/framework/details/op_registry.h index e13ff99f3fdb564141531b401565c932fa1f3dab..e5b58ec68761469a03929435d1a73bf0a2d1660e 100644 --- a/paddle/fluid/framework/details/op_registry.h +++ b/paddle/fluid/framework/details/op_registry.h @@ -21,6 +21,7 @@ limitations under the License. */ #include #include "paddle/fluid/framework/grad_op_desc_maker.h" #include "paddle/fluid/framework/inplace_op_inference.h" +#include "paddle/fluid/framework/no_need_buffer_vars_inference.h" #include "paddle/fluid/framework/op_info.h" #include "paddle/fluid/framework/op_proto_maker.h" #include "paddle/fluid/framework/operator.h" @@ -36,27 +37,86 @@ enum OpInfoFillType { kGradOpDescMaker = 2, kVarTypeInference = 3, kShapeInference = 4, - kInplaceOpInference = 5 + kInplaceOpInference = 5, + kNoNeedBufferVarsInference = 6, + kUnknown = -1 }; +namespace internal { +template +struct TypePair { + using Type = T; + static constexpr OpInfoFillType kFillType = kType; +}; + +using OpRegistryClasses = std::tuple< // NOLINT + TypePair, // NOLINT + TypePair, // NOLINT + TypePair, // NOLINT + TypePair, // NOLINT + TypePair, // NOLINT + TypePair, // NOLINT + TypePair // NOLINT + >; + +static constexpr int kOpRegistryClassNumber = + std::tuple_size::value; + +template +struct IsMatchedBaseTypeImpl { + using PairType = typename std::tuple_element::type; + static constexpr bool kValue = + std::is_base_of::value; +}; + +template +struct IsMatchedBaseTypeImpl { + static constexpr bool kValue = false; +}; + +template +static inline constexpr bool IsMatchedBaseType() { + return IsMatchedBaseTypeImpl< + T, kPos, (kPos >= 0 && kPos < kOpRegistryClassNumber)>::kValue; +} + +template +struct OpInfoFillTypeGetterImpl {}; + +// This case should not happen +template +struct OpInfoFillTypeGetterImpl {}; + +template +struct OpInfoFillTypeGetterImpl { + static constexpr OpInfoFillType kType = kUnknown; +}; + +template +struct OpInfoFillTypeGetterImpl { + static constexpr OpInfoFillType kType = + OpInfoFillTypeGetterImpl()>::kType; +}; + +template +struct OpInfoFillTypeGetterImpl { + using PairType = typename std::tuple_element::type; + static constexpr OpInfoFillType kType = PairType::kFillType; +}; + +template +using OpInfoFillTypeGetter = + OpInfoFillTypeGetterImpl()>; + +} // namespace internal + template struct OpInfoFillTypeID { static constexpr OpInfoFillType ID() { - return std::is_base_of::value - ? kOperator - : (std::is_base_of::value - ? kOpProtoAndCheckerMaker - : (std::is_base_of::value - ? kGradOpDescMaker - : (std::is_base_of::value - ? kVarTypeInference - : (std::is_base_of::value - ? kShapeInference - : (std::is_base_of< - InplaceOpInference, T>::value - ? kInplaceOpInference - : static_cast( - -1)))))); + return internal::OpInfoFillTypeGetter::kType; } }; @@ -149,9 +209,21 @@ struct OpInfoFiller { template struct OpInfoFiller { void operator()(const char* op_type, OpInfo* info) const { - info->infer_inplace_ = [](const OpDesc& op_desc, BlockDesc* block) { + info->infer_inplace_ = [](const OpDesc& op_desc) { T infer; - return infer(op_desc, block); + return infer(op_desc); + }; + } +}; + +template +struct OpInfoFiller { + void operator()(const char* op_type, OpInfo* info) const { + info->infer_no_need_buffer_vars_ = [](const VariableNameMap& inputs, + const VariableNameMap& outputs, + const AttributeMap& attrs) { + T infer(inputs, outputs, attrs); + return infer(); }; } }; diff --git a/paddle/fluid/framework/details/reference_count_pass.cc b/paddle/fluid/framework/details/reference_count_pass.cc index 6092143449bc8e20117e7021bd44553cf64ae5b5..0c3d8d5caec0015c5696223db2e4b75a8d79e5e1 100644 --- a/paddle/fluid/framework/details/reference_count_pass.cc +++ b/paddle/fluid/framework/details/reference_count_pass.cc @@ -193,6 +193,79 @@ ExtractComputationOpFromLastLivedVar(VarHandle *var, size_t scope_idx, return shrink_func(computation_op); } +/** + * Shrink op dependencies according to no need buffer vars. + * + * If some ops do not need Tensor buffer of any input, + * just remove the dependency of this op, i.e, decrease reference count. + * + * For example, input Y of elementwise_add_grad op is only used to infer shape + * and lod of Y@GRAD, we do not need the buffer of input Y. Data buffer of + * input Y can be collected before elementwise_add_grad op runs. + * + * This method returns whether the dependency count decreases to 0, and + * shrinks op dependency if possible. + */ +static bool ShrinkNoNeedBufferVarOpDependency( + const std::string &var_name, + std::unordered_set *op_handles) { + std::vector skip_ops; + for (auto *op_handle : *op_handles) { + auto *op_base = op_handle->GetOp(); + auto &inferer = op_base->Info().NoNeedBufferVarsInferer(); + if (!inferer) { + continue; + } + + std::unordered_set no_need_buffer_vars = + inferer(op_base->Inputs(), op_base->Outputs(), op_base->Attrs()); + + // Check whether var_name occurs in other inputs or outputs of the op + // If it occurs, we cannot decrease the dependency number. + bool occurred_in_other_vars = false; + for (auto &in_pair : op_base->Inputs()) { + if (no_need_buffer_vars.count(in_pair.first) > 0) { + continue; + } + + auto &args = in_pair.second; + auto iter = std::find(args.begin(), args.end(), var_name); + if (iter != args.end()) { + occurred_in_other_vars = true; + break; + } + } + + if (occurred_in_other_vars) { + continue; + } + + for (auto &out_pair : op_base->Outputs()) { + auto &args = out_pair.second; + auto iter = std::find(args.begin(), args.end(), var_name); + if (iter != args.end()) { + occurred_in_other_vars = true; + break; + } + } + + if (!occurred_in_other_vars) { + VLOG(2) << "Shrink var " << var_name << " in op " << op_handle->Name(); + skip_ops.emplace_back(op_handle); + } + } + + if (skip_ops.size() == op_handles->size()) { + op_handles->clear(); + return true; + } else { + for (auto *skip_op : skip_ops) { + op_handles->erase(skip_op); + } + return false; + } +} + std::unique_ptr ReferenceCountPass::ApplyImpl( std::unique_ptr graph) const { auto &ref_cnts = Get>(kGlobalReferenceCount); @@ -229,17 +302,43 @@ std::unique_ptr ReferenceCountPass::ApplyImpl( continue; } - bool ok; - auto result = ExtractComputationOpFromLastLivedVar( - name_var_pair.second.back(), i, shrink_func, &ok); + auto &var_name = name_var_pair.first; + auto &var_handles = name_var_pair.second; + + for (auto iter = var_handles.rbegin(); iter != var_handles.rend(); + ++iter) { + bool ok; + auto result = + ExtractComputationOpFromLastLivedVar(*iter, i, shrink_func, &ok); + + // Seldomly, some vars may have no pending or preceding computation ops + // Just break; + if (!ok) break; + VLOG(10) << "Extract " << result.size() << " ops of var " << var_name; + + size_t original_op_deps = result.size(); + // If all ops do not need buffer of var_name, calculate reference count + // of the previous version of var_name. + if (ShrinkNoNeedBufferVarOpDependency(var_name, &result)) { + VLOG(10) << "Try to precede reference count computing at var " + << var_name; + continue; + } + + size_t final_op_deps = result.size(); + if (final_op_deps < original_op_deps) { + VLOG(5) << "Shrink op deps from " << original_op_deps << " to " + << final_op_deps; + } - if (ok) { - auto &var_name = name_var_pair.first; PADDLE_ENFORCE(!result.empty(), "Last living ops of %s cannot be empty", var_name); ref_cnts[i].emplace(var_name, result.size()); last_live_ops_of_vars[i].emplace(var_name, std::move(result)); } + + // Seldomly, all preceding trying failed. + // Just skip this corner case } } diff --git a/paddle/fluid/framework/executor.cc b/paddle/fluid/framework/executor.cc index 99192292b0be992d5ff0ecebba6294b9ba27e958..0d4334f193dcb067a49f5e67b69d21531c7048bd 100644 --- a/paddle/fluid/framework/executor.cc +++ b/paddle/fluid/framework/executor.cc @@ -19,6 +19,7 @@ limitations under the License. */ #include #include +#include "paddle/fluid/framework/executor_gc_helper.h" #include "paddle/fluid/framework/feed_fetch_method.h" #include "paddle/fluid/framework/lod_rank_table.h" #include "paddle/fluid/framework/lod_tensor_array.h" @@ -48,97 +49,23 @@ namespace { int kProgramId = -1; } // namespace -static std::unordered_map GetNonPersistableReferenceCounts( - const BlockDesc& block, const std::vector& skip_var_list) { - std::unordered_map ref_cnts; - std::unordered_set skip_vars(skip_var_list.begin(), - skip_var_list.end()); - - auto update_ref_cnts = [&](OpDesc* op_desc, const VariableNameMap& name_map) { - for (auto& name_pair : name_map) { - for (auto& name : name_pair.second) { - if (skip_vars.count(name)) continue; - auto* var_desc = block.FindVar(name); - if (var_desc == nullptr || var_desc->Persistable()) continue; - auto type = var_desc->Proto()->type().type(); - if (type != proto::VarType::LOD_TENSOR && - type != proto::VarType::SELECTED_ROWS && - type != proto::VarType::LOD_TENSOR_ARRAY) { - continue; - } - ++ref_cnts[name]; - } - } - }; - - for (auto op_desc : block.AllOps()) { - update_ref_cnts(op_desc, op_desc->Inputs()); - update_ref_cnts(op_desc, op_desc->Outputs()); - } - return ref_cnts; -} - ExecutorPrepareContext::ExecutorPrepareContext( - const framework::ProgramDesc& prog, size_t block_id, - const std::vector& keep_vars, bool force_disable_gc) - : prog_(prog), block_id_(block_id), force_disable_gc_(force_disable_gc) { - if (GetEagerDeletionThreshold() >= 0 && !force_disable_gc_) { - global_ref_cnts_ = - GetNonPersistableReferenceCounts(prog.Block(block_id), keep_vars); + const framework::ProgramDesc& prog, size_t block_id) + : prog_(prog), block_id_(block_id) {} + +void ExecutorPrepareContext::PrepareUnusedVars( + const std::vector& keep_vars, bool force_disable_gc) { + force_disable_gc_ = force_disable_gc; + if (GetEagerDeletionThreshold() < 0 || force_disable_gc_) { + return; } + unused_vars_ = GetUnusedVars(prog_.Block(block_id_), ops_, keep_vars); } ExecutorPrepareContext::~ExecutorPrepareContext() { VLOG(5) << "destroy ExecutorPrepareContext"; } -static void DeleteUnusedTensors( - const Scope& scope, const OperatorBase* op, GarbageCollector* gc, - std::unordered_map* ref_cnts) { - std::deque> garbages; - - auto handler = [&](const VariableNameMap& name_map) { - for (auto& name_pair : name_map) { - for (auto& name : name_pair.second) { - auto it = ref_cnts->find(name); - if (it == ref_cnts->end()) continue; - if (--(it->second) != 0) { - continue; - } - auto* var = scope.FindVar(name); - if (var == nullptr) { - continue; - } - - VLOG(2) << "Erase variable " << name; - if (var->IsType()) { - garbages.emplace_back( - var->GetMutable()->MoveMemoryHolder()); - } else if (var->IsType()) { - garbages.emplace_back(var->GetMutable() - ->mutable_value() - ->MoveMemoryHolder()); - } else if (var->IsType()) { - auto* lod_tensor_arr = var->GetMutable(); - for (auto& t : *lod_tensor_arr) { - garbages.emplace_back(t.MoveMemoryHolder()); - } - } else { - PADDLE_THROW("Type %s of %s is not supported eager deletion", - framework::ToTypeName(var->Type()), name); - } - } - } - }; - - handler(op->Inputs()); - handler(op->Outputs()); - - if (!garbages.empty()) { - gc->Add(std::move(garbages)); - } -} - Executor::Executor(const platform::Place& place) : place_(place) {} void Executor::Close() { @@ -362,8 +289,8 @@ void Executor::Run(const ProgramDesc& program, Scope* scope, std::unique_ptr Executor::Prepare( const ProgramDesc& program, int block_id, const std::vector& skip_ref_cnt_vars, bool force_disable_gc) { - std::unique_ptr ctx(new ExecutorPrepareContext( - program, block_id, skip_ref_cnt_vars, force_disable_gc)); + std::unique_ptr ctx( + new ExecutorPrepareContext(program, block_id)); PADDLE_ENFORCE_LT(static_cast(block_id), program.Size()); auto& block = program.Block(block_id); for (auto& op_desc : block.AllOps()) { @@ -375,6 +302,7 @@ std::unique_ptr Executor::Prepare( ctx->prog_.Block(ctx->block_id_), &ctx->ops_); } #endif + ctx->PrepareUnusedVars(skip_ref_cnt_vars, force_disable_gc); return ctx; } @@ -389,19 +317,17 @@ std::vector> Executor::Prepare( std::vector> result; size_t idx = 0; for (auto& bid : block_ids) { - ExecutorPrepareContext* ctx; - if (skip_ref_cnt_vars.empty()) { - ctx = new ExecutorPrepareContext(program, bid, std::vector(), - force_disable_gc); - } else { - ctx = new ExecutorPrepareContext(program, bid, skip_ref_cnt_vars[idx], - force_disable_gc); - } PADDLE_ENFORCE_LT(static_cast(bid), program.Size()); + auto* ctx = new ExecutorPrepareContext(program, bid); auto& block = program.Block(bid); for (auto& op_desc : block.AllOps()) { ctx->ops_.push_back(OpRegistry::CreateOp(*op_desc)); } + if (skip_ref_cnt_vars.empty()) { + ctx->PrepareUnusedVars(std::vector(), force_disable_gc); + } else { + ctx->PrepareUnusedVars(skip_ref_cnt_vars[idx], force_disable_gc); + } result.push_back(std::shared_ptr(ctx)); ++idx; } @@ -425,7 +351,6 @@ void Executor::RunPreparedContext(ExecutorPrepareContext* ctx, Scope* scope, // FIXME(zjl): recurrent_op is rather complex, we would // disable gc forcely in recurrent_op if (!ctx->force_disable_gc_ && max_memory_size >= 0) { - ctx->ResetReferenceCount(); #ifdef PADDLE_WITH_CUDA if (platform::is_gpu_place(place_)) { if (IsFastEagerDeletionModeEnabled()) { @@ -453,8 +378,7 @@ void Executor::RunPreparedContext(ExecutorPrepareContext* ctx, Scope* scope, op->Run(*local_scope, place_); if (gc) { - DeleteUnusedTensors(*local_scope, op.get(), gc.get(), - &(ctx->runtime_ref_cnts_)); + DeleteUnusedTensors(*local_scope, op.get(), ctx->unused_vars_, gc.get()); } } diff --git a/paddle/fluid/framework/executor.h b/paddle/fluid/framework/executor.h index 65cb9e51ab2c9208b6bfbbed54f4136ffbd627ff..825224437e0cdda03c56faf1b50833abd8b8c2ab 100644 --- a/paddle/fluid/framework/executor.h +++ b/paddle/fluid/framework/executor.h @@ -30,22 +30,20 @@ namespace paddle { namespace framework { struct ExecutorPrepareContext { - ExecutorPrepareContext(const framework::ProgramDesc& prog, size_t block_id, - const std::vector& skip_ref_cnt_vars = - std::vector(), - bool force_disable_gc = false); + ExecutorPrepareContext(const framework::ProgramDesc& prog, size_t block_id); ~ExecutorPrepareContext(); - void ResetReferenceCount() { runtime_ref_cnts_ = global_ref_cnts_; } + void PrepareUnusedVars(const std::vector& keep_vars, + bool force_disable_gc = false); const framework::ProgramDesc& prog_; - size_t block_id_; - bool force_disable_gc_; + const size_t block_id_; + std::vector> ops_; - std::unordered_map global_ref_cnts_; - std::unordered_map runtime_ref_cnts_; + std::unordered_map> unused_vars_; + bool force_disable_gc_{false}; }; class Executor { diff --git a/paddle/fluid/framework/executor_gc_helper.cc b/paddle/fluid/framework/executor_gc_helper.cc new file mode 100644 index 0000000000000000000000000000000000000000..77b0977b5a47fdf4413e75c4e89cf638949e937f --- /dev/null +++ b/paddle/fluid/framework/executor_gc_helper.cc @@ -0,0 +1,189 @@ +// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "paddle/fluid/framework/executor_gc_helper.h" +#include +#include +#include +#include +#include +#include +#include "glog/logging.h" +#include "paddle/fluid/framework/lod_tensor.h" +#include "paddle/fluid/framework/lod_tensor_array.h" +#include "paddle/fluid/framework/selected_rows.h" +#include "paddle/fluid/platform/enforce.h" + +namespace paddle { +namespace framework { + +struct OpInOutInfo { + public: + void Build(const OperatorBase *op) { + is_built_ = true; + auto &inferer = op->Info().NoNeedBufferVarsInferer(); + if (inferer) { + no_need_buffer_ins_ = inferer(op->Inputs(), op->Outputs(), op->Attrs()); + + if (no_need_buffer_ins_.empty()) return; + + for (auto &in_name_pair : op->Inputs()) { + if (no_need_buffer_ins_.count(in_name_pair.first) != 0) { + continue; + } + + for (auto &in_arg_name : in_name_pair.second) { + other_args_set_.insert(in_arg_name); + } + } + + for (auto &out_name_pair : op->Outputs()) { + for (auto &out_arg_name : out_name_pair.second) { + other_args_set_.insert(out_arg_name); + } + } + } + } + + bool IsBuilt() const { return is_built_; } + + bool IsInArgBufferNeeded(const std::string &in_arg_name) const { + return no_need_buffer_ins_.empty() || + other_args_set_.count(in_arg_name) != 0; + } + + private: + // A set to record unused buffer input vars of op + std::unordered_set no_need_buffer_ins_; + // A set to record other args of op (including in, out) + std::unordered_set other_args_set_; + bool is_built_{false}; +}; + +static bool VarCanBeDeleted(const std::string &name, const BlockDesc &block, + const std::unordered_set &skip_vars) { + if (skip_vars.count(name) != 0) { + return false; + } + + auto *var_desc = block.FindVar(name); + if (var_desc == nullptr || var_desc->Persistable()) { + return false; + } + + auto type = var_desc->Proto()->type().type(); + + return type == proto::VarType::LOD_TENSOR || + type == proto::VarType::SELECTED_ROWS || + type == proto::VarType::LOD_TENSOR_ARRAY; +} + +std::unordered_map> GetUnusedVars( + const BlockDesc &block, + const std::vector> &ops, + const std::vector &skip_var_list) { + std::unordered_set skip_vars(skip_var_list.begin(), + skip_var_list.end()); + + std::unordered_map var_op_idx_map; + + for (size_t i = 0; i < ops.size(); ++i) { + auto *op = ops[i].get(); + + OpInOutInfo info; + for (auto &name_pair : op->Inputs()) { + for (auto &name : name_pair.second) { + if (!VarCanBeDeleted(name, block, skip_vars)) { + continue; + } + + // var can be gc-ed + if (!info.IsBuilt()) { + info.Build(op); + } + + if (info.IsInArgBufferNeeded(name)) { + // Update the last living op of variable to current op + var_op_idx_map[name] = i; + } else { + VLOG(10) << "Skip reference count computing of variable " + << name_pair.first << "(" << name << ") in Operator " + << op->Type(); + } + } + } + + for (auto &name_pair : op->Outputs()) { + for (auto &name : name_pair.second) { + if (VarCanBeDeleted(name, block, skip_vars)) { + // Update the last living op of variable to current op + var_op_idx_map[name] = i; + } + } + } + } + + std::unordered_map> result; + for (auto &name_op_idx_pair : var_op_idx_map) { + auto &name = name_op_idx_pair.first; + size_t op_idx = name_op_idx_pair.second; + result[ops[op_idx].get()].emplace_back(name); + } + return result; +} + +void DeleteUnusedTensors( + const Scope &scope, OperatorBase *op, + const std::unordered_map> + &delete_vars_map, + GarbageCollector *gc) { + auto iter = delete_vars_map.find(op); + if (iter == delete_vars_map.end()) { + return; + } + + auto &delete_vars = iter->second; + + std::deque> garbages; + + for (auto &var_name : delete_vars) { + auto *var = scope.FindVar(var_name); + if (var == nullptr) { + continue; + } + + VLOG(2) << "Erase variable " << var_name; + if (var->IsType()) { + garbages.emplace_back(var->GetMutable()->MoveMemoryHolder()); + } else if (var->IsType()) { + garbages.emplace_back( + var->GetMutable()->mutable_value()->MoveMemoryHolder()); + } else if (var->IsType()) { + auto *lod_tensor_arr = var->GetMutable(); + for (auto &t : *lod_tensor_arr) { + garbages.emplace_back(t.MoveMemoryHolder()); + } + } else { + PADDLE_THROW("Type %s of %s is not supported eager deletion", + framework::ToTypeName(var->Type()), var_name); + } + } + + if (!garbages.empty()) { + gc->Add(std::move(garbages)); + } +} + +} // namespace framework +} // namespace paddle diff --git a/paddle/fluid/framework/executor_gc_helper.h b/paddle/fluid/framework/executor_gc_helper.h new file mode 100644 index 0000000000000000000000000000000000000000..8553273f8242844d0203d7bcd90ea2090b65826c --- /dev/null +++ b/paddle/fluid/framework/executor_gc_helper.h @@ -0,0 +1,42 @@ +// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once + +#include +#include +#include +#include +#include "paddle/fluid/framework/garbage_collector.h" +#include "paddle/fluid/framework/operator.h" +#include "paddle/fluid/framework/scope.h" + +namespace paddle { +namespace framework { + +// Result map: op -> variable names that can be deleted after op runs +std::unordered_map> GetUnusedVars( + const BlockDesc &block, + const std::vector> &ops, + const std::vector &skip_vars); + +// Collect unused tensors after op runs +void DeleteUnusedTensors( + const Scope &scope, OperatorBase *op, + const std::unordered_map> + &delete_vars_map, + GarbageCollector *gc); + +} // namespace framework +} // namespace paddle diff --git a/paddle/fluid/framework/garbage_collector.cc b/paddle/fluid/framework/garbage_collector.cc index 54d9d0dc018b08decb2ff8965659bab98e81f3ab..789b2ef80ec09a69ca227a27c61dd58e58a2fc04 100644 --- a/paddle/fluid/framework/garbage_collector.cc +++ b/paddle/fluid/framework/garbage_collector.cc @@ -13,14 +13,36 @@ // limitations under the License. #include +#include +#include +#include +#include // NOLINT +#include #ifdef PADDLE_WITH_CUDA #include "paddle/fluid/platform/cuda_device_guard.h" #endif +#include "gflags/gflags.h" +#include "glog/logging.h" #include "paddle/fluid/framework/garbage_collector.h" namespace paddle { namespace framework { +DEFINE_double( + eager_delete_tensor_gb, -1.0, + "Memory size threshold (GB) when the garbage collector clear tensors." + "Disabled when this value is less than 0"); + +DEFINE_bool(fast_eager_deletion_mode, true, + "Fast eager deletion mode. If enabled, memory would release " + "immediately without waiting GPU kernel ends."); + +DEFINE_double(memory_fraction_of_eager_deletion, 1.0, + "Fraction of eager deletion. If less than 1.0, all variables in " + "the program would be sorted according to its memory size, and " + "only the FLAGS_memory_fraction_of_eager_deletion of the largest " + "variables would be deleted."); + GarbageCollector::GarbageCollector(const platform::Place &place, size_t max_memory_size) : max_memory_size_((std::max)(max_memory_size, static_cast(1))) { @@ -85,5 +107,25 @@ void StreamGarbageCollector::ClearCallback( callback_manager_->AddCallback(callback); } #endif + +int64_t GetEagerDeletionThreshold() { + return FLAGS_eager_delete_tensor_gb < 0 + ? -1 + : static_cast(FLAGS_eager_delete_tensor_gb * + (static_cast(1) << 30)); +} + +bool IsFastEagerDeletionModeEnabled() { return FLAGS_fast_eager_deletion_mode; } + +void SetEagerDeletionMode(double threshold, double fraction, bool fast_mode) { + FLAGS_eager_delete_tensor_gb = threshold; + FLAGS_memory_fraction_of_eager_deletion = fraction; + FLAGS_fast_eager_deletion_mode = fast_mode; +} + +double GetEagerDeletionMemoryFraction() { + return FLAGS_memory_fraction_of_eager_deletion; +} + } // namespace framework } // namespace paddle diff --git a/paddle/fluid/framework/garbage_collector.h b/paddle/fluid/framework/garbage_collector.h index 2768671029c06562aa0d2e5eea3d3ff61d900ab5..f0b504627ae0cd99c8b4b15df3dcfc39a56507f2 100644 --- a/paddle/fluid/framework/garbage_collector.h +++ b/paddle/fluid/framework/garbage_collector.h @@ -18,6 +18,8 @@ #include #include #include // NOLINT +#include +#include "gflags/gflags.h" #include "paddle/fluid/platform/device_context.h" namespace paddle { @@ -126,5 +128,12 @@ void GarbageCollector::Add(Container &&objs, Callback &&callback) { } } +int64_t GetEagerDeletionThreshold(); +bool IsFastEagerDeletionModeEnabled(); + +void SetEagerDeletionMode(double threshold, double fraction, bool fast_mode); + +double GetEagerDeletionMemoryFraction(); + } // namespace framework } // namespace paddle diff --git a/paddle/fluid/framework/inplace_op_inference.h b/paddle/fluid/framework/inplace_op_inference.h index a3ccf677c90e8466f6c89041979336d45c1ac942..df46d4f9a805b6e497a6f939e91ecf7dc395e7f0 100644 --- a/paddle/fluid/framework/inplace_op_inference.h +++ b/paddle/fluid/framework/inplace_op_inference.h @@ -17,8 +17,8 @@ #include #include #include +#include #include "glog/logging.h" -#include "paddle/fluid/framework/block_desc.h" #include "paddle/fluid/framework/details/memory_optimize_helper.h" #include "paddle/fluid/framework/op_desc.h" #include "paddle/fluid/framework/type_defs.h" @@ -32,55 +32,22 @@ namespace framework { then Out will inplaced use X's memory. The base class will do legality validation for both variables. */ + class InplaceOpInference { public: virtual ~InplaceOpInference() {} virtual std::unordered_map operator()( - const OpDesc& op_desc, BlockDesc* block) const = 0; -}; - -class InplaceInToOut : public InplaceOpInference { - public: - std::unordered_map operator()( - const OpDesc& op_desc, BlockDesc* block) const { - std::unordered_map ret; - auto in_out_var_names_pair = this->Apply(op_desc, block); - for (auto& pair : in_out_var_names_pair) { - PADDLE_ENFORCE(!op_desc.Input(pair.first).empty(), - string::Sprintf("op %s do not have input of %s!", - op_desc.Type(), pair.first)); - PADDLE_ENFORCE(!op_desc.Output(pair.second).empty(), - string::Sprintf("op %s do not have output of %s!", - op_desc.Type(), pair.second)); - auto& in_name = op_desc.Input(pair.first).at(0); - auto& out_name = op_desc.Output(pair.second).at(0); - - auto in = block->FindRecursiveOrCreateVar(in_name); - auto out = block->FindRecursiveOrCreateVar(out_name); - if (TryInplaceInputOutput(in, out)) ret.insert({in_name, out_name}); - } - return ret; - } - - protected: - virtual std::unordered_map Apply( - const OpDesc& op_desc, BlockDesc* block) const = 0; - - bool TryInplaceInputOutput(const VarDesc& in, const VarDesc& out) const { - return in.Name() != out.Name() && details::NodeCanReused(in) && - details::NodeCanReused(out) && - details::NodeSize(out) <= details::NodeSize(in); - } + const OpDesc& op_desc) const = 0; }; /* Inplace In and Out for operator only have an Input and an Output. For example, activation op. */ -class SingleOpInplaceInToOut : public InplaceInToOut { - protected: - std::unordered_map Apply( - const OpDesc& op_desc, BlockDesc* block) const override { +class SingleOpInplaceInToOut : public InplaceOpInference { + public: + std::unordered_map operator()( + const OpDesc& op_desc) const override { PADDLE_ENFORCE(!op_desc.InputNames().empty(), "Op inputs must not be empty"); PADDLE_ENFORCE(!op_desc.OutputNames().empty(), @@ -95,10 +62,10 @@ class SingleOpInplaceInToOut : public InplaceInToOut { Gradient op. Inplace output use it's Input. For example, Input@Grad->Input reuse strategy. */ -class GradOpInplaceInToOut : public InplaceInToOut { - protected: - std::unordered_map Apply( - const OpDesc& op_desc, BlockDesc* block) const override { +class GradOpInplaceInToOut : public InplaceOpInference { + public: + std::unordered_map operator()( + const OpDesc& op_desc) const override { std::unordered_map ret; std::unordered_set output_names(op_desc.OutputNames().begin(), op_desc.OutputNames().end()); diff --git a/paddle/fluid/framework/inplace_op_inference_test.cc b/paddle/fluid/framework/inplace_op_inference_test.cc index bf9d1dcd380cdff886301faf13b0015fd5a2ed5c..c93e562955fb36ddc4363fac862f3942758af35d 100644 --- a/paddle/fluid/framework/inplace_op_inference_test.cc +++ b/paddle/fluid/framework/inplace_op_inference_test.cc @@ -127,26 +127,20 @@ class MultiOutGradShapeInference : public framework::InferShapeBase { } }; -class MultiOutInplaceInToOut : public framework::InplaceInToOut { +class MultiOutInplaceInToOut : public framework::InplaceOpInference { public: - using framework::InplaceInToOut::InplaceInToOut; - - protected: - std::unordered_map Apply( - const OpDesc& op_desc, BlockDesc* block) const override { + std::unordered_map operator()( + const OpDesc& op_desc) const override { return std::unordered_map{ {"X", "Out"}, {"Y", "YOut"}, {"Z", "ZOut"}, }; } }; -class MultiOutGradInplaceInToOut : public framework::InplaceInToOut { +class MultiOutGradInplaceInToOut : public framework::InplaceOpInference { public: - using framework::InplaceInToOut::InplaceInToOut; - - protected: - std::unordered_map Apply( - const OpDesc& op_desc, BlockDesc* block) const override { + std::unordered_map operator()( + const OpDesc& op_desc) const override { return std::unordered_map{ {framework::GradVarName("YOut"), framework::GradVarName("Y")}, {framework::GradVarName("Out"), framework::GradVarName("X")}, @@ -171,118 +165,118 @@ REGISTER_OPERATOR(multi_out_grad, f::NOP, f::MultiOutGradInplaceInToOut, namespace paddle { namespace framework { -TEST(InferInplace, SingleOpInplaceInToOut) { - ProgramDesc prog; - auto* op = prog.MutableBlock(0)->AppendOp(); - op->SetType("single_op"); - op->SetInput("X", {"test2_a", "test2_b", "test2_c"}); - op->SetOutput("Out", {"test2_out"}); - - prog.MutableBlock(0)->Var("test2_a")->SetType(proto::VarType::LOD_TENSOR); - prog.MutableBlock(0)->Var("test2_a")->SetShape({32, 64, 128, 128}); - prog.MutableBlock(0)->Var("test2_b")->SetType(proto::VarType::LOD_TENSOR); - prog.MutableBlock(0)->Var("test2_c")->SetType(proto::VarType::LOD_TENSOR); - prog.MutableBlock(0)->Var("test2_out"); - prog.MutableBlock(0)->Var("test2_out")->SetShape({32, 16, 128, 128}); - - auto& infer_inplace = OpInfoMap::Instance().Get(op->Type()).infer_inplace_; - auto in_to_outs = infer_inplace(*op, op->Block()); - EXPECT_EQ(in_to_outs.size(), 1ul); - auto it = in_to_outs.begin(); - EXPECT_EQ(it->first, "test2_a"); - EXPECT_EQ(it->second, "test2_out"); -} - -TEST(InferInplace, SingleGradOpInplaceInToOut) { - ProgramDesc prog; - auto* op = prog.MutableBlock(0)->AppendOp(); - op->SetType("single_op_grad"); - op->SetInput(GradVarName("Out"), {"test2_out"}); - op->SetOutput(GradVarName("X"), {"test2_a", "test2_b", "test2_c"}); - - prog.MutableBlock(0)->Var("test2_a")->SetType(proto::VarType::LOD_TENSOR); - prog.MutableBlock(0)->Var("test2_a")->SetShape({32, 16, 1024, 1024}); - prog.MutableBlock(0)->Var("test2_b")->SetType(proto::VarType::LOD_TENSOR); - prog.MutableBlock(0)->Var("test2_c")->SetType(proto::VarType::LOD_TENSOR); - prog.MutableBlock(0)->Var("test2_out"); - prog.MutableBlock(0)->Var("test2_out")->SetShape({32, 16, 1024, 1024}); - - auto& infer_inplace = OpInfoMap::Instance().Get(op->Type()).infer_inplace_; - auto in_to_outs = infer_inplace(*op, op->Block()); - EXPECT_EQ(in_to_outs.size(), 1ul); - auto it = in_to_outs.begin(); - EXPECT_EQ(it->first, "test2_out"); - EXPECT_EQ(it->second, "test2_a"); -} - -TEST(InferInplace, MultiOutInplaceInToOut) { - ProgramDesc prog; - auto* op = prog.MutableBlock(0)->AppendOp(); - op->SetType("multi_out_op"); - op->SetInput("X", {"a0", "a1"}); - op->SetInput("Y", {"b0"}); - op->SetInput("Z", {"c0", "c1"}); - op->SetOutput("Out", {"o0"}); - op->SetOutput("YOut", {"y0"}); - op->SetOutput("ZOut", {"z0"}); - - prog.MutableBlock(0)->Var("a0")->SetType(proto::VarType::LOD_TENSOR); - prog.MutableBlock(0)->Var("b0")->SetType(proto::VarType::LOD_TENSOR); - prog.MutableBlock(0)->Var("c0")->SetType(proto::VarType::LOD_TENSOR); - prog.MutableBlock(0)->Var("c1")->SetType(proto::VarType::LOD_TENSOR); - prog.MutableBlock(0)->Var("o0"); - prog.MutableBlock(0)->Var("y0"); - prog.MutableBlock(0)->Var("z0"); - prog.MutableBlock(0)->Var("a0")->SetShape({32, 16, 1024, 1024}); - prog.MutableBlock(0)->Var("b0")->SetShape({32, 16, 1024, 1024}); - prog.MutableBlock(0)->Var("c0")->SetShape({32, 16, 1024, 1024}); - prog.MutableBlock(0)->Var("o0")->SetShape({32, 16, 1024, 1024}); - prog.MutableBlock(0)->Var("y0")->SetShape({32, 16, 1024, 1024}); - prog.MutableBlock(0)->Var("z0")->SetShape({32, 16, 1024, 1024}); - - auto& infer_inplace = OpInfoMap::Instance().Get(op->Type()).infer_inplace_; - auto in_to_outs = infer_inplace(*op, op->Block()); - EXPECT_EQ(in_to_outs.size(), 3ul); - std::unordered_map expects = { - {"a0", "o0"}, {"b0", "y0"}, {"c0", "z0"}, - }; - EXPECT_TRUE(expects == in_to_outs); -} - -TEST(InferInplace, MultiGradInplaceInToOut) { - ProgramDesc prog; - auto* op = prog.MutableBlock(0)->AppendOp(); - op->SetType("multi_out_grad"); - op->SetInput(GradVarName("Out"), {"o0"}); - op->SetInput(GradVarName("YOut"), {"y0"}); - op->SetInput(GradVarName("ZOut"), {"z0"}); - op->SetOutput(GradVarName("X"), {"a0", "a1"}); - op->SetOutput(GradVarName("Y"), {"b0"}); - op->SetOutput(GradVarName("Z"), {"c0", "c1"}); - - prog.MutableBlock(0)->Var("a0")->SetType(proto::VarType::LOD_TENSOR); - prog.MutableBlock(0)->Var("b0")->SetType(proto::VarType::LOD_TENSOR); - prog.MutableBlock(0)->Var("c0")->SetType(proto::VarType::LOD_TENSOR); - prog.MutableBlock(0)->Var("c1")->SetType(proto::VarType::LOD_TENSOR); - prog.MutableBlock(0)->Var("o0"); - prog.MutableBlock(0)->Var("y0"); - prog.MutableBlock(0)->Var("z0"); - prog.MutableBlock(0)->Var("a0")->SetShape({32, 16, 1024, 1024}); - prog.MutableBlock(0)->Var("b0")->SetShape({32, 16, 1024, 1024}); - prog.MutableBlock(0)->Var("c0")->SetShape({32, 16, 1024, 1024}); - prog.MutableBlock(0)->Var("o0")->SetShape({32, 16, 1024, 1024}); - prog.MutableBlock(0)->Var("y0")->SetShape({32, 16, 1024, 1024}); - prog.MutableBlock(0)->Var("z0")->SetShape({32, 16, 1024, 1024}); - - auto& infer_inplace = OpInfoMap::Instance().Get(op->Type()).infer_inplace_; - auto in_to_outs = infer_inplace(*op, op->Block()); - - EXPECT_EQ(in_to_outs.size(), 3ul); - std::unordered_map expects = { - {"o0", "a0"}, {"y0", "b0"}, {"z0", "c0"}, - }; - EXPECT_TRUE(expects == in_to_outs); -} +// TEST(InferInplace, SingleOpInplaceInToOut) { +// ProgramDesc prog; +// auto* op = prog.MutableBlock(0)->AppendOp(); +// op->SetType("single_op"); +// op->SetInput("X", {"test2_a", "test2_b", "test2_c"}); +// op->SetOutput("Out", {"test2_out"}); +// +// prog.MutableBlock(0)->Var("test2_a")->SetType(proto::VarType::LOD_TENSOR); +// prog.MutableBlock(0)->Var("test2_a")->SetShape({32, 64, 128, 128}); +// prog.MutableBlock(0)->Var("test2_b")->SetType(proto::VarType::LOD_TENSOR); +// prog.MutableBlock(0)->Var("test2_c")->SetType(proto::VarType::LOD_TENSOR); +// prog.MutableBlock(0)->Var("test2_out"); +// prog.MutableBlock(0)->Var("test2_out")->SetShape({32, 16, 128, 128}); +// +// auto& infer_inplace = OpInfoMap::Instance().Get(op->Type()).infer_inplace_; +// auto in_to_outs = infer_inplace(*op); +// EXPECT_EQ(in_to_outs.size(), 1ul); +// auto it = in_to_outs.begin(); +// EXPECT_EQ(it->first, "test2_a"); +// EXPECT_EQ(it->second, "test2_out"); +// } +// +// TEST(InferInplace, SingleGradOpInplaceInToOut) { +// ProgramDesc prog; +// auto* op = prog.MutableBlock(0)->AppendOp(); +// op->SetType("single_op_grad"); +// op->SetInput(GradVarName("Out"), {"test2_out"}); +// op->SetOutput(GradVarName("X"), {"test2_a", "test2_b", "test2_c"}); +// +// prog.MutableBlock(0)->Var("test2_a")->SetType(proto::VarType::LOD_TENSOR); +// prog.MutableBlock(0)->Var("test2_a")->SetShape({32, 16, 1024, 1024}); +// prog.MutableBlock(0)->Var("test2_b")->SetType(proto::VarType::LOD_TENSOR); +// prog.MutableBlock(0)->Var("test2_c")->SetType(proto::VarType::LOD_TENSOR); +// prog.MutableBlock(0)->Var("test2_out"); +// prog.MutableBlock(0)->Var("test2_out")->SetShape({32, 16, 1024, 1024}); +// +// auto& infer_inplace = OpInfoMap::Instance().Get(op->Type()).infer_inplace_; +// auto in_to_outs = infer_inplace(*op); +// EXPECT_EQ(in_to_outs.size(), 1ul); +// auto it = in_to_outs.begin(); +// EXPECT_EQ(it->first, "test2_out"); +// EXPECT_EQ(it->second, "test2_a"); +// } +// +// TEST(InferInplace, MultiOutInplaceInToOut) { +// ProgramDesc prog; +// auto* op = prog.MutableBlock(0)->AppendOp(); +// op->SetType("multi_out_op"); +// op->SetInput("X", {"a0", "a1"}); +// op->SetInput("Y", {"b0"}); +// op->SetInput("Z", {"c0", "c1"}); +// op->SetOutput("Out", {"o0"}); +// op->SetOutput("YOut", {"y0"}); +// op->SetOutput("ZOut", {"z0"}); +// +// prog.MutableBlock(0)->Var("a0")->SetType(proto::VarType::LOD_TENSOR); +// prog.MutableBlock(0)->Var("b0")->SetType(proto::VarType::LOD_TENSOR); +// prog.MutableBlock(0)->Var("c0")->SetType(proto::VarType::LOD_TENSOR); +// prog.MutableBlock(0)->Var("c1")->SetType(proto::VarType::LOD_TENSOR); +// prog.MutableBlock(0)->Var("o0"); +// prog.MutableBlock(0)->Var("y0"); +// prog.MutableBlock(0)->Var("z0"); +// prog.MutableBlock(0)->Var("a0")->SetShape({32, 16, 1024, 1024}); +// prog.MutableBlock(0)->Var("b0")->SetShape({32, 16, 1024, 1024}); +// prog.MutableBlock(0)->Var("c0")->SetShape({32, 16, 1024, 1024}); +// prog.MutableBlock(0)->Var("o0")->SetShape({32, 16, 1024, 1024}); +// prog.MutableBlock(0)->Var("y0")->SetShape({32, 16, 1024, 1024}); +// prog.MutableBlock(0)->Var("z0")->SetShape({32, 16, 1024, 1024}); +// +// auto& infer_inplace = OpInfoMap::Instance().Get(op->Type()).infer_inplace_; +// auto in_to_outs = infer_inplace(*op); +// EXPECT_EQ(in_to_outs.size(), 3ul); +// std::unordered_map expects = { +// {"a0", "o0"}, {"b0", "y0"}, {"c0", "z0"}, +// }; +// EXPECT_TRUE(expects == in_to_outs); +// } +// +// TEST(InferInplace, MultiGradInplaceInToOut) { +// ProgramDesc prog; +// auto* op = prog.MutableBlock(0)->AppendOp(); +// op->SetType("multi_out_grad"); +// op->SetInput(GradVarName("Out"), {"o0"}); +// op->SetInput(GradVarName("YOut"), {"y0"}); +// op->SetInput(GradVarName("ZOut"), {"z0"}); +// op->SetOutput(GradVarName("X"), {"a0", "a1"}); +// op->SetOutput(GradVarName("Y"), {"b0"}); +// op->SetOutput(GradVarName("Z"), {"c0", "c1"}); +// +// prog.MutableBlock(0)->Var("a0")->SetType(proto::VarType::LOD_TENSOR); +// prog.MutableBlock(0)->Var("b0")->SetType(proto::VarType::LOD_TENSOR); +// prog.MutableBlock(0)->Var("c0")->SetType(proto::VarType::LOD_TENSOR); +// prog.MutableBlock(0)->Var("c1")->SetType(proto::VarType::LOD_TENSOR); +// prog.MutableBlock(0)->Var("o0"); +// prog.MutableBlock(0)->Var("y0"); +// prog.MutableBlock(0)->Var("z0"); +// prog.MutableBlock(0)->Var("a0")->SetShape({32, 16, 1024, 1024}); +// prog.MutableBlock(0)->Var("b0")->SetShape({32, 16, 1024, 1024}); +// prog.MutableBlock(0)->Var("c0")->SetShape({32, 16, 1024, 1024}); +// prog.MutableBlock(0)->Var("o0")->SetShape({32, 16, 1024, 1024}); +// prog.MutableBlock(0)->Var("y0")->SetShape({32, 16, 1024, 1024}); +// prog.MutableBlock(0)->Var("z0")->SetShape({32, 16, 1024, 1024}); +// +// auto& infer_inplace = OpInfoMap::Instance().Get(op->Type()).infer_inplace_; +// auto in_to_outs = infer_inplace(*op); +// +// EXPECT_EQ(in_to_outs.size(), 3ul); +// std::unordered_map expects = { +// {"o0", "a0"}, {"y0", "b0"}, {"z0", "c0"}, +// }; +// EXPECT_TRUE(expects == in_to_outs); +// } } // namespace framework } // namespace paddle diff --git a/paddle/fluid/framework/ir/CMakeLists.txt b/paddle/fluid/framework/ir/CMakeLists.txt index a79a53867d85e91250ac4810caa5806c25f35fee..81b8ffa83f612f5b67cd91a7a2c1228519a1fbb7 100644 --- a/paddle/fluid/framework/ir/CMakeLists.txt +++ b/paddle/fluid/framework/ir/CMakeLists.txt @@ -46,9 +46,6 @@ cc_library(fuse_pass_base SRCS fuse_pass_base.cc DEPS pass) pass_library(graph_to_program_pass base) pass_library(graph_viz_pass base) pass_library(lock_free_optimize_pass base) -pass_library(cpu_quantize_placement_pass base) -pass_library(cpu_quantize_pass inference) -pass_library(cpu_quantize_squash_pass inference) pass_library(fc_fuse_pass inference) pass_library(attention_lstm_fuse_pass inference) pass_library(infer_clean_graph_pass inference) @@ -71,22 +68,31 @@ pass_library(transpose_flatten_concat_fuse_pass inference) pass_library(identity_scale_op_clean_pass base) pass_library(sync_batch_norm_pass base) pass_library(runtime_context_cache_pass base) +pass_library(simplify_anakin_detection_pattern_pass inference) +pass_library(anakin_fillconstant_elementwisemul_fuse inference) # There may be many transpose-flatten structures in a model, and the output of # these structures will be used as inputs to the concat Op. This pattern will # be detected by our pass. The index here represents the number of structures in the # pattern. We use index 3 ~ 6, because these quantities of structures are # common in the models. -foreach (index RANGE 3 6) +foreach (index RANGE 2 6) file(APPEND ${pass_file} "USE_PASS(transpose_flatten${index}_concat_fuse_pass);\n") endforeach() +foreach (index RANGE 2 6) + file(APPEND ${pass_file} "USE_PASS(simplify_anakin_detection_pattern_pass${index});\n") +endforeach() + if(WITH_MKLDNN) pass_library(mkldnn_placement_pass base mkldnn) pass_library(depthwise_conv_mkldnn_pass base mkldnn) pass_library(conv_bias_mkldnn_fuse_pass inference mkldnn) pass_library(conv_relu_mkldnn_fuse_pass inference mkldnn) pass_library(conv_elementwise_add_mkldnn_fuse_pass inference mkldnn) + pass_library(cpu_quantize_placement_pass base mkldnn) + pass_library(cpu_quantize_pass inference mkldnn) + pass_library(cpu_quantize_squash_pass inference mkldnn) endif() cc_library(fuse_elewise_add_act_pass SRCS fuse_elewise_add_act_pass.cc DEPS pass graph_pattern_detector ) @@ -105,9 +111,6 @@ cc_test(test_graph_pattern_detector SRCS graph_pattern_detector_tester.cc DEPS g cc_test(test_fc_fuse_pass SRCS fc_fuse_pass_tester.cc DEPS fc_fuse_pass framework_proto) cc_test(test_seqpool_concat_fuse_pass SRCS seqpool_concat_fuse_pass_tester.cc DEPS seqpool_concat_fuse_pass framework_proto) cc_test(test_is_test_pass SRCS is_test_pass_tester.cc DEPS is_test_pass) -cc_test(test_cpu_quantize_placement_pass SRCS cpu_quantize_placement_pass_tester.cc DEPS cpu_quantize_placement_pass) -cc_test(test_cpu_quantize_pass SRCS cpu_quantize_pass_tester.cc DEPS cpu_quantize_pass naive_executor) -cc_test(test_cpu_quantize_squash_pass SRCS cpu_quantize_squash_pass_tester.cc DEPS cpu_quantize_squash_pass naive_executor) if(NOT WIN32) cc_test(test_sync_batch_norm_pass SRCS sync_batch_norm_pass_tester.cc DEPS sync_batch_norm_pass) endif() @@ -117,4 +120,7 @@ if (WITH_MKLDNN) cc_test(test_conv_relu_mkldnn_fuse_pass SRCS mkldnn/conv_relu_mkldnn_fuse_pass_tester.cc DEPS conv_relu_mkldnn_fuse_pass) cc_test(test_conv_elementwise_add_mkldnn_fuse_pass SRCS mkldnn/conv_elementwise_add_mkldnn_fuse_pass_tester.cc DEPS conv_elementwise_add_mkldnn_fuse_pass) cc_test(test_mkldnn_placement_pass SRCS mkldnn/mkldnn_placement_pass_tester.cc DEPS mkldnn_placement_pass) + cc_test(test_cpu_quantize_placement_pass SRCS mkldnn/cpu_quantize_placement_pass_tester.cc DEPS cpu_quantize_placement_pass) + cc_test(test_cpu_quantize_pass SRCS mkldnn/cpu_quantize_pass_tester.cc DEPS cpu_quantize_pass naive_executor) + cc_test(test_cpu_quantize_squash_pass SRCS mkldnn/cpu_quantize_squash_pass_tester.cc DEPS cpu_quantize_squash_pass naive_executor) endif () diff --git a/paddle/fluid/framework/ir/anakin_fillconstant_elementwisemul_fuse.cc b/paddle/fluid/framework/ir/anakin_fillconstant_elementwisemul_fuse.cc new file mode 100644 index 0000000000000000000000000000000000000000..83b0da0c0118a856e54d744607cee8b421f330a3 --- /dev/null +++ b/paddle/fluid/framework/ir/anakin_fillconstant_elementwisemul_fuse.cc @@ -0,0 +1,85 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include +#include + +#include "paddle/fluid/framework/ir/anakin_fillconstant_elementwisemul_fuse.h" +#include "paddle/fluid/framework/ir/graph_viz_pass.h" + +namespace paddle { +namespace framework { +namespace ir { + +#define GET_IR_NODE(node__) GET_IR_NODE_FROM_SUBGRAPH(node__, node__, pattern); +#define GET_NODES \ + GET_IR_NODE(fill_constant); \ + GET_IR_NODE(fill_constant_out); \ + GET_IR_NODE(elementwise_mul); \ + GET_IR_NODE(elementwise_mul_out); + +std::unique_ptr AnakinFillconstantElementwisemulFuse::ApplyImpl( + std::unique_ptr graph) const { + const std::string pattern_name = "anakin_fillconstant_elementwisemul_fuse"; + FusePassBase::Init(pattern_name, graph.get()); + + GraphPatternDetector gpd; + auto* x = gpd.mutable_pattern() + ->NewNode("x") + ->assert_is_op_input("elementwise_mul", "X") + ->AsInput(); + + patterns::AnakinFillConstantElementWiseMulFuse pattern(gpd.mutable_pattern(), + pattern_name); + pattern(x); + + auto handler = [&](const GraphPatternDetector::subgraph_t& subgraph, + Graph* g) { + GET_NODES; + + PADDLE_ENFORCE(subgraph.count(x)); + auto* elementwise_in = subgraph.at(x); + float constant_value = + boost::get(fill_constant->Op()->GetAttr("value")); + + framework::OpDesc new_op_desc; + new_op_desc.SetType("scale"); + new_op_desc.SetInput("X", {elementwise_in->Name()}); + new_op_desc.SetAttr("scale", constant_value); + new_op_desc.SetAttr("bias", static_cast(0.0)); + new_op_desc.SetAttr("bias_after_scale", true); + new_op_desc.SetOutput("Out", {elementwise_mul_out->Name()}); + new_op_desc.Flush(); + + // Create a new node for the fused op. + auto* scale_op = graph->CreateOpNode(&new_op_desc); + + IR_NODE_LINK_TO(elementwise_in, scale_op); // Input + IR_NODE_LINK_TO(scale_op, elementwise_mul_out); // Output + + // Delete the unneeded nodes. + GraphSafeRemoveNodes(graph.get(), + {fill_constant, fill_constant_out, elementwise_mul}); + }; + + gpd(graph.get(), handler); + return graph; +} + +} // namespace ir +} // namespace framework +} // namespace paddle + +REGISTER_PASS(anakin_fillconstant_elementwisemul_fuse, + paddle::framework::ir::AnakinFillconstantElementwisemulFuse); diff --git a/paddle/fluid/framework/ir/anakin_fillconstant_elementwisemul_fuse.h b/paddle/fluid/framework/ir/anakin_fillconstant_elementwisemul_fuse.h new file mode 100644 index 0000000000000000000000000000000000000000..fa95143d3adae3e3eeb913af09986fb4a401bd73 --- /dev/null +++ b/paddle/fluid/framework/ir/anakin_fillconstant_elementwisemul_fuse.h @@ -0,0 +1,35 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once +#include +#include "paddle/fluid/framework/ir/fuse_pass_base.h" +#include "paddle/fluid/framework/ir/graph_pattern_detector.h" + +namespace paddle { +namespace framework { +namespace ir { + +class AnakinFillconstantElementwisemulFuse : public FusePassBase { + public: + virtual ~AnakinFillconstantElementwisemulFuse() {} + + protected: + std::unique_ptr ApplyImpl( + std::unique_ptr graph) const override; +}; + +} // namespace ir +} // namespace framework +} // namespace paddle diff --git a/paddle/fluid/framework/ir/graph_pattern_detector.cc b/paddle/fluid/framework/ir/graph_pattern_detector.cc index d0d72127f08f4a83cca5daed57ae6d72c33ae1e3..555fdc7b7a03ebc99fcc77a26341d291dac2c308 100644 --- a/paddle/fluid/framework/ir/graph_pattern_detector.cc +++ b/paddle/fluid/framework/ir/graph_pattern_detector.cc @@ -1470,6 +1470,171 @@ PDNode *patterns::TransposeFlattenConcat::operator()( return concat_out; } +PDNode *patterns::AnakinDetectionPattern::operator()( + std::vector conv_in, int times) { + // The times represents the repeat times of the + // {prior_box, prior_box_loc_out, flatten, prior_box_var_out, reshape} + const int kNumFields = 7; + const int kPriorBoxLocOffset = 1; + const int kReshape1Offset = 2; + const int kReshape1OutOffset = 3; + const int kPriorBoxVarOffset = 4; + const int kReshape2Offset = 5; + const int kReshape2OutOffset = 6; + + const int kBoxCoderThirdInputOffset = times; + const int kMultiClassSecondInputNmsOffset = times + 1; + + std::vector nodes; + + for (int i = 0; i < times; i++) { + nodes.push_back( + pattern->NewNode(GetNodeName("prior_box" + std::to_string(i))) + ->assert_is_op("density_prior_box")); + nodes.push_back(pattern->NewNode(GetNodeName("box_out" + std::to_string(i))) + ->assert_is_op_output("density_prior_box", "Boxes") + ->assert_is_op_input("reshape2", "X") + ->AsIntermediate()); + nodes.push_back( + pattern->NewNode(GetNodeName("reshape1" + std::to_string(i))) + ->assert_is_op("reshape2")); + + nodes.push_back( + pattern->NewNode(GetNodeName("reshape1_out" + std::to_string(i))) + ->assert_is_op_output("reshape2") + ->assert_is_op_nth_input("concat", "X", i) + ->AsIntermediate()); + + nodes.push_back( + pattern->NewNode(GetNodeName("box_var_out" + std::to_string(i))) + ->assert_is_op_output("density_prior_box", "Variances") + ->assert_is_op_input("reshape2", "X") + ->AsIntermediate()); + nodes.push_back( + pattern->NewNode(GetNodeName("reshape2" + std::to_string(i))) + ->assert_is_op("reshape2")); + + nodes.push_back( + pattern->NewNode(GetNodeName("reshape2_out" + std::to_string(i))) + ->assert_is_op_output("reshape2") + ->assert_is_op_nth_input("concat", "X", i) + ->AsIntermediate()); + } + + auto concat_op1 = pattern->NewNode(GetNodeName("concat1")) + ->assert_is_op("concat") + ->assert_op_has_n_inputs("concat", times); + auto concat_out1 = pattern->NewNode(GetNodeName("concat1_out")) + ->assert_is_op_output("concat") + ->AsIntermediate(); + + auto concat_op2 = pattern->NewNode(GetNodeName("concat2")) + ->assert_is_op("concat") + ->assert_op_has_n_inputs("concat", times); + auto concat_out2 = pattern->NewNode(GetNodeName("concat2_out")) + ->assert_is_op_output("concat") + ->AsIntermediate(); + + auto box_coder_op = pattern->NewNode(GetNodeName("box_coder")) + ->assert_is_op("box_coder") + ->assert_op_has_n_inputs("box_coder", 3); + + auto box_coder_out = pattern->NewNode(GetNodeName("box_coder_out")) + ->assert_is_op_output("box_coder") + ->AsIntermediate(); + + auto transpose_before_nms = + pattern->NewNode(GetNodeName("transpose_before_nms")) + ->assert_is_op("transpose2"); + + auto transpose_before_nms_out = + pattern->NewNode(GetNodeName("transpose_before_nms_out")) + ->assert_is_op_output("transpose2") + ->assert_is_op_input("multiclass_nms", "Scores") + ->AsIntermediate(); + + auto multiclass_nms_op = pattern->NewNode(GetNodeName("multiclass_nms")) + ->assert_is_op("multiclass_nms") + ->assert_op_has_n_inputs("multiclass_nms", 2); + + auto multiclass_nms_out = pattern->NewNode(GetNodeName("multiclass_nms_out")) + ->assert_is_op_output("multiclass_nms") + ->AsOutput(); + + std::vector reshape1_outs; + std::vector reshape2_outs; + + for (int i = 0; i < times; i++) { + conv_in[i]->AsInput(); + // prior_box + nodes[i * kNumFields]->LinksFrom({conv_in[i]}); + // prior_box box out + nodes[i * kNumFields + kPriorBoxLocOffset]->LinksFrom( + {nodes[i * kNumFields]}); + // reshape + nodes[i * kNumFields + kReshape1Offset]->LinksFrom( + {nodes[i * kNumFields + kPriorBoxLocOffset]}); + // reshape_out + nodes[i * kNumFields + kReshape1OutOffset]->LinksFrom( + {nodes[i * kNumFields + kReshape1Offset]}); + + nodes[i * kNumFields + kPriorBoxVarOffset]->LinksFrom( + {nodes[i * kNumFields]}); + // reshape + nodes[i * kNumFields + kReshape2Offset]->LinksFrom( + {nodes[i * kNumFields + kPriorBoxVarOffset]}); + // reshape_out + nodes[i * kNumFields + kReshape2OutOffset]->LinksFrom( + {nodes[i * kNumFields + kReshape2Offset]}); + + reshape1_outs.push_back(nodes[i * kNumFields + kReshape1OutOffset]); + reshape2_outs.push_back(nodes[i * kNumFields + kReshape2OutOffset]); + } + + concat_op1->LinksFrom(reshape1_outs); + concat_op2->LinksFrom(reshape2_outs); + concat_out1->LinksFrom({concat_op1}); + concat_out2->LinksFrom({concat_op2}); + + conv_in[kBoxCoderThirdInputOffset]->AsInput(); + conv_in[kMultiClassSecondInputNmsOffset]->AsInput(); + + box_coder_op->LinksFrom( + {concat_out1, concat_out2, conv_in[kBoxCoderThirdInputOffset]}); + box_coder_out->LinksFrom({box_coder_op}); + + transpose_before_nms->LinksFrom({conv_in[kMultiClassSecondInputNmsOffset]}); + transpose_before_nms_out->LinksFrom({transpose_before_nms}); + + multiclass_nms_op->LinksFrom({box_coder_out, transpose_before_nms_out}) + .LinksTo({multiclass_nms_out}); + + return multiclass_nms_out; +} + +PDNode *patterns::AnakinFillConstantElementWiseMulFuse::operator()( + PDNode *elementwise_op_input) { + auto fill_constant = + pattern->NewNode(fill_constant_repr())->assert_is_op("fill_constant"); + + auto fill_constant_out = pattern->NewNode(fill_constant_out_repr()) + ->assert_is_op_output("fill_constant") + ->assert_is_op_input("elementwise_mul", "Y") + ->AsIntermediate(); + + auto elementwise_mul_op = + pattern->NewNode(elementwise_mul_repr())->assert_is_op("elementwise_mul"); + + auto elementwise_mul_out = pattern->NewNode(elementwise_mul_out_repr()) + ->assert_is_op_output("elementwise_mul") + ->AsOutput(); + + fill_constant_out->LinksFrom({fill_constant}); + elementwise_mul_op->LinksFrom({elementwise_op_input, fill_constant_out}); + elementwise_mul_out->LinksFrom({elementwise_mul_op}); + return elementwise_mul_out; +} + } // namespace ir } // namespace framework } // namespace paddle diff --git a/paddle/fluid/framework/ir/graph_pattern_detector.h b/paddle/fluid/framework/ir/graph_pattern_detector.h index bac23b651305419a5bcc4fc1efacb721e6e5d0ad..130ddeac4cd1a38516540d175e17d46f877bd909 100644 --- a/paddle/fluid/framework/ir/graph_pattern_detector.h +++ b/paddle/fluid/framework/ir/graph_pattern_detector.h @@ -844,6 +844,36 @@ struct TransposeFlattenConcat : public PatternBase { } }; +struct AnakinDetectionPattern : public PatternBase { + AnakinDetectionPattern(PDPattern* pattern, const std::string& name_scope) + : PatternBase(pattern, name_scope, "anakin_detect_pattern") {} + + PDNode* operator()(std::vector conv_inputs, int times); + + std::string GetNodeName(const std::string& op_type) { + return PDNodeName(name_scope_, repr_, id_, op_type); + } + + PDNode* GetPDNode(const std::string& op_type) { + return pattern->RetrieveNode(GetNodeName(op_type)); + } +}; + +struct AnakinFillConstantElementWiseMulFuse : public PatternBase { + AnakinFillConstantElementWiseMulFuse(PDPattern* pattern, + const std::string& name_scope) + : PatternBase(pattern, name_scope, + "anakin_fillconstant_elementwisemul_fuse") {} + + PDNode* operator()(PDNode* elementwise_op_input); + + // declare operator node's name + PATTERN_DECL_NODE(fill_constant); + PATTERN_DECL_NODE(fill_constant_out); + PATTERN_DECL_NODE(elementwise_mul); + PATTERN_DECL_NODE(elementwise_mul_out); +}; + } // namespace patterns // Link two ir::Nodes from each other. diff --git a/paddle/fluid/framework/ir/cpu_quantize_pass.cc b/paddle/fluid/framework/ir/mkldnn/cpu_quantize_pass.cc similarity index 99% rename from paddle/fluid/framework/ir/cpu_quantize_pass.cc rename to paddle/fluid/framework/ir/mkldnn/cpu_quantize_pass.cc index ed80f9cae347cfb2bf23859daea2f1f47dba599b..b3a8c208916f699dc032496c6d0fa5bf86227903 100644 --- a/paddle/fluid/framework/ir/cpu_quantize_pass.cc +++ b/paddle/fluid/framework/ir/mkldnn/cpu_quantize_pass.cc @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -#include "paddle/fluid/framework/ir/cpu_quantize_pass.h" +#include "paddle/fluid/framework/ir/mkldnn/cpu_quantize_pass.h" #include #include #include "paddle/fluid/framework/eigen.h" diff --git a/paddle/fluid/framework/ir/cpu_quantize_pass.h b/paddle/fluid/framework/ir/mkldnn/cpu_quantize_pass.h similarity index 100% rename from paddle/fluid/framework/ir/cpu_quantize_pass.h rename to paddle/fluid/framework/ir/mkldnn/cpu_quantize_pass.h diff --git a/paddle/fluid/framework/ir/cpu_quantize_pass_tester.cc b/paddle/fluid/framework/ir/mkldnn/cpu_quantize_pass_tester.cc similarity index 99% rename from paddle/fluid/framework/ir/cpu_quantize_pass_tester.cc rename to paddle/fluid/framework/ir/mkldnn/cpu_quantize_pass_tester.cc index 89601be7d1c0f5c9d3c3dcefa4327be7c20a7d65..0d0ed989012fced7f639c2bc12a3bafa6edf27f6 100644 --- a/paddle/fluid/framework/ir/cpu_quantize_pass_tester.cc +++ b/paddle/fluid/framework/ir/mkldnn/cpu_quantize_pass_tester.cc @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -#include "paddle/fluid/framework/ir/cpu_quantize_pass.h" +#include "paddle/fluid/framework/ir/mkldnn/cpu_quantize_pass.h" #include #include "paddle/fluid/framework/naive_executor.h" #include "paddle/fluid/platform/place.h" diff --git a/paddle/fluid/framework/ir/cpu_quantize_placement_pass.cc b/paddle/fluid/framework/ir/mkldnn/cpu_quantize_placement_pass.cc similarity index 96% rename from paddle/fluid/framework/ir/cpu_quantize_placement_pass.cc rename to paddle/fluid/framework/ir/mkldnn/cpu_quantize_placement_pass.cc index 50bbe4915b3502a867be397ae0922d982108d12c..511003dce59f91272802766544577e9c473a3a1d 100644 --- a/paddle/fluid/framework/ir/cpu_quantize_placement_pass.cc +++ b/paddle/fluid/framework/ir/mkldnn/cpu_quantize_placement_pass.cc @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/fluid/framework/ir/cpu_quantize_placement_pass.h" +#include "paddle/fluid/framework/ir/mkldnn/cpu_quantize_placement_pass.h" #include #include diff --git a/paddle/fluid/framework/ir/cpu_quantize_placement_pass.h b/paddle/fluid/framework/ir/mkldnn/cpu_quantize_placement_pass.h similarity index 100% rename from paddle/fluid/framework/ir/cpu_quantize_placement_pass.h rename to paddle/fluid/framework/ir/mkldnn/cpu_quantize_placement_pass.h diff --git a/paddle/fluid/framework/ir/cpu_quantize_placement_pass_tester.cc b/paddle/fluid/framework/ir/mkldnn/cpu_quantize_placement_pass_tester.cc similarity index 98% rename from paddle/fluid/framework/ir/cpu_quantize_placement_pass_tester.cc rename to paddle/fluid/framework/ir/mkldnn/cpu_quantize_placement_pass_tester.cc index 5a4d622645a4377526351bbf4acbcea95a780d22..11d72a56bd66792ff3ed5cc8184f5b242d9cdba5 100644 --- a/paddle/fluid/framework/ir/cpu_quantize_placement_pass_tester.cc +++ b/paddle/fluid/framework/ir/mkldnn/cpu_quantize_placement_pass_tester.cc @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -#include "paddle/fluid/framework/ir/cpu_quantize_placement_pass.h" +#include "paddle/fluid/framework/ir/mkldnn/cpu_quantize_placement_pass.h" #include #include diff --git a/paddle/fluid/framework/ir/cpu_quantize_squash_pass.cc b/paddle/fluid/framework/ir/mkldnn/cpu_quantize_squash_pass.cc similarity index 98% rename from paddle/fluid/framework/ir/cpu_quantize_squash_pass.cc rename to paddle/fluid/framework/ir/mkldnn/cpu_quantize_squash_pass.cc index de62a69de4f25912c5f56973de0dca5343bbe906..6e74cc7787b73d06b1093ed4e846ab83b1234803 100644 --- a/paddle/fluid/framework/ir/cpu_quantize_squash_pass.cc +++ b/paddle/fluid/framework/ir/mkldnn/cpu_quantize_squash_pass.cc @@ -13,7 +13,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -#include "paddle/fluid/framework/ir/cpu_quantize_squash_pass.h" +#include "paddle/fluid/framework/ir/mkldnn/cpu_quantize_squash_pass.h" #include #include #include "paddle/fluid/platform/enforce.h" diff --git a/paddle/fluid/framework/ir/cpu_quantize_squash_pass.h b/paddle/fluid/framework/ir/mkldnn/cpu_quantize_squash_pass.h similarity index 100% rename from paddle/fluid/framework/ir/cpu_quantize_squash_pass.h rename to paddle/fluid/framework/ir/mkldnn/cpu_quantize_squash_pass.h diff --git a/paddle/fluid/framework/ir/cpu_quantize_squash_pass_tester.cc b/paddle/fluid/framework/ir/mkldnn/cpu_quantize_squash_pass_tester.cc similarity index 98% rename from paddle/fluid/framework/ir/cpu_quantize_squash_pass_tester.cc rename to paddle/fluid/framework/ir/mkldnn/cpu_quantize_squash_pass_tester.cc index 3a3eb53f79955b37f5f9af6a09b2f9c8e934aa3e..3cf51d97aa4b8be468b8c2a78dd17aafbbf0e15b 100644 --- a/paddle/fluid/framework/ir/cpu_quantize_squash_pass_tester.cc +++ b/paddle/fluid/framework/ir/mkldnn/cpu_quantize_squash_pass_tester.cc @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -#include "paddle/fluid/framework/ir/cpu_quantize_squash_pass.h" +#include "paddle/fluid/framework/ir/mkldnn/cpu_quantize_squash_pass.h" #include #include "paddle/fluid/framework/naive_executor.h" #include "paddle/fluid/platform/place.h" diff --git a/paddle/fluid/framework/ir/simplify_anakin_detection_pattern_pass.cc b/paddle/fluid/framework/ir/simplify_anakin_detection_pattern_pass.cc new file mode 100644 index 0000000000000000000000000000000000000000..84fb8063e6f020d5ada2c6af7a0307360aa1c92c --- /dev/null +++ b/paddle/fluid/framework/ir/simplify_anakin_detection_pattern_pass.cc @@ -0,0 +1,244 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include +#include + +#include "paddle/fluid/framework/ir/graph_viz_pass.h" +#include "paddle/fluid/framework/ir/node.h" +#include "paddle/fluid/framework/ir/simplify_anakin_detection_pattern_pass.h" + +namespace paddle { +namespace framework { +namespace ir { + +template +std::unique_ptr SimplifyAnakinDetectionPatternPass::ApplyImpl( + std::unique_ptr graph) const { + const std::string pattern_name = + "simplify_anakin_detection_pattern_pass" + std::to_string(times); + FusePassBase::Init(pattern_name, graph.get()); + + GraphPatternDetector gpd; + std::vector input_nodes; + for (int i = 0; i < times; i++) { + input_nodes.push_back(gpd.mutable_pattern() + ->NewNode("x" + std::to_string(i)) + ->assert_is_op_input("density_prior_box", "Input") + ->AsInput()); + } + input_nodes.push_back(gpd.mutable_pattern() + ->NewNode("x" + std::to_string(times)) + ->assert_is_op_input("box_coder", "TargetBox") + ->AsInput()); + + input_nodes.push_back(gpd.mutable_pattern() + ->NewNode("x" + std::to_string(times + 1)) + ->assert_is_op_input("transpose2") + ->AsInput()); + + patterns::AnakinDetectionPattern pattern(gpd.mutable_pattern(), pattern_name); + pattern(input_nodes, times); + + auto handler = [&](const GraphPatternDetector::subgraph_t &subgraph, + Graph *g) { + const int kNumFields = 7; + const int kPriorBoxLocOffset = 1; + const int kReshape1Offset = 2; + const int kReshape1OutOffset = 3; + const int kPriorBoxVarOffset = 4; + const int kReshape2Offset = 5; + const int kReshape2OutOffset = 6; + std::vector nodes; + + for (int i = 0; i < times; i++) { + PADDLE_ENFORCE( + subgraph.at(pattern.GetPDNode("prior_box" + std::to_string(i)))); + PADDLE_ENFORCE( + subgraph.at(pattern.GetPDNode("box_out" + std::to_string(i)))); + PADDLE_ENFORCE( + subgraph.at(pattern.GetPDNode("reshape1" + std::to_string(i)))); + PADDLE_ENFORCE( + subgraph.at(pattern.GetPDNode("reshape1_out" + std::to_string(i)))); + PADDLE_ENFORCE( + subgraph.at(pattern.GetPDNode("reshape2" + std::to_string(i)))); + PADDLE_ENFORCE( + subgraph.at(pattern.GetPDNode("reshape2_out" + std::to_string(i)))); + + PADDLE_ENFORCE( + subgraph.at(pattern.GetPDNode("box_var_out" + std::to_string(i)))); + + nodes.push_back( + subgraph.at(pattern.GetPDNode("prior_box" + std::to_string(i)))); + nodes.push_back( + subgraph.at(pattern.GetPDNode("box_out" + std::to_string(i)))); + nodes.push_back( + subgraph.at(pattern.GetPDNode("reshape1" + std::to_string(i)))); + nodes.push_back( + subgraph.at(pattern.GetPDNode("reshape1_out" + std::to_string(i)))); + nodes.push_back( + subgraph.at(pattern.GetPDNode("box_var_out" + std::to_string(i)))); + nodes.push_back( + subgraph.at(pattern.GetPDNode("reshape2" + std::to_string(i)))); + nodes.push_back( + subgraph.at(pattern.GetPDNode("reshape2_out" + std::to_string(i)))); + } + + Node *concat_op1 = subgraph.at(pattern.GetPDNode("concat1")); + Node *concat_out1 = subgraph.at(pattern.GetPDNode("concat1_out")); + + Node *concat_op2 = subgraph.at(pattern.GetPDNode("concat2")); + Node *concat_out2 = subgraph.at(pattern.GetPDNode("concat2_out")); + + Node *box_coder_third_input = subgraph.at(input_nodes[times]); + Node *box_coder_op = subgraph.at(pattern.GetPDNode("box_coder")); + Node *box_coder_out = subgraph.at(pattern.GetPDNode("box_coder_out")); + + Node *multiclass_nms_second_input = subgraph.at(input_nodes[times + 1]); + Node *transpose_before_nms = + subgraph.at(pattern.GetPDNode("transpose_before_nms")); + Node *transpose_before_nms_out = + subgraph.at(pattern.GetPDNode("transpose_before_nms_out")); + + Node *multiclass_nms = subgraph.at(pattern.GetPDNode("multiclass_nms")); + Node *multiclass_nms_out = + subgraph.at(pattern.GetPDNode("multiclass_nms_out")); + + std::string code_type = + boost::get(box_coder_op->Op()->GetAttr("code_type")); + bool box_normalized = + boost::get(box_coder_op->Op()->GetAttr("box_normalized")); + // auto variance = + // boost::get>(box_coder_op->Op()->GetAttr("variance")); + int background_label = + boost::get(multiclass_nms->Op()->GetAttr("background_label")); + float score_threshold = + boost::get(multiclass_nms->Op()->GetAttr("score_threshold")); + int nms_top_k = boost::get(multiclass_nms->Op()->GetAttr("nms_top_k")); + float nms_threshold = + boost::get(multiclass_nms->Op()->GetAttr("nms_threshold")); + float nms_eta = boost::get(multiclass_nms->Op()->GetAttr("nms_eta")); + int keep_top_k = + boost::get(multiclass_nms->Op()->GetAttr("keep_top_k")); + + std::vector concat1_input_names; + for (int i = 0; i < times; i++) { + concat1_input_names.push_back( + nodes[i * kNumFields + kPriorBoxLocOffset]->Name()); + } + + // int axis = boost::get(concat_op1->Op()->GetAttr("axis")); + framework::OpDesc concat1_desc; + concat1_desc.SetType("concat"); + concat1_desc.SetInput("X", concat1_input_names); + concat1_desc.SetAttr("axis", 2); + concat1_desc.SetOutput("Out", {concat_out1->Name()}); + + auto *new_add_concat_op = graph->CreateOpNode(&concat1_desc); + + for (int i = 0; i < times; i++) { + nodes[i * kNumFields + kPriorBoxLocOffset]->outputs.push_back( + new_add_concat_op); + new_add_concat_op->inputs.push_back( + nodes[i * kNumFields + kPriorBoxLocOffset]); + } + + framework::OpDesc new_op_desc; + new_op_desc.SetType("detection_out"); + new_op_desc.SetInput("PriorBox", {concat_out1->Name()}); + new_op_desc.SetInput("TargetBox", {box_coder_third_input->Name()}); + new_op_desc.SetInput("Scores", {multiclass_nms_second_input->Name()}); + new_op_desc.SetAttr("code_type", code_type); + new_op_desc.SetAttr("box_normalized", box_normalized); + new_op_desc.SetAttr("background_label", background_label); + new_op_desc.SetAttr("score_threshold", score_threshold); + new_op_desc.SetAttr("nms_top_k", nms_top_k); + new_op_desc.SetAttr("nms_threshold", nms_threshold); + new_op_desc.SetAttr("nms_eta", nms_eta); + new_op_desc.SetAttr("keep_top_k", keep_top_k); + new_op_desc.SetOutput("Out", {multiclass_nms_out->Name()}); + new_op_desc.Flush(); + + // Create a new node for the fused op. + auto *detection_out_op = graph->CreateOpNode(&new_op_desc); + + std::unordered_set delete_nodes; + + for (int i = 0; i < times; i++) { + nodes[i * kNumFields + kPriorBoxLocOffset]->outputs.push_back(concat_op1); + delete_nodes.insert(nodes[i * kNumFields + kReshape1Offset]); + delete_nodes.insert(nodes[i * kNumFields + kReshape1OutOffset]); + delete_nodes.insert(nodes[i * kNumFields + kPriorBoxVarOffset]); + delete_nodes.insert(nodes[i * kNumFields + kReshape2Offset]); + delete_nodes.insert(nodes[i * kNumFields + kReshape2OutOffset]); + } + + delete_nodes.insert(concat_op1); + delete_nodes.insert(concat_op2); + delete_nodes.insert(concat_out2); + delete_nodes.insert(box_coder_op); + delete_nodes.insert(box_coder_out); + delete_nodes.insert(transpose_before_nms); + delete_nodes.insert(transpose_before_nms_out); + delete_nodes.insert(multiclass_nms); + + new_add_concat_op->outputs.push_back(concat_out1); + concat_out1->inputs.push_back(new_add_concat_op); + + detection_out_op->inputs.push_back(concat_out1); + detection_out_op->inputs.push_back(box_coder_third_input); + detection_out_op->inputs.push_back(multiclass_nms_second_input); + detection_out_op->outputs.push_back(multiclass_nms_out); + + concat_out1->outputs.push_back(detection_out_op); + box_coder_third_input->outputs.push_back(detection_out_op); + multiclass_nms_second_input->outputs.push_back(detection_out_op); + multiclass_nms_out->inputs.push_back(detection_out_op); + + // Delete the unneeded nodes. + GraphSafeRemoveNodes(graph.get(), delete_nodes); + }; + + gpd(graph.get(), handler); + return graph; +} + +template class SimplifyAnakinDetectionPatternPass<1>; +template class SimplifyAnakinDetectionPatternPass<2>; +template class SimplifyAnakinDetectionPatternPass<3>; +template class SimplifyAnakinDetectionPatternPass<4>; +template class SimplifyAnakinDetectionPatternPass<5>; +template class SimplifyAnakinDetectionPatternPass<6>; + +} // namespace ir +} // namespace framework +} // namespace paddle + +REGISTER_PASS(simplify_anakin_detection_pattern_pass, + paddle::framework::ir::SimplifyAnakinDetectionPatternPass<1>); + +REGISTER_PASS(simplify_anakin_detection_pattern_pass2, + paddle::framework::ir::SimplifyAnakinDetectionPatternPass<2>); + +REGISTER_PASS(simplify_anakin_detection_pattern_pass3, + paddle::framework::ir::SimplifyAnakinDetectionPatternPass<3>); + +REGISTER_PASS(simplify_anakin_detection_pattern_pass4, + paddle::framework::ir::SimplifyAnakinDetectionPatternPass<4>); + +REGISTER_PASS(simplify_anakin_detection_pattern_pass5, + paddle::framework::ir::SimplifyAnakinDetectionPatternPass<5>); + +REGISTER_PASS(simplify_anakin_detection_pattern_pass6, + paddle::framework::ir::SimplifyAnakinDetectionPatternPass<6>); diff --git a/paddle/fluid/framework/ir/simplify_anakin_detection_pattern_pass.h b/paddle/fluid/framework/ir/simplify_anakin_detection_pattern_pass.h new file mode 100644 index 0000000000000000000000000000000000000000..2338e4c38b253e2110addcca494e1cae5b58beaf --- /dev/null +++ b/paddle/fluid/framework/ir/simplify_anakin_detection_pattern_pass.h @@ -0,0 +1,41 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once +#include +#include +#include "paddle/fluid/framework/ir/fuse_pass_base.h" +#include "paddle/fluid/framework/ir/graph_pattern_detector.h" + +namespace paddle { +namespace framework { +namespace ir { + +// There may be many transpose-flatten structures in a model, and the output of +// these structures will be used as inputs to the concat Op. This pattern will +// be detected by our pass. The times here represents the repeat times of this +// structure. +template +class SimplifyAnakinDetectionPatternPass : public FusePassBase { + public: + virtual ~SimplifyAnakinDetectionPatternPass() {} + + protected: + std::unique_ptr ApplyImpl( + std::unique_ptr graph) const override; +}; + +} // namespace ir +} // namespace framework +} // namespace paddle diff --git a/paddle/fluid/framework/ir/transpose_flatten_concat_fuse_pass.cc b/paddle/fluid/framework/ir/transpose_flatten_concat_fuse_pass.cc index fda43948d567689103815e3ad7ba285719dae80f..cab69c408defadad32eba83e47d18f0f82ccc771 100644 --- a/paddle/fluid/framework/ir/transpose_flatten_concat_fuse_pass.cc +++ b/paddle/fluid/framework/ir/transpose_flatten_concat_fuse_pass.cc @@ -12,7 +12,9 @@ // See the License for the specific language governing permissions and // limitations under the License. +#include #include +#include #include #include "paddle/fluid/framework/ir/graph_viz_pass.h" @@ -123,6 +125,7 @@ std::unique_ptr TransposeFlattenConcatFusePass::ApplyImpl( } template class TransposeFlattenConcatFusePass<1>; +template class TransposeFlattenConcatFusePass<2>; template class TransposeFlattenConcatFusePass<3>; template class TransposeFlattenConcatFusePass<4>; template class TransposeFlattenConcatFusePass<5>; @@ -135,6 +138,9 @@ template class TransposeFlattenConcatFusePass<6>; REGISTER_PASS(transpose_flatten_concat_fuse_pass, paddle::framework::ir::TransposeFlattenConcatFusePass<1>); +REGISTER_PASS(transpose_flatten2_concat_fuse_pass, + paddle::framework::ir::TransposeFlattenConcatFusePass<2>); + REGISTER_PASS(transpose_flatten3_concat_fuse_pass, paddle::framework::ir::TransposeFlattenConcatFusePass<3>); diff --git a/paddle/fluid/framework/no_need_buffer_vars_inference.h b/paddle/fluid/framework/no_need_buffer_vars_inference.h new file mode 100644 index 0000000000000000000000000000000000000000..2c933659840d02e65c3b222144a31e558e8e8ae8 --- /dev/null +++ b/paddle/fluid/framework/no_need_buffer_vars_inference.h @@ -0,0 +1,60 @@ +// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once + +#include +#include +#include +#include "paddle/fluid/framework/op_desc.h" + +namespace paddle { +namespace framework { + +class NoNeedBufferVarsInference { + public: + NoNeedBufferVarsInference(const VariableNameMap &inputs, + const VariableNameMap &outputs, + const AttributeMap &attrs) + : inputs_(inputs), outputs_(outputs), attrs_(attrs) {} + + virtual ~NoNeedBufferVarsInference() = default; + + const VariableNameMap &Inputs() const { return inputs_; } + + const VariableNameMap &Outputs() const { return outputs_; } + + const AttributeMap &Attrs() const { return attrs_; } + + virtual std::unordered_set operator()() const = 0; + + private: + const VariableNameMap &inputs_; + const VariableNameMap &outputs_; + const AttributeMap &attrs_; +}; + +#define DECLARE_NO_NEED_BUFFER_VARS_INFERENCE(class_type, ...) \ + class class_type : public ::paddle::framework::NoNeedBufferVarsInference { \ + public: \ + using ::paddle::framework::NoNeedBufferVarsInference:: \ + NoNeedBufferVarsInference; \ + \ + std::unordered_set operator()() const override { \ + return {__VA_ARGS__}; \ + } \ + } + +} // namespace framework +} // namespace paddle diff --git a/paddle/fluid/framework/op_info.h b/paddle/fluid/framework/op_info.h index 4b55bd0703eee399cd841f90ea0b18d8fbdc67e8..e200d188b3f2462657bbac086d7659b1f85e55e9 100644 --- a/paddle/fluid/framework/op_info.h +++ b/paddle/fluid/framework/op_info.h @@ -19,6 +19,7 @@ limitations under the License. */ #include #include "paddle/fluid/framework/attribute.h" +#include "paddle/fluid/framework/no_need_buffer_vars_inference.h" #include "paddle/fluid/framework/type_defs.h" #include "paddle/fluid/platform/macros.h" @@ -39,6 +40,7 @@ struct OpInfo { InferVarTypeFN infer_var_type_; InferShapeFN infer_shape_; InferInplaceOpFN infer_inplace_; + InferNoNeedBufferVarsFN infer_no_need_buffer_vars_; bool HasOpProtoAndChecker() const { return proto_ != nullptr && checker_ != nullptr; @@ -64,6 +66,10 @@ struct OpInfo { } const OpAttrChecker* Checker() const { return checker_; } + + const InferNoNeedBufferVarsFN& NoNeedBufferVarsInferer() const { + return infer_no_need_buffer_vars_; + } }; class OpInfoMap { diff --git a/paddle/fluid/framework/operator.cc b/paddle/fluid/framework/operator.cc index 1ba2bed886beb04d05856ac1235b7164e80f3676..eef84d17a4b9ec54f77e60b09f3a13b151794bea 100644 --- a/paddle/fluid/framework/operator.cc +++ b/paddle/fluid/framework/operator.cc @@ -18,6 +18,7 @@ limitations under the License. */ #include #include #include +#include #include #include "paddle/fluid/framework/data_transform.h" #include "paddle/fluid/framework/executor.h" @@ -64,9 +65,9 @@ static DDim GetDims(const Scope& scope, const std::string& name, if (var->IsType()) { const LoDTensor& tensor = var->Get(); - if (UNLIKELY(!tensor.IsInitialized())) { - return DDim({-1}); - } + // if (UNLIKELY(!tensor.IsInitialized())) { + // return DDim({-1}); + // } return tensor.dims(); } else if (var->IsType()) { if (get_actual_dim) { @@ -132,9 +133,9 @@ static LoD GetLoD(const Scope& scope, const std::string& name) { if (var->IsType()) { const LoDTensor& tensor = var->Get(); - if (UNLIKELY(!tensor.IsInitialized())) { - return default_lod; - } + // if (UNLIKELY(!tensor.IsInitialized())) { + // return default_lod; + // } return tensor.lod(); } else { return default_lod; @@ -326,7 +327,12 @@ OperatorBase::OperatorBase(const std::string& type, const VariableNameMap& inputs, const VariableNameMap& outputs, const AttributeMap& attrs) - : type_(type), inputs_(inputs), outputs_(outputs), attrs_(attrs) { + : type_(type), + inputs_(inputs), + outputs_(outputs), + attrs_(attrs), + // NOTE(zjl): why op_info may be nullptr? + info_(OpInfoMap::Instance().GetNullable(type)) { GenerateTemporaryNames(); CheckAllInputOutputSet(); } @@ -350,7 +356,7 @@ std::vector OperatorBase::OutputVars(bool has_intermediate) const { } return ret_val; } - auto& info = OpInfoMap::Instance().Get(Type()); + auto& info = Info(); // get all OpProto::Var for outputs for (auto& o : info.Proto().outputs()) { @@ -366,18 +372,16 @@ std::vector OperatorBase::OutputVars(bool has_intermediate) const { } void OperatorBase::CheckAllInputOutputSet() const { - auto& info_map = OpInfoMap::Instance(); - auto* op_info = info_map.GetNullable(Type()); - if (op_info == nullptr || op_info->proto_ == nullptr) return; + if (info_ == nullptr || info_->proto_ == nullptr) return; - for (auto& in : op_info->Proto().inputs()) { + for (auto& in : info_->Proto().inputs()) { if (!in.dispensable()) { PADDLE_ENFORCE(inputs_.find(in.name()) != inputs_.end(), "Operator %s's input, %s, is not set", Type(), in.name()); } } - for (auto& out : op_info->Proto().outputs()) { + for (auto& out : info_->Proto().outputs()) { if (!out.dispensable()) { PADDLE_ENFORCE(outputs_.find(out.name()) != outputs_.end(), "Operator %s's output, %s, is not set", Type(), @@ -997,7 +1001,27 @@ Scope* OperatorWithKernel::PrepareData( std::vector* transfered_inplace_vars, RuntimeContext* ctx) const { Scope* new_scope = nullptr; + + std::unordered_set no_buffer_ins; + if (info_) { + auto& no_buffer_inferer = info_->NoNeedBufferVarsInferer(); + // Some op may not register NoNeedBufferVarsInferer + if (no_buffer_inferer) { + no_buffer_ins = no_buffer_inferer(Inputs(), Outputs(), Attrs()); + } + } + for (auto& var_name_item : Inputs()) { + // NOTE(zjl): STL does not guarantee fast std::unordered_set::count when set + // is empty. At least STL implemented on my mac does calculate hash code + // of search key even though the set is empty. + if (!no_buffer_ins.empty() && + no_buffer_ins.count(var_name_item.first) > 0) { + VLOG(1) << "Skip scanning input " << var_name_item.first + << " in Operator " << type_; + continue; + } + std::vector& input_vars = ctx->inputs[var_name_item.first]; for (size_t i = 0; i < var_name_item.second.size(); ++i) { diff --git a/paddle/fluid/framework/operator.h b/paddle/fluid/framework/operator.h index 684960c235b334f605553c4daed8fb7653be121b..a02e53dcf764368601646a900833ac650c5bb31a 100644 --- a/paddle/fluid/framework/operator.h +++ b/paddle/fluid/framework/operator.h @@ -160,6 +160,11 @@ class OperatorBase { const VariableNameMap& Inputs() const { return inputs_; } const VariableNameMap& Outputs() const { return outputs_; } + const OpInfo& Info() const { + PADDLE_ENFORCE_NOT_NULL(info_, "OpInfo of %s is not found", type_); + return *info_; + } + bool HasInputs(const std::string& name) const; //! Get a input with argument's name described in `op_proto` std::string Input(const std::string& name) const; @@ -194,6 +199,10 @@ class OperatorBase { // IG (Inputs Gradients) VariableNameMap outputs_; AttributeMap attrs_; + + // OpInfo + const OpInfo* info_; + // Whether this operator executes in an Executor. bool run_by_executor_{true}; @@ -444,7 +453,7 @@ class OperatorWithKernel : public OperatorBase { } virtual void InferShape(InferShapeContext* ctx) const { - OpInfoMap::Instance().Get(Type()).infer_shape_(ctx); + Info().infer_shape_(ctx); } void RuntimeInferShape(const Scope& scope, const platform::Place& place, diff --git a/paddle/fluid/framework/scope.cc b/paddle/fluid/framework/scope.cc index d79bf25518bbe624f2913839ec7d7d80816b3b69..a96baaf41f3fcd24817421a7b620343558cd78d1 100644 --- a/paddle/fluid/framework/scope.cc +++ b/paddle/fluid/framework/scope.cc @@ -29,15 +29,6 @@ DEFINE_bool( "Delete local scope eagerly. It will reduce GPU memory usage but " "slow down the destruction of variables.(around 1% performance harm)"); -DEFINE_double( - eager_delete_tensor_gb, -1.0, - "Memory size threshold (GB) when the garbage collector clear tensors." - "Disabled when this value is less than 0"); - -DEFINE_bool(fast_eager_deletion_mode, true, - "Fast eager deletion mode. If enabled, memory would release " - "immediately without waiting GPU kernel ends."); - // When in inference scenario, the scopes will not be written by two threads in // a mean time, but a scope may be read by multiple threads concurrently, and // the mutex will cause serious performance issue. @@ -57,15 +48,6 @@ DEFINE_bool(fast_eager_deletion_mode, true, namespace paddle { namespace framework { -int64_t GetEagerDeletionThreshold() { - return FLAGS_eager_delete_tensor_gb < 0 - ? -1 - : static_cast(FLAGS_eager_delete_tensor_gb * - (static_cast(1) << 30)); -} - -bool IsFastEagerDeletionModeEnabled() { return FLAGS_fast_eager_deletion_mode; } - Scope::~Scope() { DropKids(); } Scope& Scope::NewScope() const { diff --git a/paddle/fluid/framework/scope.h b/paddle/fluid/framework/scope.h index f0915d2eee072b0bcd53f37dad5ef9d801c87172..242cbae7163c48fa44dca9237f1cd35f9ec98442 100644 --- a/paddle/fluid/framework/scope.h +++ b/paddle/fluid/framework/scope.h @@ -32,9 +32,6 @@ extern "C" { namespace paddle { namespace framework { -int64_t GetEagerDeletionThreshold(); -bool IsFastEagerDeletionModeEnabled(); - class Scope; /** diff --git a/paddle/fluid/framework/type_defs.h b/paddle/fluid/framework/type_defs.h index f55520901c53fcc5bea90c5758f401f021a5c723..4ae6a272d5b043f25015ad8d5cfc2139d394ed5c 100644 --- a/paddle/fluid/framework/type_defs.h +++ b/paddle/fluid/framework/type_defs.h @@ -30,6 +30,7 @@ class InferShapeContext; class InferVarTypeContext; class BlockDesc; class Variable; +class NoNeedBufferVarsInference; using VariableNameMap = std::map>; // TODO(panyx0718): Replace vector with something like gtl::Vector. @@ -59,7 +60,11 @@ using InferVarTypeFN = using InferShapeFN = std::function; using InplacePair = std::unordered_map; -using InferInplaceOpFN = std::function; +using InferInplaceOpFN = std::function; + +using InferNoNeedBufferVarsFN = std::function( + const VariableNameMap& /*inputs*/, const VariableNameMap& /*outputs*/, + const AttributeMap& /*attrs*/)>; } // namespace framework } // namespace paddle diff --git a/paddle/fluid/imperative/layer.cc b/paddle/fluid/imperative/layer.cc index 3d1de95f58ded4af7fcc3d4c75b4d5e1aa63f13f..036d2a50a4a7ea3ce7e052a56202b1d54465b03e 100644 --- a/paddle/fluid/imperative/layer.cc +++ b/paddle/fluid/imperative/layer.cc @@ -315,6 +315,9 @@ std::map> OpBase::ApplyGrad() { for (size_t i = 0; i < outputs.size(); ++i) { framework::Variable* grad = outputs[i]->var_; framework::Variable* orig_grad = origin_outputs[i]->var_; + VLOG(3) << "AddTo Called with orig_grad is: " + << origin_outputs[i]->name_ << " Grad to be added is " + << outputs[i]->name_; AddTo(grad, orig_grad, place_); delete grad; } diff --git a/paddle/fluid/imperative/tracer.cc b/paddle/fluid/imperative/tracer.cc index 0cfdea030eb4ef297e26fabb7fc394e5cbc19033..7c9d0af3ecd647604ab46ee6239fc352e5fd8d85 100644 --- a/paddle/fluid/imperative/tracer.cc +++ b/paddle/fluid/imperative/tracer.cc @@ -277,6 +277,7 @@ std::set Tracer::Trace(OpBase* op, const VarBasePtrMap& inputs, VarBase* var = current_vars_map[var_it->second]; InitGrad(var, prepared_op.GetDeviceContext()); grad_out_vars.push_back(var->grads_); + VLOG(3) << "grads output var name: " << var->name_; } } } diff --git a/paddle/fluid/inference/CMakeLists.txt b/paddle/fluid/inference/CMakeLists.txt index d27ef8fe3c33f0b293671a4fdac9e574cb92c806..4cd29486a8e846dc04fbc4e467f2c40782408dfa 100644 --- a/paddle/fluid/inference/CMakeLists.txt +++ b/paddle/fluid/inference/CMakeLists.txt @@ -16,7 +16,10 @@ add_subdirectory(utils) if (TENSORRT_FOUND) add_subdirectory(tensorrt) endif() -# add_subdirectory(anakin) + +if (ANAKIN_FOUND) + add_subdirectory(anakin) +endif() get_property(fluid_modules GLOBAL PROPERTY FLUID_MODULES) get_property(cuda_modules GLOBAL PROPERTY CUDA_MODULES) diff --git a/paddle/fluid/inference/anakin/CMakeLists.txt b/paddle/fluid/inference/anakin/CMakeLists.txt index b418af62f8cae4513bcca24f057d1fe100bbea25..e8fb56590563f49f920bfe71d160ec822cb3ca30 100644 --- a/paddle/fluid/inference/anakin/CMakeLists.txt +++ b/paddle/fluid/inference/anakin/CMakeLists.txt @@ -1,4 +1,5 @@ -cc_library(anakin_engine SRCS engine.cc) +cc_library(anakin_engine SRCS engine.cc DEPS framework_proto) +cc_library(anakin_op_teller SRCS op_teller.cc DEPS framework_proto) target_link_libraries(anakin_engine anakin anakin_saber_common) cc_test(test_anakin_engine SRCS test_anakin_engine.cc DEPS anakin_engine) add_subdirectory(convert) diff --git a/paddle/fluid/inference/anakin/convert/CMakeLists.txt b/paddle/fluid/inference/anakin/convert/CMakeLists.txt index f5bfee861f14877b5a67bc48aeb14b8213a27370..1e7f5ac799de0d7a1debec0529d262f021bba790 100644 --- a/paddle/fluid/inference/anakin/convert/CMakeLists.txt +++ b/paddle/fluid/inference/anakin/convert/CMakeLists.txt @@ -1,2 +1,19 @@ -cc_library(anakin_op_converter SRCS fc.cc registrar.cc DEPS anakin_engine framework_proto scope) -cc_test(test_anakin_fc SRCS test_fc_op.cc DEPS anakin_op_converter mul_op) +cc_library(anakin_op_converter SRCS fc.cc conv2d.cc conv2d_fusion.cc + elementwise.cc activation.cc pool2d.cc concat.cc split.cc relu.cc softmax.cc batch_norm.cc reshape.cc flatten.cc transpose.cc density_prior_box.cc detection_out.cc scale.cc dropout.cc im2sequence.cc sum.cc DEPS anakin_engine framework_proto scope op_registry) + +cc_test(test_anakin_fc SRCS test_fc_op.cc DEPS anakin_op_converter mul_op SERIAL) +cc_test(test_anakin_conv2d SRCS test_conv2d_op.cc DEPS anakin_op_converter conv_op im2col vol2col depthwise_conv SERIAL) +cc_test(test_anakin_activation SRCS test_activation_op.cc DEPS activation_op anakin_op_converter SERIAL) +cc_test(test_anakin_pool2d SRCS test_pool2d_op.cc DEPS anakin_op_converter pool_op pooling SERIAL) +cc_test(test_anakin_concat SRCS test_concat_op.cc DEPS anakin_op_converter concat_op concat_and_split SERIAL) +cc_test(test_anakin_split SRCS test_split_op.cc DEPS anakin_op_converter split_op concat_and_split SERIAL) +cc_test(test_anakin_elementwise SRCS test_elementwise_op.cc DEPS anakin_op_converter elementwise_add_op elementwise_mul_op SERIAL) +cc_test(test_anakin_relu SRCS test_relu_op.cc DEPS activation_op anakin_op_converter SERIAL SERIAL) +cc_test(test_anakin_softmax SRCS test_softmax_op.cc DEPS anakin_op_converter softmax_op softmax SERIAL) +cc_test(test_anakin_reshape SRCS test_reshape_op.cc DEPS anakin_op_converter reshape_op SERIAL) +cc_test(test_anakin_flatten SRCS test_flatten_op.cc DEPS anakin_op_converter flatten_op reshape_op SERIAL) +cc_test(test_anakin_transpose SRCS test_transpose_op.cc DEPS anakin_op_converter transpose_op SERIAL) +cc_test(test_anakin_batch_norm SRCS test_batch_norm_op.cc DEPS anakin_op_converter batch_norm_op SERIAL) +cc_test(test_anakin_dropout SRCS test_dropout_op.cc DEPS anakin_op_converter dropout_op SERIAL) +#cc_test(test_anakin_im2sequence SRCS test_im2sequence_op.cc DEPS anakin_op_converter im2sequence_op im2col) +cc_test(test_anakin_sum SRCS test_sum_op.cc DEPS anakin_op_converter sum_op selected_rows_functor SERIAL) diff --git a/paddle/fluid/inference/anakin/convert/activation.cc b/paddle/fluid/inference/anakin/convert/activation.cc new file mode 100644 index 0000000000000000000000000000000000000000..c85b958d7b85cb3e21df8714c89eee10b9b3fecc --- /dev/null +++ b/paddle/fluid/inference/anakin/convert/activation.cc @@ -0,0 +1,55 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "paddle/fluid/inference/anakin/convert/activation.h" +#include +#include + +using anakin::graph::GraphGlobalMem; +using anakin::AK_FLOAT; +using anakin::saber::NV; +using anakin::saber::Shape; + +namespace paddle { +namespace inference { +namespace anakin { + +ActivationOpConverter::ActivationOpConverter(const std::string &op_type) + : op_type_(op_type) { + auto it = anakin_op_types_.find(op_type_); + PADDLE_ENFORCE(it != anakin_op_types_.end(), + "activation op type is not support"); + anakin_op_type_ = it->second; +} + +void ActivationOpConverter::operator()(const framework::proto::OpDesc &op, + const framework::Scope &scope, + bool test_mode) { + framework::OpDesc op_desc(op, nullptr); + PADDLE_ENFORCE_EQ(op_desc.Input("X").size(), 1); + PADDLE_ENFORCE_EQ(op_desc.Output("Out").size(), 1); + + auto op_name = op_desc.Type() + ":" + op_desc.Output("Out").front(); + auto input_name = op_desc.Input("X").front(); + auto output_name = op_desc.Output("Out").front(); + engine_->AddOp(op_name, "Activation", {input_name}, {output_name}); + engine_->AddOpAttr(op_name, "type", anakin_op_type_); +} + +} // namespace anakin +} // namespace inference +} // namespace paddle + +REGISTER_ANAKIN_OP_CONVERTER(sigmoid, SigmoidOpConverter); +REGISTER_ANAKIN_OP_CONVERTER(tanh, TanhOpConverter); diff --git a/paddle/fluid/inference/anakin/convert/activation.h b/paddle/fluid/inference/anakin/convert/activation.h new file mode 100644 index 0000000000000000000000000000000000000000..49a4518bef418491a7fbc0bcde403bf047f774bd --- /dev/null +++ b/paddle/fluid/inference/anakin/convert/activation.h @@ -0,0 +1,52 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once + +#include +#include +#include "paddle/fluid/inference/anakin/convert/op_converter.h" + +namespace paddle { +namespace inference { +namespace anakin { + +class ActivationOpConverter : public AnakinOpConverter { + public: + explicit ActivationOpConverter(const std::string &op_type); + + virtual void operator()(const framework::proto::OpDesc &op, + const framework::Scope &scope, + bool test_mode) override; + virtual ~ActivationOpConverter() {} + + private: + std::string op_type_; + std::string anakin_op_type_; + std::map anakin_op_types_{{"tanh", "TanH"}, + {"sigmoid", "Sigmoid"}}; +}; + +class TanhOpConverter : public ActivationOpConverter { + public: + TanhOpConverter() : ActivationOpConverter("tanh") {} +}; + +class SigmoidOpConverter : public ActivationOpConverter { + public: + SigmoidOpConverter() : ActivationOpConverter("sigmoid") {} +}; +} // namespace anakin +} // namespace inference +} // namespace paddle diff --git a/paddle/fluid/inference/anakin/convert/batch_norm.cc b/paddle/fluid/inference/anakin/convert/batch_norm.cc new file mode 100644 index 0000000000000000000000000000000000000000..94014802bdbe1792e9eaba28d7134624dd3edc90 --- /dev/null +++ b/paddle/fluid/inference/anakin/convert/batch_norm.cc @@ -0,0 +1,127 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "paddle/fluid/inference/anakin/convert/batch_norm.h" +#include +#include +#include +#include +#include + +using anakin::graph::GraphGlobalMem; +using anakin::AK_FLOAT; +using anakin::saber::NV; +using anakin::saber::Shape; + +namespace paddle { +namespace inference { +namespace anakin { + +void BatchNormOpConverter::operator()(const framework::proto::OpDesc &op, + const framework::Scope &scope, + bool test_mode) { + framework::OpDesc op_desc(op, nullptr); + PADDLE_ENFORCE_EQ(op_desc.Output("Y").size(), 1); + std::map inputs; + for (auto k : {"X", "Scale", "Bias", "Mean", "Variance"}) { + PADDLE_ENFORCE_EQ(op_desc.Input(k).size(), 1UL); + auto v = op_desc.Input(k).front(); + inputs.insert({k, v}); + } + + auto output = op_desc.Output("Y").front(); + auto op_name = op_desc.Type() + ":" + op_desc.Output("Y").front(); + auto epsilon = boost::get(op_desc.GetAttr("epsilon")); + // auto momentum = boost::get(op_desc.GetAttr("momentum")); + + auto bn_op_name = op_name + ":bn"; + auto bn_output = bn_op_name + "_output"; + engine_->AddOp(bn_op_name, "BatchNorm", {inputs["X"]}, {bn_output}); + engine_->AddOpAttr(bn_op_name, "epsilon", epsilon); + engine_->AddOpAttr(bn_op_name, "momentum", static_cast(1.0)); + + auto scale_op_name = op_name + ":scale"; + auto get_lod_tensor = [this, &scope, &op_name](const std::string &var_name, + framework::LoDTensor *tensor) { + auto *v = scope.FindVar(var_name); + PADDLE_ENFORCE_NOT_NULL(v); + auto *t = v->GetMutable(); + tensor->Resize(t->dims()); + TensorCopySync(*t, platform::CPUPlace(), tensor); + }; + + framework::LoDTensor bias_t; + framework::LoDTensor mean_t; + framework::LoDTensor scale_t; + framework::LoDTensor variance_t; + get_lod_tensor(inputs["Bias"], &bias_t); + get_lod_tensor(inputs["Mean"], &mean_t); + get_lod_tensor(inputs["Scale"], &scale_t); + get_lod_tensor(inputs["Variance"], &variance_t); + + auto fill_shape = [](size_t n, std::vector shape) { + shape.insert(shape.begin(), 1); + if (shape.size() < n) { + shape.insert(shape.end(), n - shape.size(), 1); + } + return shape; + }; + Shape shape1(fill_shape(4, framework::vectorize2int(mean_t.dims()))); + Shape shape2(fill_shape(4, framework::vectorize2int(variance_t.dims()))); + auto *weight1 = + GraphGlobalMem::Global().template new_block(shape1); + auto *mean_data = static_cast(weight1->h_tensor().mutable_data()); + std::copy_n(mean_t.data(), mean_t.numel(), mean_data); + engine_->AddOpAttr(bn_op_name, "weight_1", *weight1); + + auto *weight2 = + GraphGlobalMem::Global().template new_block(shape2); + auto *variance_data = + static_cast(weight2->h_tensor().mutable_data()); + std::copy_n(variance_t.data(), variance_t.numel(), variance_data); + engine_->AddOpAttr(bn_op_name, "weight_2", *weight2); + + Shape shape3(std::vector({1, 1, 1, 1})); + auto *weight3 = + GraphGlobalMem::Global().template new_block(shape3); + auto *alpha_data = static_cast(weight3->h_tensor().mutable_data()); + float weight3_data[] = {1}; + std::copy(std::begin(weight3_data), std::end(weight3_data), alpha_data); + engine_->AddOpAttr(bn_op_name, "weight_3", *weight3); + + Shape scale_shape(fill_shape(4, framework::vectorize2int(scale_t.dims()))); + auto *scale = + GraphGlobalMem::Global().template new_block(scale_shape); + auto *scale_data = static_cast(scale->h_tensor().mutable_data()); + std::copy_n(scale_t.data(), scale_t.numel(), scale_data); + + Shape bias_shape(fill_shape(4, framework::vectorize2int(bias_t.dims()))); + auto *bias = + GraphGlobalMem::Global().template new_block(bias_shape); + auto *bias_data = static_cast(bias->h_tensor().mutable_data()); + std::copy_n(bias_t.data(), bias_t.numel(), bias_data); + + engine_->AddOp(scale_op_name, "Scale", {bn_output}, {output}); + engine_->AddOpAttr(scale_op_name, "axis", 1); + engine_->AddOpAttr(scale_op_name, "num_axes", 1); + engine_->AddOpAttr(scale_op_name, "bias_term", true); + engine_->AddOpAttr(scale_op_name, "weight_1", *scale); + engine_->AddOpAttr(scale_op_name, "weight_2", *bias); +} + +} // namespace anakin +} // namespace inference +} // namespace paddle + +REGISTER_ANAKIN_OP_CONVERTER(batch_norm, BatchNormOpConverter); diff --git a/paddle/fluid/inference/anakin/convert/batch_norm.h b/paddle/fluid/inference/anakin/convert/batch_norm.h new file mode 100644 index 0000000000000000000000000000000000000000..cee5c43ae76bf28284118380ca4c861d5cbedd1c --- /dev/null +++ b/paddle/fluid/inference/anakin/convert/batch_norm.h @@ -0,0 +1,35 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once + +#include "paddle/fluid/inference/anakin/convert/op_converter.h" + +namespace paddle { +namespace inference { +namespace anakin { + +class BatchNormOpConverter : public AnakinOpConverter { + public: + BatchNormOpConverter() = default; + + virtual void operator()(const framework::proto::OpDesc &op, + const framework::Scope &scope, + bool test_mode) override; + virtual ~BatchNormOpConverter() {} +}; + +} // namespace anakin +} // namespace inference +} // namespace paddle diff --git a/paddle/fluid/inference/anakin/convert/concat.cc b/paddle/fluid/inference/anakin/convert/concat.cc new file mode 100644 index 0000000000000000000000000000000000000000..e2d1111acbb60690167530a25aeaf59858b71987 --- /dev/null +++ b/paddle/fluid/inference/anakin/convert/concat.cc @@ -0,0 +1,51 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "paddle/fluid/inference/anakin/convert/concat.h" +#include + +using anakin::graph::GraphGlobalMem; +using anakin::AK_FLOAT; +using anakin::Precision; +using anakin::saber::NV; +using anakin::saber::X86; +using anakin::saber::Shape; +using anakin::PBlock; +using anakin::PTuple; + +namespace paddle { +namespace inference { +namespace anakin { + +void ConcatOpConverter::operator()(const framework::proto::OpDesc &op, + const framework::Scope &scope, + bool test_mode) { + framework::OpDesc op_desc(op, nullptr); + int axis = boost::get(op_desc.GetAttr("axis")); + auto input_names = op_desc.Input("X"); + // PADDLE_ENFORCE(axis > 0, + // "The axis attr of Concat op should be large than 0 for trt"); + + auto y_name = op_desc.Output("Out").front(); + auto op_name = op_desc.Type() + ":" + op_desc.Output("Out").front(); + + engine_->AddOp(op_name, "Concat", input_names, {y_name}); + engine_->AddOpAttr(op_name, "axis", axis); +} + +} // namespace anakin +} // namespace inference +} // namespace paddle + +REGISTER_ANAKIN_OP_CONVERTER(concat, ConcatOpConverter); diff --git a/paddle/fluid/inference/anakin/convert/concat.h b/paddle/fluid/inference/anakin/convert/concat.h new file mode 100644 index 0000000000000000000000000000000000000000..4ff2b6d85b758efc7529c5034a34e094ee06cccb --- /dev/null +++ b/paddle/fluid/inference/anakin/convert/concat.h @@ -0,0 +1,37 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once + +#include "paddle/fluid/inference/anakin/convert/op_converter.h" + +namespace paddle { +namespace inference { +namespace anakin { + +class ConcatOpConverter : public AnakinOpConverter { + public: + ConcatOpConverter() = default; + + virtual void operator()(const framework::proto::OpDesc &op, + const framework::Scope &scope, + bool test_mode) override; + virtual ~ConcatOpConverter() {} + + private: +}; + +} // namespace anakin +} // namespace inference +} // namespace paddle diff --git a/paddle/fluid/inference/anakin/convert/conv2d.cc b/paddle/fluid/inference/anakin/convert/conv2d.cc new file mode 100644 index 0000000000000000000000000000000000000000..b99c6e71c4dfd2b567d85904f57ebecf0ed9a1cc --- /dev/null +++ b/paddle/fluid/inference/anakin/convert/conv2d.cc @@ -0,0 +1,87 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "paddle/fluid/inference/anakin/convert/conv2d.h" +#include +#include +#include + +using anakin::graph::GraphGlobalMem; +using anakin::AK_FLOAT; +using anakin::saber::NV; +using anakin::saber::Shape; +using anakin::PTuple; + +namespace paddle { +namespace inference { +namespace anakin { + +void Conv2dOpConverter::operator()(const framework::proto::OpDesc &op, + const framework::Scope &scope, + bool test_mode) { + framework::OpDesc op_desc(op, nullptr); + PADDLE_ENFORCE_EQ(op_desc.Input("Input").size(), 1UL); + PADDLE_ENFORCE_EQ(op_desc.Input("Filter").size(), 1UL); + PADDLE_ENFORCE_EQ(op_desc.Output("Output").size(), 1UL); + + auto input_name = op_desc.Input("Input").front(); + auto output_name = op_desc.Output("Output").front(); + auto op_name = op_desc.Type() + ":" + op_desc.Output("Output").front(); + engine_->AddOp(op_name, "Convolution", {input_name}, {output_name}); + + auto *filter_v = scope.FindVar(op_desc.Input("Filter").front()); + PADDLE_ENFORCE_NOT_NULL(filter_v); + auto *filter_t = filter_v->GetMutable(); + std::unique_ptr weight_tensor( + new framework::LoDTensor()); + weight_tensor->Resize(filter_t->dims()); + TensorCopySync((*filter_t), platform::CPUPlace(), weight_tensor.get()); + + PADDLE_ENFORCE_EQ(weight_tensor->dims().size(), 4UL); + + // const int n_output = weight_tensor->dims()[0]; + // const int n_input = weight_tensor->dims()[1]; + const int filter_h = weight_tensor->dims()[2]; + const int filter_w = weight_tensor->dims()[3]; + // auto filter_num = n_input * filter_h * filter_w ; + auto filter_num = weight_tensor->dims()[0]; + engine_->AddOpAttr(op_name, "filter_num", filter_num); + engine_->AddOpAttr>(op_name, "kernel_size", {filter_h, filter_w}); + auto strides = boost::get>(op_desc.GetAttr("strides")); + engine_->AddOpAttr>(op_name, "strides", strides); + auto paddings = boost::get>(op_desc.GetAttr("paddings")); + engine_->AddOpAttr>(op_name, "padding", paddings); + auto dilations = boost::get>(op_desc.GetAttr("dilations")); + engine_->AddOpAttr>(op_name, "dilation_rate", dilations); + const int groups = boost::get(op_desc.GetAttr("groups")); + engine_->AddOpAttr(op_name, "group", groups); + engine_->AddOpAttr(op_name, "axis", 1); + engine_->AddOpAttr(op_name, "bias_term", false); + + auto weight_shape = framework::vectorize2int(filter_t->dims()); + Shape anakin_shape(weight_shape); + auto *weight1 = + GraphGlobalMem::Global().template new_block(anakin_shape); + float *cpu_data = static_cast(weight1->h_tensor().mutable_data()); + std::copy_n(weight_tensor->data(), weight_tensor->numel(), cpu_data); + weight1->d_tensor().set_shape(anakin_shape); + weight1->d_tensor().copy_from(weight1->h_tensor()); + engine_->AddOpAttr(op_name, "weight_1", *weight1); +} + +} // namespace anakin +} // namespace inference +} // namespace paddle + +REGISTER_ANAKIN_OP_CONVERTER(conv2d, Conv2dOpConverter); diff --git a/paddle/fluid/inference/anakin/convert/registrar.cc b/paddle/fluid/inference/anakin/convert/conv2d.h similarity index 66% rename from paddle/fluid/inference/anakin/convert/registrar.cc rename to paddle/fluid/inference/anakin/convert/conv2d.h index 701ebdb2d43cf524330f946ac56d32dfa884f42a..75a30c10d481762fe5579ccb4d79feeba73dc98a 100644 --- a/paddle/fluid/inference/anakin/convert/registrar.cc +++ b/paddle/fluid/inference/anakin/convert/conv2d.h @@ -12,22 +12,23 @@ // See the License for the specific language governing permissions and // limitations under the License. -#include "paddle/fluid/inference/anakin/convert/registrar.h" +#pragma once + +#include "paddle/fluid/inference/anakin/convert/op_converter.h" namespace paddle { namespace inference { namespace anakin { -std::shared_ptr OpRegister::Get(const std::string &name) { - auto it = registry_.find(name); - if (it == registry_.end()) return nullptr; - return it->second(); -} +class Conv2dOpConverter : public AnakinOpConverter { + public: + Conv2dOpConverter() = default; -OpRegister *OpRegister::instance() { - static OpRegister factory; - return &factory; -} + virtual void operator()(const framework::proto::OpDesc &op, + const framework::Scope &scope, + bool test_mode) override; + virtual ~Conv2dOpConverter() {} +}; } // namespace anakin } // namespace inference diff --git a/paddle/fluid/inference/anakin/convert/conv2d_fusion.cc b/paddle/fluid/inference/anakin/convert/conv2d_fusion.cc new file mode 100644 index 0000000000000000000000000000000000000000..4d105430dd298076fa8aa4c1925329c3a0e356a1 --- /dev/null +++ b/paddle/fluid/inference/anakin/convert/conv2d_fusion.cc @@ -0,0 +1,113 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "paddle/fluid/inference/anakin/convert/conv2d_fusion.h" +#include +#include +#include + +using anakin::graph::GraphGlobalMem; +using anakin::AK_FLOAT; +using anakin::saber::NV; +using anakin::saber::Shape; +using anakin::PTuple; + +namespace paddle { +namespace inference { +namespace anakin { + +void Conv2dFusionOpConverter::operator()(const framework::proto::OpDesc &op, + const framework::Scope &scope, + bool test_mode) { + framework::OpDesc op_desc(op, nullptr); + PADDLE_ENFORCE_EQ(op_desc.Input("Input").size(), 1UL); + PADDLE_ENFORCE_EQ(op_desc.Input("Filter").size(), 1UL); + PADDLE_ENFORCE_EQ(op_desc.Input("Bias").size(), 1UL); + PADDLE_ENFORCE_EQ(op_desc.Output("Output").size(), 1UL); + + auto input_name = op_desc.Input("Input").front(); + auto output_name = op_desc.Output("Output").front(); + auto op_name = op_desc.Type() + ":" + op_desc.Output("Output").front(); + engine_->AddOp(op_name, "Convolution", {input_name}, {output_name}); + + auto *filter_v = scope.FindVar(op_desc.Input("Filter").front()); + PADDLE_ENFORCE_NOT_NULL(filter_v); + auto *filter_t = filter_v->GetMutable(); + + auto *b_v = scope.FindVar(op_desc.Input("Bias").front()); + PADDLE_ENFORCE_NOT_NULL(b_v); + auto *b_t = b_v->GetMutable(); + + std::unique_ptr weight_tensor( + new framework::LoDTensor()); + weight_tensor->Resize(filter_t->dims()); + TensorCopySync((*filter_t), platform::CPUPlace(), weight_tensor.get()); + + PADDLE_ENFORCE_EQ(weight_tensor->dims().size(), 4UL); + + // const int n_output = weight_tensor->dims()[0]; + // const int n_input = weight_tensor->dims()[1]; + const int filter_h = weight_tensor->dims()[2]; + const int filter_w = weight_tensor->dims()[3]; + // auto filter_num = n_input * filter_h * filter_w ; + auto filter_num = weight_tensor->dims()[0]; + engine_->AddOpAttr(op_name, "filter_num", filter_num); + engine_->AddOpAttr>(op_name, "kernel_size", {filter_h, filter_w}); + auto strides = boost::get>(op_desc.GetAttr("strides")); + engine_->AddOpAttr>(op_name, "strides", strides); + auto paddings = boost::get>(op_desc.GetAttr("paddings")); + engine_->AddOpAttr>(op_name, "padding", paddings); + auto dilations = boost::get>(op_desc.GetAttr("dilations")); + engine_->AddOpAttr>(op_name, "dilation_rate", dilations); + const int groups = boost::get(op_desc.GetAttr("groups")); + engine_->AddOpAttr(op_name, "group", groups); + engine_->AddOpAttr(op_name, "axis", 1); + engine_->AddOpAttr(op_name, "bias_term", true); + + auto weight_shape = framework::vectorize2int(filter_t->dims()); + Shape anakin_shape(weight_shape); + auto *weight1 = + GraphGlobalMem::Global().template new_block(anakin_shape); + float *cpu_data = static_cast(weight1->h_tensor().mutable_data()); + std::copy_n(weight_tensor->data(), weight_tensor->numel(), cpu_data); + weight1->d_tensor().set_shape(anakin_shape); + weight1->d_tensor().copy_from(weight1->h_tensor()); + engine_->AddOpAttr(op_name, "weight_1", *weight1); + + auto bias_shape = framework::vectorize2int(b_t->dims()); + framework::LoDTensor bias_tensor; + bias_tensor.Resize(b_t->dims()); + TensorCopySync((*b_t), platform::CPUPlace(), &bias_tensor); + auto *bias_data = bias_tensor.data(); + bias_shape.insert(bias_shape.begin(), 1); + bias_shape.insert(bias_shape.begin(), 1); + bias_shape.insert(bias_shape.begin(), 1); + // bias_shape.push_back(1); + // bias_shape.push_back(1); + Shape anakin_bias_shape(bias_shape); + + auto *weight2 = GraphGlobalMem::Global().template new_block( + anakin_bias_shape); + float *cpu_data2 = static_cast(weight2->h_tensor().mutable_data()); + std::copy_n(bias_data, bias_tensor.numel(), cpu_data2); + weight2->d_tensor().set_shape(anakin_bias_shape); + weight2->d_tensor().copy_from(weight2->h_tensor()); + engine_->AddOpAttr(op_name, "weight_2", *weight2); +} + +} // namespace anakin +} // namespace inference +} // namespace paddle + +REGISTER_ANAKIN_OP_CONVERTER(conv2d_fusion, Conv2dFusionOpConverter); diff --git a/paddle/fluid/inference/anakin/convert/conv2d_fusion.h b/paddle/fluid/inference/anakin/convert/conv2d_fusion.h new file mode 100644 index 0000000000000000000000000000000000000000..07359b9cba05bf7c885eb38d64816bdb718a6aba --- /dev/null +++ b/paddle/fluid/inference/anakin/convert/conv2d_fusion.h @@ -0,0 +1,35 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once + +#include "paddle/fluid/inference/anakin/convert/op_converter.h" + +namespace paddle { +namespace inference { +namespace anakin { + +class Conv2dFusionOpConverter : public AnakinOpConverter { + public: + Conv2dFusionOpConverter() = default; + + virtual void operator()(const framework::proto::OpDesc &op, + const framework::Scope &scope, + bool test_mode) override; + virtual ~Conv2dFusionOpConverter() {} +}; + +} // namespace anakin +} // namespace inference +} // namespace paddle diff --git a/paddle/fluid/inference/anakin/convert/density_prior_box.cc b/paddle/fluid/inference/anakin/convert/density_prior_box.cc new file mode 100644 index 0000000000000000000000000000000000000000..a55c153f99a815c0e0092b69b8e181630aed16bf --- /dev/null +++ b/paddle/fluid/inference/anakin/convert/density_prior_box.cc @@ -0,0 +1,90 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "paddle/fluid/inference/anakin/convert/density_prior_box.h" +#include +#include +#include + +using anakin::graph::GraphGlobalMem; +using anakin::AK_FLOAT; +using anakin::saber::NV; +using anakin::saber::Shape; +using anakin::PTuple; + +namespace paddle { +namespace inference { +namespace anakin { + +void DensityPriorBoxOpConverter::operator()(const framework::proto::OpDesc& op, + const framework::Scope& scope, + bool test_mode) { + framework::OpDesc op_desc(op, nullptr); + auto input_name = op_desc.Input("Input").front(); + auto image_name = op_desc.Input("Image").front(); + auto output_name = op_desc.Output("Boxes").front(); + + auto op_name = op_desc.Type() + ":" + op_desc.Output("Boxes").front(); + + auto fixed_sizes = + boost::get>(op_desc.GetAttr("fixed_sizes")); + auto fixed_ratios = + boost::get>(op_desc.GetAttr("fixed_ratios")); + auto densities = boost::get>(op_desc.GetAttr("densities")); + std::vector dens; + for (auto& ele : densities) { + dens.push_back(static_cast(ele)); + } + + // lack flip + // auto clip = boost::get(op_desc.GetAttr("clip")); + auto variances = boost::get>(op_desc.GetAttr("variances")); + for (auto& ele : variances) { + LOG(INFO) << ele; + } + + // lack img_h, img_w + auto step_h = boost::get(op_desc.GetAttr("step_h")); + auto step_w = boost::get(op_desc.GetAttr("step_w")); + auto offset = boost::get(op_desc.GetAttr("offset")); + PTuple t_order; + t_order.push_back("MIN"); + t_order.push_back("COM"); + t_order.push_back("MAX"); + + std::vector temp_v = {}; + + engine_->AddOp(op_name, "PriorBox", {input_name, image_name}, {output_name}); + engine_->AddOpAttr>(op_name, "min_size", temp_v); + engine_->AddOpAttr>(op_name, "max_size", temp_v); + engine_->AddOpAttr>(op_name, "aspect_ratio", temp_v); + engine_->AddOpAttr>(op_name, "fixed_size", fixed_sizes); + engine_->AddOpAttr>(op_name, "fixed_ratio", fixed_ratios); + engine_->AddOpAttr>(op_name, "density", dens); + engine_->AddOpAttr(op_name, "is_flip", static_cast(false)); + engine_->AddOpAttr(op_name, "is_clip", static_cast(false)); + engine_->AddOpAttr>(op_name, "variance", variances); + engine_->AddOpAttr(op_name, "img_h", static_cast(0)); + engine_->AddOpAttr(op_name, "img_w", static_cast(0)); + engine_->AddOpAttr(op_name, "step_h", step_h); + engine_->AddOpAttr(op_name, "step_w", step_w); + engine_->AddOpAttr(op_name, "offset", offset); + engine_->AddOpAttr>(op_name, "order", t_order); +} + +} // namespace anakin +} // namespace inference +} // namespace paddle + +REGISTER_ANAKIN_OP_CONVERTER(density_prior_box, DensityPriorBoxOpConverter); diff --git a/paddle/fluid/inference/anakin/convert/density_prior_box.h b/paddle/fluid/inference/anakin/convert/density_prior_box.h new file mode 100644 index 0000000000000000000000000000000000000000..44265cbf2e968e8821bc1a9ae3225c9b7d405235 --- /dev/null +++ b/paddle/fluid/inference/anakin/convert/density_prior_box.h @@ -0,0 +1,37 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once + +#include +#include +#include "paddle/fluid/inference/anakin/convert/op_converter.h" + +namespace paddle { +namespace inference { +namespace anakin { + +class DensityPriorBoxOpConverter : public AnakinOpConverter { + public: + DensityPriorBoxOpConverter() = default; + + virtual void operator()(const framework::proto::OpDesc &op, + const framework::Scope &scope, + bool test_mode) override; + virtual ~DensityPriorBoxOpConverter() {} +}; + +} // namespace anakin +} // namespace inference +} // namespace paddle diff --git a/paddle/fluid/inference/anakin/convert/detection_out.cc b/paddle/fluid/inference/anakin/convert/detection_out.cc new file mode 100644 index 0000000000000000000000000000000000000000..67636651017cfb18967cf8dc76d4f4a552fbd021 --- /dev/null +++ b/paddle/fluid/inference/anakin/convert/detection_out.cc @@ -0,0 +1,72 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "paddle/fluid/inference/anakin/convert/detection_out.h" +#include +#include + +using anakin::graph::GraphGlobalMem; +using anakin::AK_FLOAT; +using anakin::saber::NV; +using anakin::saber::Shape; + +namespace paddle { +namespace inference { +namespace anakin { + +void DetectionOutOpConverter::operator()(const framework::proto::OpDesc &op, + const framework::Scope &scope, + bool test_mode) { + framework::OpDesc op_desc(op, nullptr); + auto target_name = op_desc.Input("TargetBox").front(); + auto prior_box_name = op_desc.Input("PriorBox").front(); + auto scores_name = op_desc.Input("Scores").front(); + auto output_name = op_desc.Output("Out").front(); + + auto op_name = op_desc.Type() + ":" + op_desc.Output("Out").front(); + + auto code_type = boost::get(op_desc.GetAttr("code_type")); + auto background_label = boost::get(op_desc.GetAttr("background_label")); + auto score_threshold = boost::get(op_desc.GetAttr("score_threshold")); + auto nms_top_k = boost::get(op_desc.GetAttr("nms_top_k")); + auto nms_threshold = boost::get(op_desc.GetAttr("nms_threshold")); + auto nms_eta = boost::get(op_desc.GetAttr("nms_eta")); + auto keep_top_k = boost::get(op_desc.GetAttr("keep_top_k")); + std::string anakin_code_type; + if (code_type == "decode_center_size") { + anakin_code_type = "CENTER_SIZE"; + } else if (code_type == "encode_center_size") { + PADDLE_THROW( + "Not support encode_center_size code_type in DetectionOut of anakin"); + } + + engine_->AddOp(op_name, "DetectionOutput", + {target_name, scores_name, prior_box_name}, {output_name}); + engine_->AddOpAttr(op_name, "share_location", true); + engine_->AddOpAttr(op_name, "variance_encode_in_target", false); + engine_->AddOpAttr(op_name, "class_num", static_cast(0)); + engine_->AddOpAttr(op_name, "background_id", background_label); + engine_->AddOpAttr(op_name, "keep_top_k", keep_top_k); + engine_->AddOpAttr(op_name, "code_type", anakin_code_type); + engine_->AddOpAttr(op_name, "conf_thresh", score_threshold); + engine_->AddOpAttr(op_name, "nms_top_k", nms_top_k); + engine_->AddOpAttr(op_name, "nms_thresh", nms_threshold); + engine_->AddOpAttr(op_name, "nms_eta", nms_eta); +} + +} // namespace anakin +} // namespace inference +} // namespace paddle + +REGISTER_ANAKIN_OP_CONVERTER(detection_out, DetectionOutOpConverter); diff --git a/paddle/fluid/inference/anakin/convert/detection_out.h b/paddle/fluid/inference/anakin/convert/detection_out.h new file mode 100644 index 0000000000000000000000000000000000000000..5bf1c3ecbc89795d075301a2fd568312236bd874 --- /dev/null +++ b/paddle/fluid/inference/anakin/convert/detection_out.h @@ -0,0 +1,37 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once + +#include +#include +#include "paddle/fluid/inference/anakin/convert/op_converter.h" + +namespace paddle { +namespace inference { +namespace anakin { + +class DetectionOutOpConverter : public AnakinOpConverter { + public: + DetectionOutOpConverter() = default; + + virtual void operator()(const framework::proto::OpDesc &op, + const framework::Scope &scope, + bool test_mode) override; + virtual ~DetectionOutOpConverter() {} +}; + +} // namespace anakin +} // namespace inference +} // namespace paddle diff --git a/paddle/fluid/inference/anakin/convert/dropout.cc b/paddle/fluid/inference/anakin/convert/dropout.cc new file mode 100644 index 0000000000000000000000000000000000000000..ed6d7f7561cb78666855146864b33254026926ef --- /dev/null +++ b/paddle/fluid/inference/anakin/convert/dropout.cc @@ -0,0 +1,66 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "paddle/fluid/inference/anakin/convert/dropout.h" +#include +#include +#include + +using anakin::graph::GraphGlobalMem; +using anakin::AK_FLOAT; +using anakin::Precision; +using anakin::saber::NV; +using anakin::saber::X86; +using anakin::saber::Shape; +using anakin::PBlock; +using anakin::PTuple; + +namespace paddle { +namespace inference { +namespace anakin { + +void DropoutOpConverter::operator()(const framework::proto::OpDesc &op, + const framework::Scope &scope, + bool test_mode) { + framework::OpDesc op_desc(op, nullptr); + PADDLE_ENFORCE_EQ(op_desc.Input("X").size(), 1); + PADDLE_ENFORCE_EQ(op_desc.Output("Mask").size(), 1); + PADDLE_ENFORCE_EQ(op_desc.Output("Out").size(), 1); + + auto x_name = op_desc.Input("X").front(); + auto out_name = op_desc.Output("Out").front(); + auto op_name = op_desc.Type() + ":" + op_desc.Output("Out").front(); + + engine_->AddOp(op_name, "Scale", {x_name}, {out_name}); + + auto dropout_prob = boost::get(op_desc.GetAttr("dropout_prob")); + auto factor = 1 - dropout_prob; + Shape shape1(std::vector({1, 1, 1, 1})); + auto *weight1 = + GraphGlobalMem::Global().template new_block(shape1); + auto *factor_data = static_cast(weight1->h_tensor().mutable_data()); + float weight1_data[] = {factor}; + std::copy(std::begin(weight1_data), std::end(weight1_data), factor_data); + + engine_->AddOpAttr(op_name, "weight_1", *weight1); + engine_->AddOpAttr(op_name, "axis", 0); + engine_->AddOpAttr(op_name, "num_axes", 0); + engine_->AddOpAttr(op_name, "bias_term", false); +} + +} // namespace anakin +} // namespace inference +} // namespace paddle + +REGISTER_ANAKIN_OP_CONVERTER(dropout, DropoutOpConverter); diff --git a/paddle/fluid/inference/anakin/convert/dropout.h b/paddle/fluid/inference/anakin/convert/dropout.h new file mode 100644 index 0000000000000000000000000000000000000000..2a0fb6e76ac8354d884f9d815a4df785248e6475 --- /dev/null +++ b/paddle/fluid/inference/anakin/convert/dropout.h @@ -0,0 +1,37 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once + +#include "paddle/fluid/inference/anakin/convert/op_converter.h" + +namespace paddle { +namespace inference { +namespace anakin { + +class DropoutOpConverter : public AnakinOpConverter { + public: + DropoutOpConverter() = default; + + virtual void operator()(const framework::proto::OpDesc &op, + const framework::Scope &scope, + bool test_mode) override; + virtual ~DropoutOpConverter() {} + + private: +}; + +} // namespace anakin +} // namespace inference +} // namespace paddle diff --git a/paddle/fluid/inference/anakin/convert/elementwise.cc b/paddle/fluid/inference/anakin/convert/elementwise.cc new file mode 100644 index 0000000000000000000000000000000000000000..55b12390baf90a9365fd4d197b19a3c5cd675afd --- /dev/null +++ b/paddle/fluid/inference/anakin/convert/elementwise.cc @@ -0,0 +1,88 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "paddle/fluid/inference/anakin/convert/elementwise.h" +#include +#include +#include + +using anakin::graph::GraphGlobalMem; +using anakin::AK_FLOAT; +using anakin::Precision; +using anakin::saber::NV; +using anakin::saber::X86; +using anakin::saber::Shape; +using anakin::PBlock; +using anakin::PTuple; + +namespace paddle { +namespace inference { +namespace anakin { + +void ElementwiseAddOpConverter::operator()(const framework::proto::OpDesc &op, + const framework::Scope &scope, + bool test_mode) { + framework::OpDesc op_desc(op, nullptr); + PADDLE_ENFORCE_EQ(op_desc.Input("X").size(), 1); + PADDLE_ENFORCE_EQ(op_desc.Input("Y").size(), 1); + PADDLE_ENFORCE_EQ(op_desc.Output("Out").size(), 1); + + auto x_name = op_desc.Input("X").front(); + auto y_name = op_desc.Input("Y").front(); + auto out_name = op_desc.Output("Out").front(); + auto op_name = op_desc.Type() + ":" + op_desc.Output("Out").front(); + + engine_->AddOp(op_name, "Eltwise", {x_name, y_name}, {out_name}); + std::string elementwise_type = "Add"; + engine_->AddOpAttr(op_name, "type", elementwise_type); + std::vector coeff = {1.0, 1.0}; + engine_->AddOpAttr>(op_name, "coeff", coeff); +} + +void ElementwiseMulOpConverter::operator()(const framework::proto::OpDesc &op, + const framework::Scope &scope, + bool test_mode) { + framework::OpDesc op_desc(op, nullptr); + PADDLE_ENFORCE_EQ(op_desc.Input("X").size(), 1); + PADDLE_ENFORCE_EQ(op_desc.Input("Y").size(), 1); + PADDLE_ENFORCE_EQ(op_desc.Output("Out").size(), 1); + + auto x_name = op_desc.Input("X").front(); + auto y_name = op_desc.Input("Y").front(); + auto out_name = op_desc.Output("Out").front(); + auto op_name = op_desc.Type() + ":" + op_desc.Output("Out").front(); + + engine_->AddOp(op_name, "Scale", {x_name, y_name}, {out_name}); + // Fill a number to weight_1 as a placeholder. + Shape shape1(std::vector({1, 1, 1, 1})); + auto *weight1 = + GraphGlobalMem::Global().template new_block(shape1); + auto *placeholder_data = + static_cast(weight1->h_tensor().mutable_data()); + float weight1_data[] = {1}; + std::copy(std::begin(weight1_data), std::end(weight1_data), placeholder_data); + engine_->AddOpAttr(op_name, "weight_1", *weight1); + + auto axis = boost::get(op_desc.GetAttr("axis")); + engine_->AddOpAttr(op_name, "axis", axis); + engine_->AddOpAttr(op_name, "num_axes", 1); + engine_->AddOpAttr(op_name, "bias_term", false); +} + +} // namespace anakin +} // namespace inference +} // namespace paddle + +REGISTER_ANAKIN_OP_CONVERTER(elementwise_add, ElementwiseAddOpConverter); +REGISTER_ANAKIN_OP_CONVERTER(elementwise_mul, ElementwiseMulOpConverter); diff --git a/paddle/fluid/inference/anakin/convert/elementwise.h b/paddle/fluid/inference/anakin/convert/elementwise.h new file mode 100644 index 0000000000000000000000000000000000000000..47525e41daafcbca0c7c86bad44066f18a3ac79c --- /dev/null +++ b/paddle/fluid/inference/anakin/convert/elementwise.h @@ -0,0 +1,49 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once + +#include "paddle/fluid/inference/anakin/convert/op_converter.h" + +namespace paddle { +namespace inference { +namespace anakin { + +class ElementwiseAddOpConverter : public AnakinOpConverter { + public: + ElementwiseAddOpConverter() = default; + + virtual void operator()(const framework::proto::OpDesc &op, + const framework::Scope &scope, + bool test_mode) override; + virtual ~ElementwiseAddOpConverter() {} + + private: +}; + +class ElementwiseMulOpConverter : public AnakinOpConverter { + public: + ElementwiseMulOpConverter() = default; + + virtual void operator()(const framework::proto::OpDesc &op, + const framework::Scope &scope, + bool test_mode) override; + virtual ~ElementwiseMulOpConverter() {} + + private: +}; + +} // namespace anakin +} // namespace inference +} // namespace paddle diff --git a/paddle/fluid/inference/anakin/convert/fc.cc b/paddle/fluid/inference/anakin/convert/fc.cc index 33a5aff1de2851ad55c2df83cc48ba86f8ded754..2514eb1e093b4e05b7e6b2814cfd8185b3aede6c 100644 --- a/paddle/fluid/inference/anakin/convert/fc.cc +++ b/paddle/fluid/inference/anakin/convert/fc.cc @@ -14,60 +14,108 @@ #include "paddle/fluid/inference/anakin/convert/fc.h" #include +#include +#include using anakin::graph::GraphGlobalMem; using anakin::AK_FLOAT; -using anakin::Precision; using anakin::saber::NV; -using anakin::saber::X86; using anakin::saber::Shape; -using anakin::PBlock; -using anakin::PTuple; namespace paddle { namespace inference { namespace anakin { -void FcOpConverter::operator()(const framework::proto::OpDesc &op, - const framework::Scope &scope, bool test_mode) { +void FcBaseOpConverter::operator()(const framework::proto::OpDesc &op, + const framework::Scope &scope, + bool test_mode) { framework::OpDesc op_desc(op, nullptr); - PADDLE_ENFORCE_EQ(op_desc.Input("X").size(), 1); - PADDLE_ENFORCE_EQ(op_desc.Input("Y").size(), 1); - PADDLE_ENFORCE_EQ(op_desc.Output("Out").size(), 1); + auto input_names = op_desc.InputNames(); + bool with_bias = input_names.size() == 3; + + std::string w_name = "Y"; + std::string i_name = "X"; + if (with_bias) { + w_name = "W"; + i_name = "Input"; + } - auto x_name = op_desc.Input("X").front(); auto op_name = op_desc.Type() + ":" + op_desc.Output("Out").front(); - auto *y_v = scope.FindVar(op_desc.Input("Y").front()); + + // get weights + auto *y_v = scope.FindVar(op_desc.Input(w_name).front()); PADDLE_ENFORCE_NOT_NULL(y_v); auto *y_t = y_v->GetMutable(); - auto input_name = op_desc.Input("X").front(); + auto input_name = op_desc.Input(i_name).front(); auto output_name = op_desc.Output("Out").front(); - auto weight_shape = framework::vectorize2int(y_t->dims()); engine_->AddOp(op_name, "Dense", {input_name}, {output_name}); - engine_->AddOpAttr(op_name, "bias_term", false); + engine_->AddOpAttr(op_name, "bias_term", with_bias); engine_->AddOpAttr(op_name, "axis", 1); + + auto weight_shape = framework::vectorize2int(y_t->dims()); int out_dim = weight_shape[1]; engine_->AddOpAttr(op_name, "out_dim", out_dim); + const int w_m = weight_shape[0]; + const int w_k = weight_shape[1]; - weight_shape.push_back(1); - weight_shape.push_back(1); + if (weight_shape.size() < 4UL) { + weight_shape.insert(weight_shape.begin(), 4UL - weight_shape.size(), 1); + } Shape anakin_shape(weight_shape); framework::LoDTensor weight_tensor; weight_tensor.Resize(y_t->dims()); TensorCopySync((*y_t), platform::CPUPlace(), &weight_tensor); + auto *weight_data = weight_tensor.data(); + PADDLE_ENFORCE(w_m * w_k == weight_tensor.numel()); + std::vector trans_weight_data(weight_tensor.numel()); + for (int i = 0; i < w_m; i++) { + for (int j = 0; j < w_k; j++) { + trans_weight_data[i + j * w_m] = weight_data[i * w_k + j]; + } + } auto *weight1 = GraphGlobalMem::Global().template new_block(anakin_shape); float *cpu_data = static_cast(weight1->h_tensor().mutable_data()); - std::copy_n(weight_tensor.data(), weight_tensor.numel(), cpu_data); + std::copy_n(trans_weight_data.data(), weight_tensor.numel(), cpu_data); weight1->d_tensor().set_shape(anakin_shape); weight1->d_tensor().copy_from(weight1->h_tensor()); engine_->AddOpAttr(op_name, "weight_1", *weight1); + + // get bias + if (with_bias) { + auto *b_v = scope.FindVar(op_desc.Input("Bias").front()); + PADDLE_ENFORCE_NOT_NULL(b_v); + auto *b_t = b_v->GetMutable(); + + auto bias_shape = framework::vectorize2int(b_t->dims()); + framework::LoDTensor bias_tensor; + bias_tensor.Resize(b_t->dims()); + TensorCopySync((*b_t), platform::CPUPlace(), &bias_tensor); + auto *bias_data = bias_tensor.data(); + bias_shape.insert(bias_shape.begin(), 1); + bias_shape.insert(bias_shape.begin(), 1); + bias_shape.insert(bias_shape.begin(), 1); + // bias_shape.push_back(1); + // bias_shape.push_back(1); + Shape anakin_bias_shape(bias_shape); + + auto *weight2 = GraphGlobalMem::Global().template new_block( + anakin_bias_shape); + float *cpu_data2 = static_cast(weight2->h_tensor().mutable_data()); + std::copy_n(bias_data, bias_tensor.numel(), cpu_data2); + weight2->d_tensor().set_shape(anakin_bias_shape); + weight2->d_tensor().copy_from(weight2->h_tensor()); + engine_->AddOpAttr(op_name, "weight_2", *weight2); + } } } // namespace anakin } // namespace inference } // namespace paddle + +REGISTER_ANAKIN_OP_CONVERTER(mul, MulOpConverter); +REGISTER_ANAKIN_OP_CONVERTER(fc, FcOpConverter); diff --git a/paddle/fluid/inference/anakin/convert/fc.h b/paddle/fluid/inference/anakin/convert/fc.h index b670486f12b36043a01ceb002da8756901ed01ce..060c649b19ef335a9e926eb205ec691a2a188fe1 100644 --- a/paddle/fluid/inference/anakin/convert/fc.h +++ b/paddle/fluid/inference/anakin/convert/fc.h @@ -20,19 +20,28 @@ namespace paddle { namespace inference { namespace anakin { -class FcOpConverter : public AnakinOpConverter { +class FcBaseOpConverter : public AnakinOpConverter { public: - FcOpConverter() = default; + FcBaseOpConverter() = default; virtual void operator()(const framework::proto::OpDesc &op, const framework::Scope &scope, bool test_mode) override; - virtual ~FcOpConverter() {} + virtual ~FcBaseOpConverter() {} +}; - private: +// with bias +class FcOpConverter : public FcBaseOpConverter { + public: + FcOpConverter() = default; +}; + +// without bias +class MulOpConverter : public FcBaseOpConverter { + public: + MulOpConverter() = default; }; -static Registrar register_fc_op_converter("fc"); } // namespace anakin } // namespace inference } // namespace paddle diff --git a/paddle/fluid/inference/anakin/convert/flatten.cc b/paddle/fluid/inference/anakin/convert/flatten.cc new file mode 100644 index 0000000000000000000000000000000000000000..c6c372bbef87de7f38c1f66a21c170cabac8c0ed --- /dev/null +++ b/paddle/fluid/inference/anakin/convert/flatten.cc @@ -0,0 +1,51 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "paddle/fluid/inference/anakin/convert/flatten.h" +#include + +using anakin::graph::GraphGlobalMem; +using anakin::AK_FLOAT; +using anakin::saber::NV; +using anakin::saber::Shape; +using anakin::PTuple; + +namespace paddle { +namespace inference { +namespace anakin { + +void FlattenOpConverter::operator()(const framework::proto::OpDesc &op, + const framework::Scope &scope, + bool test_mode) { + framework::OpDesc op_desc(op, nullptr); + PADDLE_ENFORCE_EQ(op_desc.Input("X").size(), 1UL); + PADDLE_ENFORCE_EQ(op_desc.Output("Out").size(), 1UL); + + auto input = op_desc.Input("X").front(); + auto output = op_desc.Output("Out").front(); + int axis = boost::get(op_desc.GetAttr("axis")); + PADDLE_ENFORCE(axis == 1, + "the anakin flatten op converter now only support aixs == 1."); + + std::vector out_dims = {0, -1, 1, 1}; + auto op_name = op_desc.Type() + ":" + op_desc.Output("Out").front(); + engine_->AddOp(op_name, "Reshape", {input}, {output}); + engine_->AddOpAttr>(op_name, "dims", out_dims); +} + +} // namespace anakin +} // namespace inference +} // namespace paddle + +REGISTER_ANAKIN_OP_CONVERTER(flatten, FlattenOpConverter); diff --git a/paddle/fluid/inference/anakin/convert/flatten.h b/paddle/fluid/inference/anakin/convert/flatten.h new file mode 100644 index 0000000000000000000000000000000000000000..1ace76b16381980a9eaec12806e0bc94d7b1fb85 --- /dev/null +++ b/paddle/fluid/inference/anakin/convert/flatten.h @@ -0,0 +1,35 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once + +#include "paddle/fluid/inference/anakin/convert/op_converter.h" + +namespace paddle { +namespace inference { +namespace anakin { + +class FlattenOpConverter : public AnakinOpConverter { + public: + FlattenOpConverter() = default; + + virtual void operator()(const framework::proto::OpDesc &op, + const framework::Scope &scope, + bool test_mode) override; + virtual ~FlattenOpConverter() {} +}; + +} // namespace anakin +} // namespace inference +} // namespace paddle diff --git a/paddle/fluid/inference/anakin/convert/im2sequence.cc b/paddle/fluid/inference/anakin/convert/im2sequence.cc new file mode 100644 index 0000000000000000000000000000000000000000..568d7e4746f11b13ce8ea9e5a47a1b43d1c12693 --- /dev/null +++ b/paddle/fluid/inference/anakin/convert/im2sequence.cc @@ -0,0 +1,62 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "paddle/fluid/inference/anakin/convert/im2sequence.h" +#include +#include +#include + +using anakin::graph::GraphGlobalMem; +using anakin::AK_FLOAT; +using anakin::Precision; +using anakin::saber::NV; +using anakin::saber::X86; +using anakin::saber::Shape; +using anakin::PBlock; +using anakin::PTuple; + +namespace paddle { +namespace inference { +namespace anakin { + +void Im2SequenceConverter::operator()(const framework::proto::OpDesc &op, + const framework::Scope &scope, + bool test_mode) { + framework::OpDesc op_desc(op, nullptr); + PADDLE_ENFORCE_EQ(op_desc.Input("X").size(), 1); + PADDLE_ENFORCE_EQ(op_desc.Output("Y").size(), 0); + PADDLE_ENFORCE_EQ(op_desc.Output("Out").size(), 1); + + auto x_name = op_desc.Input("X").front(); + auto out_name = op_desc.Output("Out").front(); + auto op_name = op_desc.Type() + ":" + op_desc.Output("Out").front(); + + engine_->AddOp(op_name, "Im2Sequence", {x_name}, {out_name}); + + std::vector dilations = {1, 1}; + auto paddings = boost::get>(op_desc.GetAttr("paddings")); + auto strides = boost::get>(op_desc.GetAttr("strides")); + auto kernels = boost::get>(op_desc.GetAttr("kernels")); + + engine_->AddOpAttr>(op_name, "paddings", paddings); + engine_->AddOpAttr>(op_name, "strides", strides); + engine_->AddOpAttr>(op_name, "window_size", kernels); + engine_->AddOpAttr>(op_name, "dilations", dilations); +} + +} // namespace anakin +} // namespace inference +} // namespace paddle + +REGISTER_ANAKIN_OP_CONVERTER(im2sequence, Im2SequenceConverter); diff --git a/paddle/fluid/inference/anakin/convert/im2sequence.h b/paddle/fluid/inference/anakin/convert/im2sequence.h new file mode 100644 index 0000000000000000000000000000000000000000..3003eac2c6f416663c3e7c4c3e297b6347edfb47 --- /dev/null +++ b/paddle/fluid/inference/anakin/convert/im2sequence.h @@ -0,0 +1,37 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once + +#include "paddle/fluid/inference/anakin/convert/op_converter.h" + +namespace paddle { +namespace inference { +namespace anakin { + +class Im2SequenceConverter : public AnakinOpConverter { + public: + Im2SequenceConverter() = default; + + virtual void operator()(const framework::proto::OpDesc &op, + const framework::Scope &scope, + bool test_mode) override; + virtual ~Im2SequenceConverter() {} + + private: +}; + +} // namespace anakin +} // namespace inference +} // namespace paddle diff --git a/paddle/fluid/inference/anakin/convert/op_converter.h b/paddle/fluid/inference/anakin/convert/op_converter.h index b9a221079dcec78fc86ebed7dfac0c59ec0f8540..4603681e1e8a3c2841a62cc88b49a84950910e73 100644 --- a/paddle/fluid/inference/anakin/convert/op_converter.h +++ b/paddle/fluid/inference/anakin/convert/op_converter.h @@ -14,15 +14,16 @@ #pragma once +#include #include #include #include #include +#include #include "framework/core/types.h" #include "paddle/fluid/framework/block_desc.h" #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/framework/scope.h" -#include "paddle/fluid/inference/anakin/convert/registrar.h" #include "paddle/fluid/inference/anakin/engine.h" #include "paddle/fluid/inference/utils/singleton.h" #include "saber/saber_types.h" @@ -46,19 +47,14 @@ class AnakinOpConverter { bool test_mode = false) { framework::OpDesc op_desc(op, nullptr); std::string op_type = op_desc.Type(); - std::shared_ptr it{nullptr}; - - if (op_type == "mul") { - PADDLE_ENFORCE_EQ(op_desc.Input("Y").size(), 1UL); - std::string Y = op_desc.Input("Y")[0]; - std::cout << Y << parameters.count(Y) << std::endl; - if (parameters.count(Y)) { - it = OpRegister::instance()->Get("fc"); - } - } + AnakinOpConverter *it = nullptr; + + if (op_type == "reshape2") op_type = "reshape"; + if (op_type == "transpose2") op_type = "transpose"; + if (op_type == "flatten2") op_type = "flatten"; if (!it) { - it = OpRegister::instance()->Get(op_type); + it = Registry::Global().Lookup(op_type); } PADDLE_ENFORCE_NOT_NULL(it, "no OpConverter for optype [%s]", op_type); it->SetEngine(engine); @@ -74,6 +70,63 @@ class AnakinOpConverter { ConvertOp(op, parameters, scope, engine); } } + + // The scope here should be inited with the parameter vars. + void ConvertBlockToAnakinEngine( + framework::BlockDesc *block_desc, framework::Scope *scope, + const std::vector &inputs, + const std::unordered_set ¶meters, + const std::vector &outputs, AnakinNvEngine *engine) { + framework::proto::BlockDesc *block_proto = block_desc->Proto(); + ConvertBlock(*block_proto, parameters, *scope, engine); + + engine->Freeze(); + // if the max_batch size + int max_batch_size = engine->GetMaxBatchSize(); + PADDLE_ENFORCE(max_batch_size > 0, + "the max_batch_size setted from config->EnableAnakinEngine " + "must largger than 0"); + // If the user does not specify this variable, we use the input shape from + // the block_desc. + auto max_input_shape = engine->GetMaxInputShape(); + std::map> temp_max_input_shape; + + for (auto &input : inputs) { + if (parameters.count(input)) continue; + std::vector input_shape; + input_shape.resize(4); + input_shape[0] = max_batch_size; + if (max_input_shape.count(input)) { + PADDLE_ENFORCE(max_input_shape[input].size() == 4, + "the dimensions of max_input_shape setted from " + "config->EnableAnakinEngine must be 4"); + for (int i = 1; i < 4; i++) { + input_shape[i] = max_input_shape[input][i]; + } + } else { + auto *var = block_desc->FindVar(input); + PADDLE_ENFORCE(var, "no variable called %s", input); + + auto var_shape = var->GetShape(); + std::cout << "input :" << input << std::endl; + PADDLE_ENFORCE(var_shape.size() == 4); + + for (size_t i = 1; i < var_shape.size(); i++) { + input_shape[i] = var_shape[i]; + } + } + temp_max_input_shape[input] = input_shape; + engine->SetInputShape(input, input_shape); + engine->Graph()->RegistVar(input); // For share from data. + } + engine->SetMaxInputShape(temp_max_input_shape); + engine->Optimize(); + + // For anakin share with fluid tensor. + engine->AllocTmpMem(); + engine->InitGraph(); + } + void SetEngine(AnakinNvEngine *engine) { engine_ = engine; } virtual ~AnakinOpConverter() {} @@ -91,22 +144,23 @@ class AnakinOpConverter { } // namespace inference } // namespace paddle -#define REGISTER_ANAKIN_OP_CONVERTER(op_type__, Converter__) \ - struct anakin_##op_type__##_converter \ - : public ::paddle::framework::Registrar { \ - anakin_##op_type__##_converter() { \ - ::paddle::inference:: \ - Registry::Register< \ - ::paddle::inference::anakin::Converter__>(#op_type__); \ - } \ - }; \ - anakin_##op_type__##_converter anakin_##op_type__##_converter__; \ - int TouchConverterRegister_anakin_##op_type__() { \ - anakin_##op_type__##_converter__.Touch(); \ - return 0; \ +#define REGISTER_ANAKIN_OP_CONVERTER(op_type__, Converter__) \ + struct anakin_##op_type__##_converter \ + : public ::paddle::framework::Registrar { \ + anakin_##op_type__##_converter() { \ + LOG(INFO) << "register convert " << #op_type__; \ + ::paddle::inference::Registry< \ + ::paddle::inference::anakin::AnakinOpConverter>::Global() \ + .Register<::paddle::inference::anakin::Converter__>(#op_type__); \ + } \ + }; \ + anakin_##op_type__##_converter anakin_##op_type__##_converter__; \ + int TouchConverterRegister_anakin_##op_type__() { \ + anakin_##op_type__##_converter__.Touch(); \ + return 0; \ } -#define USE_ANAKIN_CONVERTER(op_type__) \ - extern int TouchConverterRegister_anakin_##op_type__(); \ - static int use_op_converter_anakin_##op_type__ __attribute__((unused)) = \ +#define USE_ANAKIN_CONVERTER(op_type__) \ + extern int TouchConverterRegister_anakin_##op_type__(); \ + int use_op_converter_anakin_##op_type__ __attribute__((unused)) = \ TouchConverterRegister_anakin_##op_type__(); diff --git a/paddle/fluid/inference/anakin/convert/pool2d.cc b/paddle/fluid/inference/anakin/convert/pool2d.cc new file mode 100644 index 0000000000000000000000000000000000000000..9b01d56a126b2ebc194f5b5bb5b2f52c298a316e --- /dev/null +++ b/paddle/fluid/inference/anakin/convert/pool2d.cc @@ -0,0 +1,80 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "paddle/fluid/inference/anakin/convert/pool2d.h" +#include +#include +#include + +using anakin::graph::GraphGlobalMem; +using anakin::AK_FLOAT; +using anakin::Precision; +using anakin::saber::NV; +using anakin::saber::X86; +using anakin::saber::Shape; +using anakin::PBlock; +using anakin::PTuple; + +namespace paddle { +namespace inference { +namespace anakin { + +void Pool2dOpConverter::operator()(const framework::proto::OpDesc &op, + const framework::Scope &scope, + bool test_mode) { + framework::OpDesc op_desc(op, nullptr); + PADDLE_ENFORCE_EQ(op_desc.Input("X").size(), 1); + PADDLE_ENFORCE_EQ(op_desc.Output("Out").size(), 1); + + auto x_name = op_desc.Input("X").front(); + auto y_name = op_desc.Output("Out").front(); + auto op_name = op_desc.Type() + ":" + op_desc.Output("Out").front(); + + bool global_pooling = boost::get(op_desc.GetAttr("global_pooling")); + std::string pool_type = + boost::get(op_desc.GetAttr("pooling_type")); + std::vector ksize = + boost::get>(op_desc.GetAttr("ksize")); + std::vector strides = + boost::get>(op_desc.GetAttr("strides")); + std::vector paddings = + boost::get>(op_desc.GetAttr("paddings")); + bool ceil_mode = boost::get(op_desc.GetAttr("ceil_mode")); + std::string anakin_pool_type; + if (pool_type == "max") { + anakin_pool_type = "MAX"; + } else if (pool_type == "avg") { + if (paddings[0] || paddings[1]) { + anakin_pool_type = "AVGEXC"; + } else { + anakin_pool_type = "AVG"; + } + } else { + PADDLE_THROW("TensorRT unsupported pooling type!"); + } + + engine_->AddOp(op_name, "Pooling", {x_name}, {y_name}); + engine_->AddOpAttr>(op_name, "pool_size", ksize); + engine_->AddOpAttr>(op_name, "strides", strides); + engine_->AddOpAttr>(op_name, "padding", paddings); + engine_->AddOpAttr(op_name, "method", anakin_pool_type); + engine_->AddOpAttr(op_name, "global_pooling", global_pooling); + engine_->AddOpAttr(op_name, "cmp_out_shape_floor_as_conv", !ceil_mode); +} + +} // namespace anakin +} // namespace inference +} // namespace paddle + +REGISTER_ANAKIN_OP_CONVERTER(pool2d, Pool2dOpConverter); diff --git a/paddle/fluid/inference/anakin/convert/pool2d.h b/paddle/fluid/inference/anakin/convert/pool2d.h new file mode 100644 index 0000000000000000000000000000000000000000..1931a03c7ac236b4e57236cd1eb2947110f279a8 --- /dev/null +++ b/paddle/fluid/inference/anakin/convert/pool2d.h @@ -0,0 +1,37 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once + +#include "paddle/fluid/inference/anakin/convert/op_converter.h" + +namespace paddle { +namespace inference { +namespace anakin { + +class Pool2dOpConverter : public AnakinOpConverter { + public: + Pool2dOpConverter() = default; + + virtual void operator()(const framework::proto::OpDesc &op, + const framework::Scope &scope, + bool test_mode) override; + virtual ~Pool2dOpConverter() {} + + private: +}; + +} // namespace anakin +} // namespace inference +} // namespace paddle diff --git a/paddle/fluid/inference/anakin/convert/registrar.h b/paddle/fluid/inference/anakin/convert/registrar.h deleted file mode 100644 index afce66ca084143ae203af9a60089aa2f5d18a725..0000000000000000000000000000000000000000 --- a/paddle/fluid/inference/anakin/convert/registrar.h +++ /dev/null @@ -1,58 +0,0 @@ -// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -#pragma once - -#include -#include -#include -#include -#include - -namespace paddle { -namespace inference { -namespace anakin { - -class AnakinOpConverter; - -class OpRegister { - public: - OpRegister() = default; - std::shared_ptr Get(const std::string &name); - static OpRegister *instance(); - void OpRegisterFn(const std::string &name, - std::function()> fn) { - registry_[name] = fn; - } - - private: - using RegisterFnType = std::function()>; - std::map()>> - registry_; -}; - -template -class Registrar { - public: - Registrar(const std::string &name, Args... args) { - std::shared_ptr converter = - std::make_shared(std::move(args)...); - OpRegister::instance()->OpRegisterFn(name, - [converter]() { return converter; }); - } -}; - -} // namespace anakin -} // namespace inference -} // namespace paddle diff --git a/paddle/fluid/inference/anakin/convert/relu.cc b/paddle/fluid/inference/anakin/convert/relu.cc new file mode 100644 index 0000000000000000000000000000000000000000..2ce96db1804a3d6d6d1afac79e4e1fc55ed4c35d --- /dev/null +++ b/paddle/fluid/inference/anakin/convert/relu.cc @@ -0,0 +1,47 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "paddle/fluid/inference/anakin/convert/relu.h" +#include +#include + +using anakin::graph::GraphGlobalMem; +using anakin::AK_FLOAT; +using anakin::saber::NV; +using anakin::saber::Shape; + +namespace paddle { +namespace inference { +namespace anakin { + +void ReluOpConverter::operator()(const framework::proto::OpDesc &op, + const framework::Scope &scope, + bool test_mode) { + framework::OpDesc op_desc(op, nullptr); + PADDLE_ENFORCE_EQ(op_desc.Input("X").size(), 1); + PADDLE_ENFORCE_EQ(op_desc.Output("Out").size(), 1); + + auto op_name = op_desc.Type() + ":" + op_desc.Output("Out").front(); + auto input_name = op_desc.Input("X").front(); + auto output_name = op_desc.Output("Out").front(); + + engine_->AddOp(op_name, "ReLU", {input_name}, {output_name}); + engine_->AddOpAttr(op_name, "alpha", 0); +} + +} // namespace anakin +} // namespace inference +} // namespace paddle + +REGISTER_ANAKIN_OP_CONVERTER(relu, ReluOpConverter); diff --git a/paddle/fluid/inference/anakin/convert/relu.h b/paddle/fluid/inference/anakin/convert/relu.h new file mode 100644 index 0000000000000000000000000000000000000000..54c4c2316eb32ef70696a2477211008e04892552 --- /dev/null +++ b/paddle/fluid/inference/anakin/convert/relu.h @@ -0,0 +1,37 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once + +#include +#include +#include "paddle/fluid/inference/anakin/convert/op_converter.h" + +namespace paddle { +namespace inference { +namespace anakin { + +class ReluOpConverter : public AnakinOpConverter { + public: + ReluOpConverter() = default; + + virtual void operator()(const framework::proto::OpDesc &op, + const framework::Scope &scope, + bool test_mode) override; + virtual ~ReluOpConverter() {} +}; + +} // namespace anakin +} // namespace inference +} // namespace paddle diff --git a/paddle/fluid/inference/anakin/convert/reshape.cc b/paddle/fluid/inference/anakin/convert/reshape.cc new file mode 100644 index 0000000000000000000000000000000000000000..eee36d2f37ea79c841ac8bf60c6e533069d06240 --- /dev/null +++ b/paddle/fluid/inference/anakin/convert/reshape.cc @@ -0,0 +1,52 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "paddle/fluid/inference/anakin/convert/reshape.h" +#include + +using anakin::graph::GraphGlobalMem; +using anakin::AK_FLOAT; +using anakin::saber::NV; +using anakin::saber::Shape; +using anakin::PTuple; + +namespace paddle { +namespace inference { +namespace anakin { + +void ReshapeOpConverter::operator()(const framework::proto::OpDesc &op, + const framework::Scope &scope, + bool test_mode) { + framework::OpDesc op_desc(op, nullptr); + PADDLE_ENFORCE_EQ(op_desc.Input("X").size(), 1UL); + PADDLE_ENFORCE_EQ(op_desc.Output("Out").size(), 1UL); + + auto input = op_desc.Input("X").front(); + auto output = op_desc.Output("Out").front(); + + auto op_name = op_desc.Type() + ":" + op_desc.Output("Out").front(); + engine_->AddOp(op_name, "Reshape", {input}, {output}); + + auto shape = boost::get>(op_desc.GetAttr("shape")); + if (shape.size() < 4) { + shape.insert(shape.end(), 4 - shape.size(), 1); + } + engine_->AddOpAttr>(op_name, "dims", shape); +} + +} // namespace anakin +} // namespace inference +} // namespace paddle + +REGISTER_ANAKIN_OP_CONVERTER(reshape, ReshapeOpConverter); diff --git a/paddle/fluid/inference/anakin/convert/reshape.h b/paddle/fluid/inference/anakin/convert/reshape.h new file mode 100644 index 0000000000000000000000000000000000000000..970e8ce5572572bd18c34eeffa902fa2495c1cce --- /dev/null +++ b/paddle/fluid/inference/anakin/convert/reshape.h @@ -0,0 +1,35 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once + +#include "paddle/fluid/inference/anakin/convert/op_converter.h" + +namespace paddle { +namespace inference { +namespace anakin { + +class ReshapeOpConverter : public AnakinOpConverter { + public: + ReshapeOpConverter() = default; + + virtual void operator()(const framework::proto::OpDesc &op, + const framework::Scope &scope, + bool test_mode) override; + virtual ~ReshapeOpConverter() {} +}; + +} // namespace anakin +} // namespace inference +} // namespace paddle diff --git a/paddle/fluid/inference/anakin/convert/scale.cc b/paddle/fluid/inference/anakin/convert/scale.cc new file mode 100644 index 0000000000000000000000000000000000000000..6f3aa8c5d1111dc2829e241c9331eeb521003c03 --- /dev/null +++ b/paddle/fluid/inference/anakin/convert/scale.cc @@ -0,0 +1,56 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "paddle/fluid/inference/anakin/convert/scale.h" +#include +#include + +using anakin::graph::GraphGlobalMem; +using anakin::AK_FLOAT; +using anakin::saber::NV; +using anakin::saber::Shape; + +namespace paddle { +namespace inference { +namespace anakin { + +void ScaleOpConverter::operator()(const framework::proto::OpDesc &op, + const framework::Scope &scope, + bool test_mode) { + framework::OpDesc op_desc(op, nullptr); + PADDLE_ENFORCE_EQ(op_desc.Input("X").size(), 1); + PADDLE_ENFORCE_EQ(op_desc.Output("Out").size(), 1); + + auto op_name = op_desc.Type() + ":" + op_desc.Output("Out").front(); + + auto input_name = op_desc.Input("X").front(); + auto output_name = op_desc.Output("Out").front(); + float scale = boost::get(op_desc.GetAttr("scale")); + float bias = boost::get(op_desc.GetAttr("bias")); + float bias_after_scale = + boost::get(op_desc.GetAttr("bias_after_scale")); + PADDLE_ENFORCE(bias_after_scale, + "The anakin scale layer only support bias after scale now."); + + engine_->AddOp(op_name, "Power", {input_name}, {output_name}); + engine_->AddOpAttr(op_name, "shift", bias); + engine_->AddOpAttr(op_name, "scale", scale); + engine_->AddOpAttr(op_name, "power", static_cast(1.0)); +} + +} // namespace anakin +} // namespace inference +} // namespace paddle + +REGISTER_ANAKIN_OP_CONVERTER(scale, ScaleOpConverter); diff --git a/paddle/fluid/inference/anakin/convert/scale.h b/paddle/fluid/inference/anakin/convert/scale.h new file mode 100644 index 0000000000000000000000000000000000000000..b858e3c512494f80c7c3818a570e43d90d65251b --- /dev/null +++ b/paddle/fluid/inference/anakin/convert/scale.h @@ -0,0 +1,37 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once + +#include +#include +#include "paddle/fluid/inference/anakin/convert/op_converter.h" + +namespace paddle { +namespace inference { +namespace anakin { + +class ScaleOpConverter : public AnakinOpConverter { + public: + ScaleOpConverter() = default; + + virtual void operator()(const framework::proto::OpDesc &op, + const framework::Scope &scope, + bool test_mode) override; + virtual ~ScaleOpConverter() {} +}; + +} // namespace anakin +} // namespace inference +} // namespace paddle diff --git a/paddle/fluid/inference/anakin/convert/softmax.cc b/paddle/fluid/inference/anakin/convert/softmax.cc new file mode 100644 index 0000000000000000000000000000000000000000..d5cd8908ebf623f0334a3b4df2b19147c63f77a3 --- /dev/null +++ b/paddle/fluid/inference/anakin/convert/softmax.cc @@ -0,0 +1,43 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "paddle/fluid/inference/anakin/convert/softmax.h" + +using anakin::graph::GraphGlobalMem; +using anakin::AK_FLOAT; +using anakin::saber::NV; +using anakin::saber::Shape; + +namespace paddle { +namespace inference { +namespace anakin { + +void SoftMaxOpConverter::operator()(const framework::proto::OpDesc &op, + const framework::Scope &scope, + bool test_mode) { + framework::OpDesc op_desc(op, nullptr); + PADDLE_ENFORCE_EQ(op_desc.Input("X").size(), 1UL); + + auto input = op_desc.Input("X").front(); + auto output = op_desc.Output("Out").front(); + auto op_name = op_desc.Type() + ":" + op_desc.Output("Out").front(); + engine_->AddOp(op_name, "Softmax", {input}, {output}); + engine_->AddOpAttr(op_name, "axis", 2); +} + +} // namespace anakin +} // namespace inference +} // namespace paddle + +REGISTER_ANAKIN_OP_CONVERTER(softmax, SoftMaxOpConverter); diff --git a/paddle/fluid/inference/anakin/convert/softmax.h b/paddle/fluid/inference/anakin/convert/softmax.h new file mode 100644 index 0000000000000000000000000000000000000000..0508da0c6fecaf29b7376005904235dadf04ea28 --- /dev/null +++ b/paddle/fluid/inference/anakin/convert/softmax.h @@ -0,0 +1,35 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once + +#include "paddle/fluid/inference/anakin/convert/op_converter.h" + +namespace paddle { +namespace inference { +namespace anakin { + +class SoftMaxOpConverter : public AnakinOpConverter { + public: + SoftMaxOpConverter() = default; + + virtual void operator()(const framework::proto::OpDesc &op, + const framework::Scope &scope, + bool test_mode) override; + virtual ~SoftMaxOpConverter() {} +}; + +} // namespace anakin +} // namespace inference +} // namespace paddle diff --git a/paddle/fluid/inference/anakin/convert/split.cc b/paddle/fluid/inference/anakin/convert/split.cc new file mode 100644 index 0000000000000000000000000000000000000000..b8464a766d21e93426eb4a00b8caab2af5470055 --- /dev/null +++ b/paddle/fluid/inference/anakin/convert/split.cc @@ -0,0 +1,63 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "paddle/fluid/inference/anakin/convert/split.h" +#include +#include + +using anakin::graph::GraphGlobalMem; +using anakin::AK_FLOAT; +using anakin::Precision; +using anakin::saber::NV; +using anakin::saber::X86; +using anakin::saber::Shape; +using anakin::PBlock; +using anakin::PTuple; + +namespace paddle { +namespace inference { +namespace anakin { + +void SplitOpConverter::operator()(const framework::proto::OpDesc &op, + const framework::Scope &scope, + bool test_mode) { + framework::OpDesc op_desc(op, nullptr); + auto input_name = op_desc.Input("X").front(); + auto y_names = op_desc.Output("Out"); + auto op_name = op_desc.Type() + ":" + op_desc.Output("Out").front(); + int axis = boost::get(op_desc.GetAttr("axis")); + + std::vector output_lengths = + boost::get>(op_desc.GetAttr("sections")); + + int split_num = output_lengths.size(); + PADDLE_ENFORCE(split_num > 1, + "anakin split op converter: the split num should > 1"); + int num_sum = 0; + std::vector slice_point; + for (int i = 0; i < split_num - 1; i++) { + num_sum += output_lengths[i]; + slice_point.push_back(num_sum); + } + engine_->AddOp(op_name, "Slice", {input_name}, y_names); + engine_->AddOpAttr(op_name, "axis", axis); + engine_->AddOpAttr>(op_name, "slice_point", slice_point); + // slice_dim is useless in anakin + engine_->AddOpAttr(op_name, "slice_dim", 4); +} + +} // namespace anakin +} // namespace inference +} // namespace paddle +REGISTER_ANAKIN_OP_CONVERTER(split, SplitOpConverter); diff --git a/paddle/fluid/inference/anakin/convert/split.h b/paddle/fluid/inference/anakin/convert/split.h new file mode 100644 index 0000000000000000000000000000000000000000..a4c6a14e62168ffaf5ff67b5cf953d477ff9e34d --- /dev/null +++ b/paddle/fluid/inference/anakin/convert/split.h @@ -0,0 +1,37 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once + +#include "paddle/fluid/inference/anakin/convert/op_converter.h" + +namespace paddle { +namespace inference { +namespace anakin { + +class SplitOpConverter : public AnakinOpConverter { + public: + SplitOpConverter() = default; + + virtual void operator()(const framework::proto::OpDesc &op, + const framework::Scope &scope, + bool test_mode) override; + virtual ~SplitOpConverter() {} + + private: +}; + +} // namespace anakin +} // namespace inference +} // namespace paddle diff --git a/paddle/fluid/inference/anakin/convert/sum.cc b/paddle/fluid/inference/anakin/convert/sum.cc new file mode 100644 index 0000000000000000000000000000000000000000..df9104cf4631d86e0cbd87cb0e93a96d984953f5 --- /dev/null +++ b/paddle/fluid/inference/anakin/convert/sum.cc @@ -0,0 +1,54 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "paddle/fluid/inference/anakin/convert/sum.h" +#include +#include +#include + +using anakin::graph::GraphGlobalMem; +using anakin::AK_FLOAT; +using anakin::Precision; +using anakin::saber::NV; +using anakin::saber::X86; +using anakin::saber::Shape; +using anakin::PBlock; +using anakin::PTuple; + +namespace paddle { +namespace inference { +namespace anakin { + +void SumOpConverter::operator()(const framework::proto::OpDesc &op, + const framework::Scope &scope, bool test_mode) { + framework::OpDesc op_desc(op, nullptr); + PADDLE_ENFORCE_EQ(op_desc.Input("X").size(), 2); + PADDLE_ENFORCE_EQ(op_desc.Output("Out").size(), 1); + + auto input_names = op_desc.Input("X"); + auto out_name = op_desc.Output("Out").front(); + auto op_name = op_desc.Type() + ":" + op_desc.Output("Out").front(); + + std::vector coeff = {1, 1}; + std::string elementwise_type = "Add"; + engine_->AddOp(op_name, "Eltwise", input_names, {out_name}); + engine_->AddOpAttr>(op_name, "coeff", coeff); + engine_->AddOpAttr(op_name, "type", elementwise_type); +} + +} // namespace anakin +} // namespace inference +} // namespace paddle + +REGISTER_ANAKIN_OP_CONVERTER(sum, SumOpConverter); diff --git a/paddle/fluid/inference/anakin/convert/sum.h b/paddle/fluid/inference/anakin/convert/sum.h new file mode 100644 index 0000000000000000000000000000000000000000..ddecc4b3bcb84f83af95e77399847f191c785563 --- /dev/null +++ b/paddle/fluid/inference/anakin/convert/sum.h @@ -0,0 +1,37 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once + +#include "paddle/fluid/inference/anakin/convert/op_converter.h" + +namespace paddle { +namespace inference { +namespace anakin { + +class SumOpConverter : public AnakinOpConverter { + public: + SumOpConverter() = default; + + virtual void operator()(const framework::proto::OpDesc &op, + const framework::Scope &scope, + bool test_mode) override; + virtual ~SumOpConverter() {} + + private: +}; + +} // namespace anakin +} // namespace inference +} // namespace paddle diff --git a/paddle/fluid/inference/anakin/convert/test_activation_op.cc b/paddle/fluid/inference/anakin/convert/test_activation_op.cc new file mode 100644 index 0000000000000000000000000000000000000000..8bedd4a749a645829658291310347eeed1c0ea49 --- /dev/null +++ b/paddle/fluid/inference/anakin/convert/test_activation_op.cc @@ -0,0 +1,53 @@ +/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#include +#include "paddle/fluid/inference/anakin/convert/activation.h" +#include "paddle/fluid/inference/anakin/convert/op_converter.h" +#include "paddle/fluid/inference/anakin/convert/ut_helper.h" + +namespace paddle { +namespace inference { +namespace anakin { + +static void test_activation_op(const std::string &op_type) { + auto *converter = Registry::Global().Lookup(op_type); + PADDLE_ENFORCE(converter != nullptr); + std::unordered_set parameters; + framework::Scope scope; + AnakinConvertValidation validator(parameters, &scope); + validator.DeclInputVar("act-X", {10, 6, 1, 1}); + validator.DeclOutputVar("act-Out", {10, 6, 1, 1}); + framework::OpDesc desc; + desc.SetType(op_type); + desc.SetInput("X", {"act-X"}); + desc.SetOutput("Out", {"act-Out"}); + + LOG(INFO) << "set OP"; + validator.SetOp(*desc.Proto()); + LOG(INFO) << "execute"; + + validator.Execute(5); +} + +TEST(sigm_op, test) { test_activation_op("sigmoid"); } +TEST(tanh_op, test) { test_activation_op("tanh"); } +} // namespace anakin +} // namespace inference +} // namespace paddle + +USE_OP(sigmoid); +USE_OP(tanh); +USE_ANAKIN_CONVERTER(sigmoid); +USE_ANAKIN_CONVERTER(tanh); diff --git a/paddle/fluid/inference/anakin/convert/test_batch_norm_op.cc b/paddle/fluid/inference/anakin/convert/test_batch_norm_op.cc new file mode 100644 index 0000000000000000000000000000000000000000..2832e1c8d167c646c9049beebc57a82fe416e62c --- /dev/null +++ b/paddle/fluid/inference/anakin/convert/test_batch_norm_op.cc @@ -0,0 +1,71 @@ +/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#include +#include "paddle/fluid/inference/anakin/convert/ut_helper.h" + +namespace paddle { +namespace inference { +namespace anakin { + +TEST(batch_norm_op, test) { + std::unordered_set parameters( + {"batch_norm_scale", "batch_norm_bias", "batch_norm_mean", + "batch_norm_variance"}); + framework::Scope scope; + AnakinConvertValidation validator(parameters, &scope); + std::vector param_shape{2}; + + validator.DeclInputVar("batch_norm_X", {1, 2, 5, 5}); + validator.DeclParamVar("batch_norm_scale", param_shape); + validator.DeclParamVar("batch_norm_bias", param_shape); + validator.DeclParamVar("batch_norm_mean", param_shape); + validator.DeclParamVar("batch_norm_variance", param_shape); + validator.DeclOutputVar("batch_norm_Y", {1, 2, 5, 5}); + validator.DeclOutputVar("batch_norm_save_mean", param_shape); + validator.DeclOutputVar("batch_norm_save_variance", param_shape); + + // Prepare Op description + framework::OpDesc desc; + + desc.SetType("batch_norm"); + desc.SetInput("X", {"batch_norm_X"}); + desc.SetInput("Scale", {"batch_norm_scale"}); + desc.SetInput("Bias", {"batch_norm_bias"}); + desc.SetInput("Mean", {"batch_norm_mean"}); + desc.SetInput("Variance", {"batch_norm_variance"}); + desc.SetOutput("Y", {"batch_norm_Y"}); + desc.SetOutput("MeanOut", {"batch_norm_mean"}); + desc.SetOutput("VarianceOut", {"batch_norm_variance"}); + desc.SetOutput("SavedMean", {"batch_norm_save_mean"}); + desc.SetOutput("SavedVariance", {"batch_norm_save_variance"}); + + float eps = 1e-5f; + bool is_test = true; + desc.SetAttr("epsilon", eps); + desc.SetAttr("is_test", is_test); + + validator.SetOp(*desc.Proto()); + + std::unordered_set neglected_output = { + "batch_norm_save_mean", "batch_norm_save_variance", "batch_norm_mean", + "batch_norm_variance"}; + validator.Execute(1, neglected_output); +} + +} // namespace anakin +} // namespace inference +} // namespace paddle +USE_OP(batch_norm); +USE_ANAKIN_CONVERTER(batch_norm); diff --git a/paddle/fluid/inference/anakin/convert/test_concat_op.cc b/paddle/fluid/inference/anakin/convert/test_concat_op.cc new file mode 100644 index 0000000000000000000000000000000000000000..ecf44def5a2429360f0bcb92f00a0423e1d491cd --- /dev/null +++ b/paddle/fluid/inference/anakin/convert/test_concat_op.cc @@ -0,0 +1,74 @@ +/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#include +#include "paddle/fluid/inference/anakin/convert/concat.h" +#include "paddle/fluid/inference/anakin/convert/op_converter.h" +#include "paddle/fluid/inference/anakin/convert/ut_helper.h" + +namespace paddle { +namespace inference { +namespace anakin { + +TEST(concat_op, test) { + std::unordered_set parameters({""}); + framework::Scope scope; + AnakinConvertValidation validator(parameters, &scope); + validator.DeclInputVar("concat_x1", {1, 2, 1, 1}); + validator.DeclInputVar("concat_x2", {1, 3, 1, 1}); + validator.DeclInputVar("concat_x3", {1, 1, 1, 1}); + validator.DeclOutputVar("concat_out", {1, 6, 1, 1}); + + // Prepare Op description + framework::OpDesc desc; + desc.SetType("concat"); + desc.SetInput("X", {"concat_x1", "concat_x2", "concat_x3"}); + desc.SetOutput("Out", {"concat_out"}); + + int axis = 1; + desc.SetAttr("axis", axis); + + validator.SetOp(*desc.Proto()); + + validator.Execute(1); +} + +TEST(concat_op, test2) { + std::unordered_set parameters({""}); + framework::Scope scope; + AnakinConvertValidation validator(parameters, &scope); + validator.DeclInputVar("concat_x1", {1, 4}); + validator.DeclInputVar("concat_x2", {3, 4}); + validator.DeclInputVar("concat_x3", {2, 4}); + validator.DeclOutputVar("concat_out", {6, 4}); + + // Prepare Op description + framework::OpDesc desc; + desc.SetType("concat"); + desc.SetInput("X", {"concat_x1", "concat_x2", "concat_x3"}); + desc.SetOutput("Out", {"concat_out"}); + + int axis = 0; + desc.SetAttr("axis", axis); + + validator.SetOp(*desc.Proto()); + + validator.Execute(1); +} + +} // namespace anakin +} // namespace inference +} // namespace paddle +USE_OP(concat); +USE_ANAKIN_CONVERTER(concat); diff --git a/paddle/fluid/inference/anakin/convert/test_conv2d_op.cc b/paddle/fluid/inference/anakin/convert/test_conv2d_op.cc new file mode 100644 index 0000000000000000000000000000000000000000..6d93e50bc96b08b6ef7dd7c9d836038e335daae3 --- /dev/null +++ b/paddle/fluid/inference/anakin/convert/test_conv2d_op.cc @@ -0,0 +1,62 @@ +/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#include +#include "paddle/fluid/inference/anakin/convert/conv2d.h" +#include "paddle/fluid/inference/anakin/convert/op_converter.h" +#include "paddle/fluid/inference/anakin/convert/ut_helper.h" + +namespace paddle { +namespace inference { +namespace anakin { + +TEST(conv2d_op, test) { + auto* conv2d_converter = + Registry::Global().Lookup("conv2d"); + ASSERT_TRUE(conv2d_converter != nullptr); + std::unordered_set parameters({"conv2d-Y"}); + framework::Scope scope; + AnakinConvertValidation validator(parameters, &scope); + validator.DeclInputVar("conv2d-X", {1, 3, 3, 3}); + validator.DeclParamVar("conv2d-Y", {4, 3, 1, 1}); + validator.DeclOutputVar("conv2d-Out", {1, 4, 3, 3}); + + // Prepare Op description + framework::OpDesc desc; + desc.SetType("conv2d"); + desc.SetInput("Input", {"conv2d-X"}); + desc.SetInput("Filter", {"conv2d-Y"}); + desc.SetOutput("Output", {"conv2d-Out"}); + + const std::vector strides({1, 1}); + const std::vector paddings({0, 0}); + const std::vector dilations({1, 1}); + const int groups = 1; + + desc.SetAttr("strides", strides); + desc.SetAttr("paddings", paddings); + desc.SetAttr("dilations", dilations); + desc.SetAttr("groups", groups); + + validator.SetOp(*desc.Proto()); + + validator.Execute(3); +} + +} // namespace anakin +} // namespace inference +} // namespace paddle + +USE_OP(conv2d); +USE_ANAKIN_CONVERTER(conv2d); diff --git a/paddle/fluid/inference/anakin/convert/test_dropout_op.cc b/paddle/fluid/inference/anakin/convert/test_dropout_op.cc new file mode 100644 index 0000000000000000000000000000000000000000..b2de5ae0a6e58eb25a4588571686a25500fe546c --- /dev/null +++ b/paddle/fluid/inference/anakin/convert/test_dropout_op.cc @@ -0,0 +1,53 @@ +/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#include +#include "paddle/fluid/inference/anakin/convert/dropout.h" +#include "paddle/fluid/inference/anakin/convert/op_converter.h" +#include "paddle/fluid/inference/anakin/convert/ut_helper.h" + +namespace paddle { +namespace inference { +namespace anakin { + +TEST(dropout_op, native) { + std::unordered_set parameters; + framework::Scope scope; + AnakinConvertValidation validator(parameters, &scope); + validator.DeclInputVar("x", {1, 1, 2, 2}); + validator.DeclOutputVar("out", {1, 1, 2, 2}); + validator.DeclOutputVar("mask", {1, 1, 2, 2}); + + // Prepare Op description + framework::OpDesc desc; + desc.SetType("dropout"); + desc.SetInput("X", {"x"}); + desc.SetOutput("Out", {"out"}); + desc.SetOutput("Mask", {"mask"}); + + float dropout_prob = 0.5; + desc.SetAttr("dropout_prob", dropout_prob); + desc.SetAttr("is_test", true); + + validator.SetOp(*desc.Proto()); + std::unordered_set neglected_output = {"mask"}; + validator.Execute(1, neglected_output); +} + +} // namespace anakin +} // namespace inference +} // namespace paddle + +USE_OP(dropout); +USE_ANAKIN_CONVERTER(dropout); diff --git a/paddle/fluid/inference/anakin/convert/test_elementwise_op.cc b/paddle/fluid/inference/anakin/convert/test_elementwise_op.cc new file mode 100644 index 0000000000000000000000000000000000000000..3a437f5fdb565609667b7a862c9b2bb13cdbeded --- /dev/null +++ b/paddle/fluid/inference/anakin/convert/test_elementwise_op.cc @@ -0,0 +1,56 @@ +/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#include +#include "paddle/fluid/inference/anakin/convert/elementwise.h" +#include "paddle/fluid/inference/anakin/convert/op_converter.h" +#include "paddle/fluid/inference/anakin/convert/ut_helper.h" + +namespace paddle { +namespace inference { +namespace anakin { + +static void test_elementwise_op(const std::string &op_type) { + std::unordered_set parameters; + framework::Scope scope; + AnakinConvertValidation validator(parameters, &scope); + validator.DeclInputVar("x", {1, 1, 2, 2}); + validator.DeclInputVar("y", {1, 1, 2, 2}); + validator.DeclOutputVar("out", {1, 1, 2, 2}); + + // Prepare Op description + framework::OpDesc desc; + desc.SetType(op_type); + desc.SetInput("X", {"x"}); + desc.SetInput("Y", {"y"}); + desc.SetOutput("Out", {"out"}); + + int axis = -1; + desc.SetAttr("axis", axis); + + validator.SetOp(*desc.Proto()); + validator.Execute(1); +} + +TEST(elementwise_op, native_add) { test_elementwise_op("elementwise_add"); } +TEST(elementwise_op, native_mul) { test_elementwise_op("elementwise_mul"); } + +} // namespace anakin +} // namespace inference +} // namespace paddle + +USE_OP(elementwise_add); +USE_ANAKIN_CONVERTER(elementwise_add); +USE_OP(elementwise_mul); +USE_ANAKIN_CONVERTER(elementwise_mul); diff --git a/paddle/fluid/inference/anakin/convert/test_fc_op.cc b/paddle/fluid/inference/anakin/convert/test_fc_op.cc index 7b8ceefe28873f0ffb9cedbb04b832ba029b7de4..ee6d1dc291fe3733ff2e9f66dd453120fa266a55 100644 --- a/paddle/fluid/inference/anakin/convert/test_fc_op.cc +++ b/paddle/fluid/inference/anakin/convert/test_fc_op.cc @@ -13,7 +13,6 @@ See the License for the specific language governing permissions and limitations under the License. */ #include -#include "paddle/fluid/inference/anakin/convert/fc.h" #include "paddle/fluid/inference/anakin/convert/op_converter.h" #include "paddle/fluid/inference/anakin/convert/ut_helper.h" @@ -22,17 +21,15 @@ namespace inference { namespace anakin { TEST(fc_op, test) { - auto fc_converter = OpRegister::instance()->Get("fc"); - ASSERT_TRUE(fc_converter != nullptr); - // Registrar register_fc("fc"); - // auto fc = std::make_shared(); + auto* fc_converter = Registry::Global().Lookup("fc"); + ASSERT_TRUE(fc_converter); std::unordered_set parameters({"mul_y"}); framework::Scope scope; - AnakinConvertValidation validator(parameters, scope); - validator.DeclInputVar("mul_x", {1, 1, 1, 1}); - validator.DeclParamVar("mul_y", {1, 2}); - validator.DeclOutputVar("mul_out", {1, 1, 1, 2}); + AnakinConvertValidation validator(parameters, &scope); + validator.DeclInputVar("mul_x", {1, 1, 2, 2}); + validator.DeclParamVar("mul_y", {4, 2}); + validator.DeclOutputVar("mul_out", {1, 2}); // Prepare Op description framework::OpDesc desc; @@ -40,8 +37,6 @@ TEST(fc_op, test) { desc.SetInput("X", {"mul_x"}); desc.SetInput("Y", {"mul_y"}); desc.SetOutput("Out", {"mul_out"}); - int num_flatten_dims = 3; - desc.SetAttr("x_num_col_dims", num_flatten_dims); validator.SetOp(*desc.Proto()); validator.Execute(10); @@ -52,3 +47,4 @@ TEST(fc_op, test) { } // namespace paddle USE_OP(mul); +USE_ANAKIN_CONVERTER(fc); diff --git a/paddle/fluid/inference/anakin/convert/test_flatten_op.cc b/paddle/fluid/inference/anakin/convert/test_flatten_op.cc new file mode 100644 index 0000000000000000000000000000000000000000..d13281f11f03fdd75e585bce8b30e8780d81f7d7 --- /dev/null +++ b/paddle/fluid/inference/anakin/convert/test_flatten_op.cc @@ -0,0 +1,51 @@ +/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#include +#include "paddle/fluid/inference/anakin/convert/op_converter.h" +#include "paddle/fluid/inference/anakin/convert/ut_helper.h" + +namespace paddle { +namespace inference { +namespace anakin { + +TEST(flatten_op, test) { + auto *converter = Registry::Global().Lookup("flatten"); + ASSERT_TRUE(converter); + + std::unordered_set parameters; + framework::Scope scope; + AnakinConvertValidation validator(parameters, &scope); + validator.DeclInputVar("flatten-X", {3, 10, 10, 4}); + validator.DeclOutputVar("flatten-Out", {3, 400, 1, 1}); + framework::OpDesc desc; + desc.SetType("flatten"); + desc.SetInput("X", {"flatten-X"}); + desc.SetOutput("Out", {"flatten-Out"}); + desc.SetAttr("axis", 1); + + LOG(INFO) << "set OP"; + validator.SetOp(*desc.Proto()); + LOG(INFO) << "execute"; + + validator.Execute(5); +} + +} // namespace anakin +} // namespace inference +} // namespace paddle + +USE_OP(reshape); +USE_OP_ITSELF(flatten); +USE_ANAKIN_CONVERTER(flatten); diff --git a/paddle/fluid/inference/anakin/convert/test_im2sequence_op.cc b/paddle/fluid/inference/anakin/convert/test_im2sequence_op.cc new file mode 100644 index 0000000000000000000000000000000000000000..5e5764633125c867e27b0b52e0e6ef18714653b2 --- /dev/null +++ b/paddle/fluid/inference/anakin/convert/test_im2sequence_op.cc @@ -0,0 +1,55 @@ +/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#include +#include "paddle/fluid/inference/anakin/convert/im2sequence.h" +#include "paddle/fluid/inference/anakin/convert/op_converter.h" +#include "paddle/fluid/inference/anakin/convert/ut_helper.h" + +namespace paddle { +namespace inference { +namespace anakin { + +TEST(im2sequence_op, native) { + std::unordered_set parameters; + framework::Scope scope; + AnakinConvertValidation validator(parameters, &scope); + + std::vector kernels = {6, 1}; + std::vector strides = {1, 1}; + std::vector paddings = {0, 0, 0, 0}; + + validator.DeclInputVar("x", {1, 1, 2, 2}); + validator.DeclOutputVar("out", {1, 1 * kernels[0] * kernels[1]}); + + // Prepare Op description + framework::OpDesc desc; + desc.SetType("im2sequence"); + desc.SetInput("X", {"x"}); + desc.SetOutput("Out", {"out"}); + + desc.SetAttr("kernels", kernels); + desc.SetAttr("strides", strides); + desc.SetAttr("paddings", paddings); + + validator.SetOp(*desc.Proto()); + validator.Execute(1); +} + +} // namespace anakin +} // namespace inference +} // namespace paddle + +USE_OP(im2sequence); +USE_ANAKIN_CONVERTER(im2sequence); diff --git a/paddle/fluid/inference/anakin/convert/test_pool2d_op.cc b/paddle/fluid/inference/anakin/convert/test_pool2d_op.cc new file mode 100644 index 0000000000000000000000000000000000000000..1ac019467721605c539c7ada452d04d5134fa341 --- /dev/null +++ b/paddle/fluid/inference/anakin/convert/test_pool2d_op.cc @@ -0,0 +1,119 @@ +/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. */ +#include +#include "paddle/fluid/inference/anakin/convert/op_converter.h" +#include "paddle/fluid/inference/anakin/convert/ut_helper.h" + +namespace paddle { +namespace inference { +namespace anakin { + +void test_pool2d(bool global_pooling, bool ceil_mode, + std::string pool_type = "max") { + auto* pool2d_converter = + Registry::Global().Lookup("pool2d"); + ASSERT_TRUE(pool2d_converter); + + framework::Scope scope; + std::unordered_set parameters; + AnakinConvertValidation validator(parameters, &scope); + + // The ITensor's Dims should not contain the batch size. + // So, the ITensor's Dims of input and output should be C * H * W. + validator.DeclInputVar("pool2d_x", {1, 3, 6, 7}); + if (global_pooling) + validator.DeclOutputVar("pool2d_out", {1, 3, 1, 1}); + else if (ceil_mode) + validator.DeclOutputVar("pool2d_out", {1, 3, 3, 4}); + else + validator.DeclOutputVar("pool2d_out", {1, 3, 3, 3}); + + // Prepare Op description + framework::OpDesc desc; + desc.SetType("pool2d"); + desc.SetInput("X", {"pool2d_x"}); + desc.SetOutput("Out", {"pool2d_out"}); + + std::vector ksize({2, 2}); + std::vector strides({2, 2}); + std::vector paddings({0, 0}); + std::string pooling_t = pool_type; + + desc.SetAttr("pooling_type", pooling_t); + desc.SetAttr("ksize", ksize); + desc.SetAttr("strides", strides); + desc.SetAttr("paddings", paddings); + desc.SetAttr("global_pooling", global_pooling); + desc.SetAttr("ceil_mode", ceil_mode); + + LOG(INFO) << "set OP"; + validator.SetOp(*desc.Proto()); + LOG(INFO) << "execute"; + + validator.Execute(1); +} + +void test_pool2d2(bool global_pooling, bool ceil_mode, + std::string pool_type = "max") { + auto* pool2d_converter = + Registry::Global().Lookup("pool2d"); + ASSERT_TRUE(pool2d_converter); + + framework::Scope scope; + std::unordered_set parameters; + AnakinConvertValidation validator(parameters, &scope); + + // The ITensor's Dims should not contain the batch size. + // So, the ITensor's Dims of input and output should be C * H * W. + validator.DeclInputVar("pool2d_x", {1, 1, 17, 17}); + validator.DeclOutputVar("pool2d_out", {1, 1, 17, 17}); + + // Prepare Op description + framework::OpDesc desc; + desc.SetType("pool2d"); + desc.SetInput("X", {"pool2d_x"}); + desc.SetOutput("Out", {"pool2d_out"}); + + std::vector ksize({3, 3}); + std::vector strides({1, 1}); + std::vector paddings({1, 1}); + std::string pooling_t = pool_type; + + desc.SetAttr("pooling_type", pooling_t); + desc.SetAttr("ksize", ksize); + desc.SetAttr("strides", strides); + desc.SetAttr("paddings", paddings); + desc.SetAttr("global_pooling", global_pooling); + desc.SetAttr("ceil_mode", true); + + LOG(INFO) << "set OP"; + validator.SetOp(*desc.Proto()); + LOG(INFO) << "execute"; + + validator.Execute(1); +} + +TEST(Pool2dOpConverter, normal) { test_pool2d(false, false); } +TEST(Pool2dOpConverter, test_global_pooling) { test_pool2d(true, false); } + +TEST(Pool2dOpConverter, max_ceil_test) { test_pool2d(false, true); } +TEST(Pool2dOpConverter, avg_ceil_test) { test_pool2d(false, true, "avg"); } +TEST(Pool2dOpConverter, avg_ceil_test2) { test_pool2d2(false, true, "avg"); } + +} // namespace anakin +} // namespace inference +} // namespace paddle + +USE_OP(pool2d); +USE_ANAKIN_CONVERTER(pool2d); diff --git a/paddle/fluid/inference/anakin/convert/test_relu_op.cc b/paddle/fluid/inference/anakin/convert/test_relu_op.cc new file mode 100644 index 0000000000000000000000000000000000000000..04e624518a5a4477bbb41475b575f85be5a120d4 --- /dev/null +++ b/paddle/fluid/inference/anakin/convert/test_relu_op.cc @@ -0,0 +1,50 @@ +/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#include +#include "paddle/fluid/inference/anakin/convert/op_converter.h" +#include "paddle/fluid/inference/anakin/convert/relu.h" +#include "paddle/fluid/inference/anakin/convert/ut_helper.h" + +namespace paddle { +namespace inference { +namespace anakin { + +static void test_activation_op(const std::string &op_type) { + auto *converter = Registry::Global().Lookup(op_type); + PADDLE_ENFORCE(converter != nullptr); + std::unordered_set parameters; + framework::Scope scope; + AnakinConvertValidation validator(parameters, &scope); + validator.DeclInputVar("act-X", {10, 6, 1, 1}); + validator.DeclOutputVar("act-Out", {10, 6, 1, 1}); + framework::OpDesc desc; + desc.SetType(op_type); + desc.SetInput("X", {"act-X"}); + desc.SetOutput("Out", {"act-Out"}); + + LOG(INFO) << "set OP"; + validator.SetOp(*desc.Proto()); + LOG(INFO) << "execute"; + + validator.Execute(5); +} + +TEST(sigm_op, test) { test_activation_op("relu"); } +} // namespace anakin +} // namespace inference +} // namespace paddle + +USE_OP(relu); +USE_ANAKIN_CONVERTER(relu); diff --git a/paddle/fluid/inference/anakin/convert/test_reshape_op.cc b/paddle/fluid/inference/anakin/convert/test_reshape_op.cc new file mode 100644 index 0000000000000000000000000000000000000000..306ebf510f29a87ca1ffa6df86e08f86b3f8ffbb --- /dev/null +++ b/paddle/fluid/inference/anakin/convert/test_reshape_op.cc @@ -0,0 +1,74 @@ +/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#include +#include "paddle/fluid/inference/anakin/convert/op_converter.h" +#include "paddle/fluid/inference/anakin/convert/ut_helper.h" + +namespace paddle { +namespace inference { +namespace anakin { + +TEST(reshape, test) { + auto* converter = Registry::Global().Lookup("reshape"); + ASSERT_TRUE(converter); + framework::Scope scope; + std::unordered_set parameters; + AnakinConvertValidation validator(parameters, &scope); + + // validator.DeclInputVar("reshape-X", {2, 3, 3, 1}); + // validator.DeclOutputVar("reshape-Out", {3, 2, 1, 3}); + validator.DeclInputVar("reshape-X", {1, 2, 4, 1}); + validator.DeclOutputVar("reshape-Out", {1, 8, 1, 1}); + + framework::OpDesc desc; + desc.SetType("reshape"); + desc.SetInput("X", {"reshape-X"}); + desc.SetOutput("Out", {"reshape-Out"}); + // desc.SetAttr("shape", std::vector({3, 2, 1, 3})); + desc.SetAttr("shape", std::vector({1, 8, 1, 1})); + + LOG(INFO) << "set OP"; + validator.SetOp(*desc.Proto()); + LOG(INFO) << "execute"; + validator.Execute(1); +} + +TEST(reshape, test2) { + framework::Scope scope; + std::unordered_set parameters; + AnakinConvertValidation validator(parameters, &scope); + + validator.DeclInputVar("reshape-X", {1, 2, 4}); + validator.DeclOutputVar("reshape-Out", {1, 4, 2}); + + framework::OpDesc desc; + desc.SetType("reshape"); + desc.SetInput("X", {"reshape-X"}); + desc.SetOutput("Out", {"reshape-Out"}); + // desc.SetAttr("shape", std::vector({3, 2, 1, 3})); + desc.SetAttr("shape", std::vector({0, -1, 2})); + + LOG(INFO) << "set OP"; + validator.SetOp(*desc.Proto()); + LOG(INFO) << "execute"; + validator.Execute(1); +} + +} // namespace anakin +} // namespace inference +} // namespace paddle + +USE_OP(reshape); +USE_ANAKIN_CONVERTER(reshape); diff --git a/paddle/fluid/inference/anakin/convert/test_softmax_op.cc b/paddle/fluid/inference/anakin/convert/test_softmax_op.cc new file mode 100644 index 0000000000000000000000000000000000000000..8c14fae0a67b9e488cf072535868a34f6195ab71 --- /dev/null +++ b/paddle/fluid/inference/anakin/convert/test_softmax_op.cc @@ -0,0 +1,49 @@ +/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#include +#include "paddle/fluid/inference/anakin/convert/op_converter.h" +#include "paddle/fluid/inference/anakin/convert/ut_helper.h" + +namespace paddle { +namespace inference { +namespace anakin { + +TEST(softmax, test) { + auto* converter = Registry::Global().Lookup("softmax"); + ASSERT_TRUE(converter); + framework::Scope scope; + std::unordered_set parameters; + AnakinConvertValidation validator(parameters, &scope); + + validator.DeclInputVar("softmax-X", {1, 10, 2}); + validator.DeclOutputVar("softmax-Out", {1, 10, 2}); + + framework::OpDesc desc; + desc.SetType("softmax"); + desc.SetInput("X", {"softmax-X"}); + desc.SetOutput("Out", {"softmax-Out"}); + + LOG(INFO) << "set OP"; + validator.SetOp(*desc.Proto()); + LOG(INFO) << "execute"; + validator.Execute(1); +} + +} // namespace anakin +} // namespace inference +} // namespace paddle + +USE_OP(softmax); +USE_ANAKIN_CONVERTER(softmax); diff --git a/paddle/fluid/inference/anakin/convert/test_split_op.cc b/paddle/fluid/inference/anakin/convert/test_split_op.cc new file mode 100644 index 0000000000000000000000000000000000000000..aa61c01a511c2337944aadbbc3d47893487de683 --- /dev/null +++ b/paddle/fluid/inference/anakin/convert/test_split_op.cc @@ -0,0 +1,110 @@ +/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#include +#include "paddle/fluid/inference/anakin/convert/op_converter.h" +#include "paddle/fluid/inference/anakin/convert/split.h" +#include "paddle/fluid/inference/anakin/convert/ut_helper.h" + +namespace paddle { +namespace inference { +namespace anakin { + +template +void AnakinSliceTest(const std::vector &in_shape, + const std::vector §ions) { + std::unordered_set parameters({""}); + framework::Scope scope; + AnakinConvertValidation validator(parameters, &scope); + + validator.DeclInputVar("split_input", in_shape); + std::vector output_vars; + for (size_t i = 0; i < sections.size(); ++i) { + auto out_shape = in_shape; + out_shape[Axis] = sections[i]; + std::string output_name = "split_out" + std::to_string(i); + validator.DeclOutputVar(output_name, out_shape); + output_vars.push_back(output_name); + } + + // Prepare Op description + framework::OpDesc desc; + desc.SetType("split"); + desc.SetInput("X", {"split_input"}); + desc.SetOutput("Out", output_vars); + + desc.SetAttr("axis", Axis); + desc.SetAttr("num", 0); + desc.SetAttr("sections", sections); + + validator.SetOp(*desc.Proto()); + + validator.Execute(1); +} + +// batch = 0, axis = 1, same shape +TEST(split_op, test_same_shape_axis1_batch1) { + AnakinSliceTest<1>({1, 4, 2, 2}, {2, 2}); +} +// batch = 0, axis = 1, different shape +TEST(split_op, test_different_shape_axis1_batch1) { + AnakinSliceTest<1>({1, 3, 2, 2}, {2, 1}); +} +// batch = 10, axis = 1, same shape +TEST(split_op, test_same_shape_axis1_batch10) { + AnakinSliceTest<1>({1, 4, 2, 2}, {2, 2}); +} +// batch = 10, axis = 1, different shape +TEST(split_op, test_different_shape_axis1_batch10) { + AnakinSliceTest<1>({1, 3, 2, 2}, {2, 1}); +} +// batch = 0, axis = 2, same shape +TEST(split_op, test_same_shape_axis2_batch1) { + AnakinSliceTest<2>({1, 3, 4, 2}, {2, 2}); +} +// batch = 0, axis = 2, different shape +TEST(split_op, test_different_shape_axis2_batch1) { + AnakinSliceTest<2>({1, 3, 3, 2}, {2, 1}); +} +// batch = 10, axis = 2, same shape +TEST(split_op, test_same_shape_axis2_batch10) { + AnakinSliceTest<2>({1, 3, 4, 2}, {2, 2}); +} +// batch = 10, axis = 2, different shape +TEST(split_op, test_different_shape_axis2_batch10) { + AnakinSliceTest<2>({1, 3, 3, 2}, {2, 1}); +} +// batch = 0, axis = 3, same shape +TEST(split_op, test_same_shape_axis3_batch1) { + AnakinSliceTest<3>({1, 3, 2, 4}, {2, 2}); +} +// batch = 0, axis = 3, different shape +TEST(split_op, test_different_shape_axis3_batch1) { + AnakinSliceTest<3>({1, 3, 2, 3}, {2, 1}); +} +// batch = 10, axis = 3, same shape +TEST(split_op, test_same_shape_axis3_batch10) { + AnakinSliceTest<3>({1, 3, 2, 4}, {2, 2}); +} +// batch = 10, axis = 3, different shape +TEST(split_op, test_different_shape_axis3_batch10) { + AnakinSliceTest<3>({1, 3, 2, 3}, {2, 1}); +} + +} // namespace anakin +} // namespace inference +} // namespace paddle + +USE_OP(split); +USE_ANAKIN_CONVERTER(split); diff --git a/paddle/fluid/inference/anakin/convert/test_sum_op.cc b/paddle/fluid/inference/anakin/convert/test_sum_op.cc new file mode 100644 index 0000000000000000000000000000000000000000..d6a59a0166be9239b480221cc076069239403429 --- /dev/null +++ b/paddle/fluid/inference/anakin/convert/test_sum_op.cc @@ -0,0 +1,48 @@ +/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#include +#include "paddle/fluid/inference/anakin/convert/op_converter.h" +#include "paddle/fluid/inference/anakin/convert/sum.h" +#include "paddle/fluid/inference/anakin/convert/ut_helper.h" +#include "paddle/fluid/operators/sum_op.h" + +namespace paddle { +namespace inference { +namespace anakin { + +TEST(sum, native) { + std::unordered_set parameters; + framework::Scope scope; + AnakinConvertValidation validator(parameters, &scope); + validator.DeclInputVar("sum_x1", {1, 2, 1, 2}); + validator.DeclInputVar("sum_x2", {1, 2, 1, 2}); + validator.DeclOutputVar("sum_out", {1, 2, 1, 2}); + + // Prepare Op description + framework::OpDesc desc; + desc.SetType("sum"); + desc.SetInput("X", {"sum_x1", "sum_x2"}); + desc.SetOutput("Out", {"sum_out"}); + + validator.SetOp(*desc.Proto()); + validator.Execute(1); +} + +} // namespace anakin +} // namespace inference +} // namespace paddle + +USE_OP(sum); +USE_ANAKIN_CONVERTER(sum); diff --git a/paddle/fluid/inference/anakin/convert/test_transpose_op.cc b/paddle/fluid/inference/anakin/convert/test_transpose_op.cc new file mode 100644 index 0000000000000000000000000000000000000000..016ed26f02f782fe5032d8368f7767a5c94dfe9f --- /dev/null +++ b/paddle/fluid/inference/anakin/convert/test_transpose_op.cc @@ -0,0 +1,73 @@ +/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#include +#include "paddle/fluid/inference/anakin/convert/op_converter.h" +#include "paddle/fluid/inference/anakin/convert/ut_helper.h" + +namespace paddle { +namespace inference { +namespace anakin { + +TEST(transpose_op, test) { + auto* converter = Registry::Global().Lookup("transpose"); + ASSERT_TRUE(converter != nullptr); + std::unordered_set parameters; + framework::Scope scope; + AnakinConvertValidation validator(parameters, &scope); + validator.DeclInputVar("transpose-X", {2, 3, 4, 5}); + validator.DeclOutputVar("transpose-Out", {4, 2, 5, 3}); + + // Prepare Op description + framework::OpDesc desc; + desc.SetType("transpose"); + desc.SetInput("X", {"transpose-X"}); + desc.SetOutput("Out", {"transpose-Out"}); + desc.SetAttr("axis", std::vector({2, 0, 3, 1})); + + LOG(INFO) << "set OP"; + validator.SetOp(*desc.Proto()); + LOG(INFO) << "execute"; + + validator.Execute(3); +} + +// test input shape's dims < 4 +TEST(transpose_op, test2) { + std::unordered_set parameters; + framework::Scope scope; + AnakinConvertValidation validator(parameters, &scope); + validator.DeclInputVar("transpose-X", {3, 4, 5}); + validator.DeclOutputVar("transpose-Out", {3, 5, 4}); + + // Prepare Op description + framework::OpDesc desc; + desc.SetType("transpose"); + desc.SetInput("X", {"transpose-X"}); + desc.SetOutput("Out", {"transpose-Out"}); + desc.SetAttr("axis", std::vector({0, 2, 1})); + + LOG(INFO) << "set OP"; + validator.SetOp(*desc.Proto()); + LOG(INFO) << "execute"; + + validator.Execute(1); +} + +} // namespace anakin +} // namespace inference +} // namespace paddle + +USE_OP(transpose); +USE_ANAKIN_CONVERTER(transpose); diff --git a/paddle/fluid/inference/anakin/convert/transpose.cc b/paddle/fluid/inference/anakin/convert/transpose.cc new file mode 100644 index 0000000000000000000000000000000000000000..6a887401034f9d8c0b8b6aa3eeffb6579e395029 --- /dev/null +++ b/paddle/fluid/inference/anakin/convert/transpose.cc @@ -0,0 +1,55 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "paddle/fluid/inference/anakin/convert/transpose.h" +#include +#include +#include + +using anakin::graph::GraphGlobalMem; +using anakin::AK_FLOAT; +using anakin::saber::NV; +using anakin::saber::Shape; +using anakin::PTuple; + +namespace paddle { +namespace inference { +namespace anakin { + +void TransposeOpConverter::operator()(const framework::proto::OpDesc &op, + const framework::Scope &scope, + bool test_mode) { + framework::OpDesc op_desc(op, nullptr); + PADDLE_ENFORCE_EQ(op_desc.Input("X").size(), 1); + PADDLE_ENFORCE_EQ(op_desc.Output("Out").size(), 1); + + auto input = op_desc.Input("X").front(); + auto output = op_desc.Output("Out").front(); + auto op_name = op_desc.Type() + ":" + op_desc.Output("Out").front(); + engine_->AddOp(op_name, "Permute", {input}, {output}); + + auto axis = boost::get>(op_desc.GetAttr("axis")); + size_t axis_size = axis.size(); + while (axis.size() < 4) { + axis.push_back(axis_size); + axis_size += 1; + } + engine_->AddOpAttr>(op_name, "dims", axis); +} + +} // namespace anakin +} // namespace inference +} // namespace paddle + +REGISTER_ANAKIN_OP_CONVERTER(transpose, TransposeOpConverter); diff --git a/paddle/fluid/inference/anakin/convert/transpose.h b/paddle/fluid/inference/anakin/convert/transpose.h new file mode 100644 index 0000000000000000000000000000000000000000..62d26b6a9cc9885682f5750df32018596f014b33 --- /dev/null +++ b/paddle/fluid/inference/anakin/convert/transpose.h @@ -0,0 +1,35 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once + +#include "paddle/fluid/inference/anakin/convert/op_converter.h" + +namespace paddle { +namespace inference { +namespace anakin { + +class TransposeOpConverter : public AnakinOpConverter { + public: + TransposeOpConverter() = default; + + virtual void operator()(const framework::proto::OpDesc &op, + const framework::Scope &scope, + bool test_mode) override; + virtual ~TransposeOpConverter() {} +}; + +} // namespace anakin +} // namespace inference +} // namespace paddle diff --git a/paddle/fluid/inference/anakin/convert/ut_helper.h b/paddle/fluid/inference/anakin/convert/ut_helper.h index 38d8e596a738ac98c9f9870473f72dcc72b0e7aa..e0371d95347a521f499dd9454d284907b3048a04 100644 --- a/paddle/fluid/inference/anakin/convert/ut_helper.h +++ b/paddle/fluid/inference/anakin/convert/ut_helper.h @@ -14,6 +14,7 @@ limitations under the License. */ #pragma once +#include #include #include #include @@ -24,6 +25,7 @@ limitations under the License. */ #include "paddle/fluid/framework/lod_tensor.h" #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/framework/tensor_util.h" +#include "paddle/fluid/inference/anakin/convert/op_converter.h" #include "paddle/fluid/inference/anakin/engine.h" #include "paddle/fluid/inference/analysis/helper.h" #include "paddle/fluid/inference/utils/singleton.h" @@ -82,7 +84,7 @@ class AnakinConvertValidation { AnakinConvertValidation() = delete; AnakinConvertValidation(const std::unordered_set& parameters, - const framework::Scope& scope) + framework::Scope* scope) : parameters_(parameters), scope_(scope), place_(0) { PADDLE_ENFORCE_EQ(cudaStreamCreate(&stream_), 0); engine_.reset(new AnakinEngine(true)); @@ -106,7 +108,7 @@ class AnakinConvertValidation { void DeclVar(const std::string& name, const std::vector dim_vec) { platform::CUDADeviceContext ctx(place_); - auto* x = scope_.Var(name); + auto* x = scope_->Var(name); auto* x_tensor = x->GetMutable(); x_tensor->Resize(framework::make_ddim(dim_vec)); RandomizeTensor(x_tensor, place_, ctx); @@ -118,15 +120,22 @@ class AnakinConvertValidation { // should init anakin engine here. Singleton::Global().ConvertOp( - desc, parameters_, scope_, engine_.get(), true /*test_mode*/); + desc, parameters_, *scope_, engine_.get(), true /*test_mode*/); engine_->Freeze(); + + std::map> temp_max_input_shape; for (const auto& input : op_desc_->InputArgumentNames()) { if (parameters_.count(input)) continue; - auto& t = inference::analysis::GetFromScope(scope_, + auto& t = inference::analysis::GetFromScope(*scope_, input); auto t_shape = framework::vectorize2int(t.dims()); + while (t_shape.size() < 4) { + t_shape.push_back(1); + } engine_->SetInputShape(input, t_shape); + temp_max_input_shape[input] = t_shape; } + engine_->SetMaxInputShape(temp_max_input_shape); engine_->Optimize(); engine_->InitGraph(); } @@ -138,14 +147,14 @@ class AnakinConvertValidation { std::unordered_set neglected_output = {}) { // Execute Fluid Op platform::CUDADeviceContext ctx(place_); - op_->Run(scope_, place_); + op_->Run(*scope_, place_); // std::vector input_vector; // std::vector output_vector; std::map inputs; for (const auto& input : op_desc_->InputArgumentNames()) { if (parameters_.count(input)) continue; - auto* var = scope_.FindVar(input); + auto* var = scope_->FindVar(input); auto tensor = var->GetMutable(); inputs.insert({input, tensor}); } @@ -155,45 +164,38 @@ class AnakinConvertValidation { for (const auto& output : op_desc_->OutputArgumentNames()) { if (neglected_output.count(output)) continue; std::vector fluid_out; - auto* var = scope_.FindVar(output); + auto* var = scope_->FindVar(output); auto tensor = var->GetMutable(); framework::TensorToVector(*tensor, ctx, &fluid_out); fluid_outputs.push_back(fluid_out); - // size_t fluid_out_size = fluid_out.size(); - /*for (size_t i = 0; i < fluid_out_size; i++) { - std::cout << fluid_out[i] << std::endl; - }*/ outputs.insert({output, tensor}); } - engine_->Execute(inputs, outputs); + engine_->Execute(inputs, outputs, stream_); int i_output = 0; for (const auto& output : op_desc_->OutputArgumentNames()) { if (neglected_output.count(output)) continue; std::vector anakin_out; - auto* var = scope_.FindVar(output); + auto* var = scope_->FindVar(output); auto tensor = var->GetMutable(); framework::TensorToVector(*tensor, ctx, &anakin_out); size_t anakin_out_size = anakin_out.size(); auto fluid_out = fluid_outputs[i_output++]; for (size_t i = 0; i < anakin_out_size; i++) { - LOG(INFO) << "Output[" << i << "]: anakin[" << anakin_out[i] << "], " - << "fluid[" << fluid_out[i] << "]"; + EXPECT_LT(std::abs(fluid_out[i] - anakin_out[i]), 1e-3); } } } - framework::Scope& scope() { return scope_; } - private: std::unique_ptr engine_{nullptr}; cudaStream_t stream_; std::unique_ptr op_; std::unique_ptr op_desc_; const std::unordered_set& parameters_; - framework::Scope& scope_; + framework::Scope* scope_; platform::CUDAPlace place_; }; diff --git a/paddle/fluid/inference/anakin/engine.cc b/paddle/fluid/inference/anakin/engine.cc index 6549991474f4834f0c3ef74c60d294cca6bebc91..ccf78ad7e56306d24af829c45c888021f4e3fbc4 100644 --- a/paddle/fluid/inference/anakin/engine.cc +++ b/paddle/fluid/inference/anakin/engine.cc @@ -33,9 +33,15 @@ namespace inference { namespace anakin { template -AnakinEngine::AnakinEngine(bool need_summary) +AnakinEngine::AnakinEngine( + bool need_summary, int device, int max_batch_size, + std::map> max_input_shape) : graph_(new AnakinGraphT()), - net_(new AnakinNetT(need_summary)) {} + net_(new AnakinNetT(need_summary)) { + device_ = device; + max_batch_size_ = max_batch_size; + max_input_shape_ = max_input_shape; +} template AnakinEngine::~AnakinEngine() {} @@ -63,34 +69,53 @@ void AnakinEngine::AddOp( template void AnakinEngine::Execute( const std::map &inputs, - const std::map &outputs) { + const std::map &outputs, + cudaStream_t stream) { + cudaDeviceSynchronize(); for (const auto &input : inputs) { auto *tensor = input.second; auto *data = tensor->data(); - auto shape = framework::vectorize2int(tensor->dims()); - ::anakin::saber::Shape anakin_shape(shape); + + auto fluid_input_shape = framework::vectorize2int(tensor->dims()); + while (fluid_input_shape.size() < 4) { + fluid_input_shape.push_back(1); + } auto *anakin_input = net_->get_in(input.first); + std::vector max_input_shape = max_input_shape_[input.first]; + int max_shape_sum = + std::accumulate(max_input_shape.begin(), max_input_shape.end(), 1, + std::multiplies()); + + PADDLE_ENFORCE(max_shape_sum >= tensor->numel(), + "The anakin input max shape should be greater than" + " or equal to the real input shape, Please set the max " + "input shape using EnableAnakinEngine"); + anakin_input->reshape(fluid_input_shape); + ::anakin::saber::Tensor tmp_anakin_tensor(data, TargetT(), 0, - anakin_shape); - anakin_input->share_from(tmp_anakin_tensor); + fluid_input_shape); + anakin_input->copy_from(tmp_anakin_tensor); } - + net_->prediction(); + cudaDeviceSynchronize(); for (const auto &output : outputs) { + platform::CUDAPlace gpu_place(device_); auto *tensor = output.second; - auto *data = tensor->data(); - auto shape = framework::vectorize2int(tensor->dims()); - ::anakin::saber::Shape anakin_shape(shape); auto *anakin_output = net_->get_out(output.first); - ::anakin::saber::Tensor tmp_anakin_tensor(data, TargetT(), 0, - anakin_shape); - anakin_output->share_from(tmp_anakin_tensor); + auto *anakin_data = anakin_output->data(); + auto anakin_output_shape = anakin_output->valid_shape(); + tensor->Resize(framework::make_ddim(anakin_output_shape)); + auto *fluid_data = tensor->mutable_data(gpu_place); + memory::Copy(gpu_place, static_cast(fluid_data), gpu_place, + static_cast(anakin_data), + tensor->numel() * sizeof(float), stream); } - net_->prediction(); + cudaDeviceSynchronize(); } template void AnakinEngine::Freeze() { - PADDLE_ENFORCE(graph_->Freeze(), "Freeze anakin subgraph."); + PADDLE_ENFORCE(graph_->Freeze_v3(), "Freeze anakin subgraph."); } template diff --git a/paddle/fluid/inference/anakin/engine.h b/paddle/fluid/inference/anakin/engine.h index d8f32f57be5aabb91ba720c6457a03f15083db43..4845ffdf5b9dcfa99d1f421d47328beb4b196298 100644 --- a/paddle/fluid/inference/anakin/engine.h +++ b/paddle/fluid/inference/anakin/engine.h @@ -15,9 +15,11 @@ #pragma once #include +#include #include #include #include +#include #include #include "paddle/fluid/framework/lod_tensor.h" #include "paddle/fluid/inference/engine.h" @@ -26,8 +28,12 @@ #include "framework/core/net/net.h" #include "framework/core/types.h" #include "framework/graph/graph.h" +#include "framework/graph/graph_global_mem.h" #include "saber/saber_types.h" +using anakin::Precision; +using anakin::saber::NV; + namespace anakin { template @@ -46,8 +52,13 @@ namespace anakin { template class AnakinEngine { + using NetT = ::anakin::Net; + using GraphT = ::anakin::graph::Graph; + public: - explicit AnakinEngine(bool need_summary = false); + explicit AnakinEngine( + bool need_summary = false, int device = 0, int max_batch_size = 1, + std::map> max_input_shape = {}); ~AnakinEngine(); void InitGraph(); void SetInputShape(const std::string &name, std::vector shape); @@ -61,20 +72,72 @@ class AnakinEngine { PADDLE_ENFORCE(graph_->AddOpAttr(op_name, attr_name, attr_value), "Add operation's attribution."); } - + NetT *Net() { return net_.get(); } + GraphT *Graph() { return graph_.get(); } std::unique_ptr Clone(); + const std::map> &GetMaxInputShape() { + return max_input_shape_; + } + void SetMaxInputShape(std::map> shape) { + max_input_shape_ = shape; + } + int GetMaxBatchSize() { return max_batch_size_; } void Freeze(); void Optimize(); + void AllocTmpMem() { + PADDLE_ENFORCE(net_->alloc_memory_first(*graph_), + "anakin alloc temp memory first failed"); + } + void Save(std::string path) { graph_->save(path); } + + bool IsInit() { return initialized_; } + int GetDevice() { return device_; } void Execute(const std::map &inputs, - const std::map &outputs); + const std::map &outputs, + cudaStream_t stream); private: - using NetT = ::anakin::Net; - using GraphT = ::anakin::graph::Graph; + bool initialized_{false}; + int max_batch_size_; + std::map> max_input_shape_; + int device_; std::unique_ptr graph_; std::unique_ptr net_; }; +class AnakinEngineManager { + using AnakinNvEngineT = AnakinEngine; + + public: + bool HasEngine(const std::string &name) const { + if (engines_.count(name) == 0) return false; + return engines_.at(name).get() != nullptr; + } + AnakinNvEngineT *Get(const std::string &name) const { + return engines_.at(name).get(); + } + + AnakinNvEngineT *Create( + bool need_summary, int device, int max_batch_size, + std::map> max_input_shape, + std::string engine_name) { + std::unique_lock lk(mut_); + auto *p = new AnakinEngine( + need_summary, device, max_batch_size, max_input_shape); + engines_[engine_name].reset(p); + return p; + } + + void DeleteALL() { + for (auto &item : engines_) { + item.second.reset(nullptr); + } + } + + private: + std::unordered_map> engines_; + std::mutex mut_; +}; } // namespace anakin } // namespace inference } // namespace paddle diff --git a/paddle/fluid/inference/anakin/op_teller.cc b/paddle/fluid/inference/anakin/op_teller.cc new file mode 100644 index 0000000000000000000000000000000000000000..90cf021de2f9d365fd1fa21f7d189d3fcd9d3ab2 --- /dev/null +++ b/paddle/fluid/inference/anakin/op_teller.cc @@ -0,0 +1,67 @@ +// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "paddle/fluid/inference/anakin/op_teller.h" + +namespace paddle { +namespace inference { +namespace anakin { + +// Just tell by the op_types. +struct SimpleOpTypeSetTeller : public Teller { + SimpleOpTypeSetTeller() { + teller_set.insert("mul"); + teller_set.insert("fc"); + teller_set.insert("conv2d_fusion"); + teller_set.insert("split"); + teller_set.insert("relu"); + teller_set.insert("pool2d"); + teller_set.insert("elementwise_add"); + teller_set.insert("elementwise_mul"); + teller_set.insert("concat"); + teller_set.insert("tanh"); + teller_set.insert("conv2d"); + teller_set.insert("batch_norm"); + teller_set.insert("softmax"); + teller_set.insert("flatten2"); + teller_set.insert("reshape2"); + teller_set.insert("transpose2"); + teller_set.insert("density_prior_box"); + teller_set.insert("detection_out"); + teller_set.insert("dropout"); + teller_set.insert("sigmoid"); + teller_set.insert("sum"); + } + + bool operator()(const std::string& op_type, + const framework::OpDesc& desc) override { + return teller_set.count(op_type); + } + + private: + std::unordered_set teller_set; +}; + +bool OpTeller::Tell(const std::string& op_type, const framework::OpDesc& desc) { + for (auto& teller : tellers_) { + if ((*teller)(op_type, desc)) return true; + } + return false; +} + +OpTeller::OpTeller() { tellers_.emplace_back(new SimpleOpTypeSetTeller); } + +} // namespace anakin +} // namespace inference +} // namespace paddle diff --git a/paddle/fluid/inference/anakin/op_teller.h b/paddle/fluid/inference/anakin/op_teller.h new file mode 100644 index 0000000000000000000000000000000000000000..15a42067b8438e60851a50e454abde95782d90ee --- /dev/null +++ b/paddle/fluid/inference/anakin/op_teller.h @@ -0,0 +1,70 @@ +// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once +#include +#include +#include +#include +#include "paddle/fluid/framework/op_desc.h" + +namespace paddle { +namespace inference { +namespace anakin { + +/* + * Single Op teller definition. + * One can override this and define a more complex tell logic, considerring more + * issues such as op_desc. + */ +struct Teller { + virtual bool operator()(const std::string& op_type, + const framework::OpDesc& desc) = 0; + + virtual ~Teller() = default; +}; +/* + * A real example: + * + * struct SomeTeller : public Teller { + * bool operator()(const std::string& op_type, + * const framework::OpDesc& desc) override { + * return op_type == "fc" && desc.Inputs().size() == 2; + * } + *}; + */ + +/* + * class OpTeller helps to tell whether a fluid + * operator can be transformed to a TensorRT layer. + */ +class OpTeller { + public: + static OpTeller& Global() { + static std::unique_ptr x(new OpTeller); + return *x; + } + + bool Tell(const std::string& op_type, const framework::OpDesc& desc); + + private: + OpTeller(); + + private: + std::vector> tellers_; +}; + +} // namespace anakin +} // namespace inference +} // namespace paddle diff --git a/paddle/fluid/inference/anakin/test_anakin_engine.cc b/paddle/fluid/inference/anakin/test_anakin_engine.cc index 571294d3e22fb9489686bfcb2f3a64198099f970..8fd6b8bec9ada6dec67fd24a2457713203431ebf 100644 --- a/paddle/fluid/inference/anakin/test_anakin_engine.cc +++ b/paddle/fluid/inference/anakin/test_anakin_engine.cc @@ -17,9 +17,6 @@ limitations under the License. */ #include -#include "framework/core/net/net.h" -#include "framework/graph/graph.h" -#include "framework/graph/graph_global_mem.h" #include "paddle/fluid/inference/anakin/engine.h" using anakin::graph::GraphGlobalMem; @@ -84,7 +81,9 @@ TEST_F(TestAnakinEngine, Execute) { auto *y_data = y.mutable_data(platform::CUDAPlace()); std::map outputs = {{"y", &y}}; - engine_->Execute(inputs, outputs); + cudaStream_t stream; + + engine_->Execute(inputs, outputs, stream); auto *y_data_gpu = y_data; float y_data_cpu[2]; cudaMemcpy(y_data_cpu, y_data_gpu, sizeof(float) * 2, cudaMemcpyDeviceToHost); diff --git a/paddle/fluid/inference/analysis/argument.h b/paddle/fluid/inference/analysis/argument.h index 997f3575f457b67d4df5000705724b46cd8b951d..29f16943e0c13fbe080e8e073b081583f1d14d11 100644 --- a/paddle/fluid/inference/analysis/argument.h +++ b/paddle/fluid/inference/analysis/argument.h @@ -23,6 +23,7 @@ #pragma once +#include #include #include #include @@ -41,8 +42,11 @@ namespace inference { namespace analysis { using framework::ir::Graph; + +#ifdef PADDLE_WITH_MKLDNN using VarQuantScale = std::unordered_map>; +#endif /* * The argument definition of both Pass and PassManagers. @@ -55,6 +59,8 @@ struct Argument { using unique_ptr_t = std::unique_ptr>; using fusion_statis_t = std::unordered_map; + using engine_opt_info_t = std::map; + using anakin_max_shape_t = std::map>; bool Has(const std::string& key) const { return valid_fields_.count(key); } @@ -107,12 +113,14 @@ struct Argument { private: \ unique_ptr_t field__##_; + DECL_ARGUMENT_FIELD(predictor_id, PredictorID, int); // Model path DECL_ARGUMENT_FIELD(model_dir, ModelDir, std::string); // Model specified with program and parameters files. DECL_ARGUMENT_FIELD(model_program_path, ModelProgramPath, std::string); DECL_ARGUMENT_FIELD(model_params_path, ModelParamsPath, std::string); DECL_ARGUMENT_FIELD(model_from_memory, ModelFromMemory, bool); + DECL_ARGUMENT_FIELD(engine_opt_info, EngineOptInfo, engine_opt_info_t); // The overall graph to work on. DECL_ARGUMENT_UNIQUE_FIELD(main_graph, MainGraph, framework::ir::Graph); @@ -132,6 +140,7 @@ struct Argument { DECL_ARGUMENT_FIELD(mkldnn_enabled_op_types, MKLDNNEnabledOpTypes, std::unordered_set); +#ifdef PADDLE_WITH_MKLDNN // A set of op types to enable their quantized kernels DECL_ARGUMENT_FIELD(quantize_enabled_op_types, QuantizeEnabledOpTypes, std::unordered_set); @@ -142,6 +151,7 @@ struct Argument { // Scales for variables to be quantized DECL_ARGUMENT_FIELD(quant_var_scales, QuantVarScales, VarQuantScale); +#endif // Passed from config. DECL_ARGUMENT_FIELD(use_gpu, UseGPU, bool); @@ -155,6 +165,11 @@ struct Argument { DECL_ARGUMENT_FIELD(tensorrt_use_static_engine, TensorRtUseStaticEngine, bool); + DECL_ARGUMENT_FIELD(anakin_max_input_shape, AnakinMaxInputShape, + anakin_max_shape_t); + DECL_ARGUMENT_FIELD(anakin_max_batch_size, AnakinMaxBatchSize, int); + DECL_ARGUMENT_FIELD(use_anakin, UseAnakin, bool); + // Memory optimized related. DECL_ARGUMENT_FIELD(enable_memory_optim, EnableMemoryOptim, bool); DECL_ARGUMENT_FIELD(static_memory_optim, StaticMemoryOptim, bool); diff --git a/paddle/fluid/inference/analysis/ir_pass_manager.cc b/paddle/fluid/inference/analysis/ir_pass_manager.cc index 1556caa46412c8a2dacd44f2187666c6a1fda6bf..7a96ac11d8ef754f38070862a70744947412882b 100644 --- a/paddle/fluid/inference/analysis/ir_pass_manager.cc +++ b/paddle/fluid/inference/analysis/ir_pass_manager.cc @@ -13,9 +13,12 @@ // limitations under the License. #include "paddle/fluid/inference/analysis/ir_pass_manager.h" +#include +#include #include #include #include +#include #include #include "paddle/fluid/framework/ir/fuse_pass_base.h" #include "paddle/fluid/framework/ir/graph.h" @@ -61,6 +64,7 @@ void IRPassManager::CreatePasses(Argument *argument, pass->Set("mkldnn_enabled_op_types", new std::unordered_set( argument->mkldnn_enabled_op_types())); +#ifdef PADDLE_WITH_MKLDNN } else if (pass_name == "cpu_quantize_placement_pass") { pass->Set("quantize_enabled_op_types", new std::unordered_set( @@ -71,6 +75,7 @@ void IRPassManager::CreatePasses(Argument *argument, } else if (pass_name == "cpu_quantize_pass") { pass->Set("quant_var_scales", new VarQuantScale(argument->quant_var_scales())); +#endif } else if (pass_name == "tensorrt_subgraph_pass") { pass->Set("workspace_size", new int(argument->tensorrt_workspace_size())); pass->Set("max_batch_size", new int(argument->tensorrt_max_batch_size())); @@ -83,16 +88,40 @@ void IRPassManager::CreatePasses(Argument *argument, AnalysisConfig::Precision::kInt8; pass->Set("enable_int8", new bool(enable_int8)); - std::string model_opt_cache_dir = - argument->Has("model_dir") - ? argument->model_dir() - : GetDirRoot(argument->model_program_path()); - pass->Set( - "model_opt_cache_dir", - new std::string(GetOrCreateModelOptCacheDir(model_opt_cache_dir))); + + bool use_static_engine = argument->tensorrt_use_static_engine(); + bool model_from_memory = argument->model_from_memory(); + bool int8_valid = !(model_from_memory && enable_int8); + PADDLE_ENFORCE(int8_valid, + "TRT INT8 Now don't support model load from memory."); + + if ((!model_from_memory && use_static_engine) || enable_int8) { + std::string model_opt_cache_dir = + argument->Has("model_dir") + ? argument->model_dir() + : GetDirRoot(argument->model_program_path()); + pass->Set( + "model_opt_cache_dir", + new std::string(GetOrCreateModelOptCacheDir(model_opt_cache_dir))); + } + pass->Set("gpu_device_id", new int(argument->gpu_device_id())); + pass->Set("use_static_engine", new bool(use_static_engine)); + pass->Set("model_from_memory", new bool(argument->model_from_memory())); + pass->Set("engine_opt_info", new std::map( + argument->engine_opt_info())); + } + + if (pass_name == "anakin_subgraph_pass") { + pass->Set("program", + new framework::ProgramDesc *(&argument->main_program())); pass->Set("gpu_device_id", new int(argument->gpu_device_id())); - pass->Set("use_static_engine", - new bool(argument->tensorrt_use_static_engine())); + pass->Set("model_from_memory", new bool(argument->model_from_memory())); + pass->Set("engine_opt_info", new std::map( + argument->engine_opt_info())); + pass->Set("predictor_id", new int(argument->predictor_id())); + pass->Set("max_input_shape", new std::map>( + argument->anakin_max_input_shape())); + pass->Set("max_batch_size", new int(argument->anakin_max_batch_size())); } pre_pass = pass_name; diff --git a/paddle/fluid/inference/analysis/ir_passes/CMakeLists.txt b/paddle/fluid/inference/analysis/ir_passes/CMakeLists.txt index 410a90132aa7657a23b858570763547fe53730a0..05a3d7ddfdb08c98866cc0a08ec4113866c7567d 100644 --- a/paddle/fluid/inference/analysis/ir_passes/CMakeLists.txt +++ b/paddle/fluid/inference/analysis/ir_passes/CMakeLists.txt @@ -1,4 +1,4 @@ -cc_library(subgraph_detector SRCS subgraph_detector.cc DEPS proto_desc) +cc_library(subgraph_detector SRCS subgraph_detector.cc subgraph_util.cc DEPS proto_desc) if(WITH_TESTING) add_dependencies(subgraph_detector gtest) endif() @@ -14,3 +14,15 @@ if (WITH_GPU AND TENSORRT_FOUND) file(APPEND ${pass_file} "USE_PASS(tensorrt_subgraph_pass);\n") set(INFER_IR_PASSES ${INFER_IR_PASSES} tensorrt_subgraph_pass CACHE INTERNAL "") endif() + +if (ANAKIN_FOUND) + cc_library(anakin_subgraph_pass SRCS anakin_subgraph_pass.cc DEPS subgraph_detector anakin_op_teller) + + set(analysis_deps ${analysis_deps} + subgraph_detector anakin_subgraph_pass + CACHE INTERNAL "") + + set(pass_file ${PADDLE_BINARY_DIR}/paddle/fluid/inference/api/paddle_inference_pass.h) + file(APPEND ${pass_file} "USE_PASS(anakin_subgraph_pass);\n") + set(INFER_IR_PASSES ${INFER_IR_PASSES} anakin_subgraph_pass CACHE INTERNAL "") +endif() diff --git a/paddle/fluid/inference/analysis/ir_passes/anakin_subgraph_pass.cc b/paddle/fluid/inference/analysis/ir_passes/anakin_subgraph_pass.cc new file mode 100644 index 0000000000000000000000000000000000000000..12deed2533bba713701849d58f8c5cf3269b85da --- /dev/null +++ b/paddle/fluid/inference/analysis/ir_passes/anakin_subgraph_pass.cc @@ -0,0 +1,219 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "paddle/fluid/framework/ir/graph_pattern_detector.h" +#include "paddle/fluid/inference/anakin/convert/op_converter.h" +#include "paddle/fluid/inference/anakin/op_teller.h" +#include "paddle/fluid/inference/analysis/helper.h" +#include "paddle/fluid/inference/analysis/ir_passes/anakin_subgraph_pass.h" +#include "paddle/fluid/inference/analysis/ir_passes/subgraph_detector.h" +#include "paddle/fluid/string/pretty_log.h" + +namespace paddle { +namespace inference { +namespace analysis { + +using framework::ir::Node; + +std::unique_ptr analysis::AnakinSubgraphPass::ApplyImpl( + std::unique_ptr graph) const { + framework::ir::FusePassBase::Init("anakin_subgraph_pass", graph.get()); + + auto teller = [](const framework::ir::Node *node) { + if (!node->IsOp() || !node->Op()) return false; + return anakin::OpTeller::Global().Tell(node->Op()->Type(), *node->Op()); + }; + + SubGraphFuser fuser(graph.get(), teller, 6 /* min_subgraph_size */); + fuser(); + + std::vector graph_param_names = + ExtractParameters(graph->Nodes()); + + // those parameter already exist in anakin, and should not have another copy + // in fluid. + std::vector repetitive_params; + + for (auto *node : graph->Nodes()) { + if (node->IsOp() && !Agent(node).subgraph()->empty()) { + CreateAnakinOp(node, graph.get(), graph_param_names, &repetitive_params); + std::unordered_set nodes2remove( + Agent(node).subgraph()->begin(), Agent(node).subgraph()->end()); + framework::ir::GraphSafeRemoveNodes(graph.get(), nodes2remove); + } + } + + std::unordered_set nodes2remove; + for (auto *node : graph->Nodes()) { + if (node->IsOp() && Agent(node).deleted()) { + nodes2remove.insert(node); + } + } + framework::ir::GraphSafeRemoveNodes(graph.get(), nodes2remove); + graph->Set(framework::ir::kRepetitiveParamAttr, + new std::vector(repetitive_params)); + + return graph; +} + +std::string GenerateAnakinEngineKey(const std::set &engine_inputs, + const std::set &engine_outputs, + std::string id) { + std::string engine_hash_key = ""; + for (auto name : engine_inputs) { + engine_hash_key += name; + } + for (auto name : engine_outputs) { + engine_hash_key += name; + } + engine_hash_key += id; + auto engine_key = std::to_string(std::hash()(engine_hash_key)); + return engine_key; +} + +void AnakinSubgraphPass::CreateAnakinOp( + framework::ir::Node *node, Graph *graph, + const std::vector &graph_params, + std::vector *repetitive_params) const { + auto *op_desc = node->Op(); + auto &subgraph = *Agent(node).subgraph(); + PADDLE_ENFORCE(!subgraph.empty()); + + framework::ProgramDesc *program_desc = + Get("program"); + // Add new block for TensorRTEngineOP + const framework::BlockDesc &main_block = + program_desc->Block(framework::kRootBlockIndex); + // const framework::BlockDesc& main_block = program_desc->Block(0); + framework::BlockDesc *new_block = program_desc->AppendBlock(main_block); + + // An fake block desc. + framework::proto::BlockDesc block_proto; + framework::BlockDesc block_desc(nullptr, &block_proto); + block_desc.Proto()->set_parent_idx(-1); + block_desc.Proto()->set_idx(0); + string::PrettyLogDetail("--- detect a sub-graph with %d nodes", + subgraph.size()); + + for (auto *node : subgraph) { + auto *new_block_op = new_block->AppendOp(); + auto *op = block_desc.AppendOp(); + *new_block_op->Proto() = *node->Op()->Proto(); + *op->Proto() = *node->Op()->Proto(); + } + + // Then, we will use the input_names_with_id and output_names_with_id to + // generate the eigine key. + // So, We use set instead of unordered_set here to ensure that the engine key + // is unique. + std::set input_names; + std::set input_names_with_id; + std::vector params; + for (auto *x : node->inputs) { + input_names.insert(x->Name()); + input_names_with_id.insert(x->Name() + std::to_string(x->id())); + if (std::count(graph_params.begin(), graph_params.end(), x->Name()) > 0) { + params.push_back(x->Name()); + } + } + std::copy(params.begin(), params.end(), + std::back_inserter(*repetitive_params)); + op_desc->SetInput( + "Xs", std::vector(input_names.begin(), input_names.end())); + + std::set output_names; + std::set output_names_with_id; + for (auto *x : node->outputs) { + output_names.insert(x->Name()); + output_names_with_id.insert(x->Name() + std::to_string(x->id())); + } + + op_desc->SetOutput( + "Ys", std::vector(output_names.begin(), output_names.end())); + op_desc->SetType("anakin_engine"); + + std::unordered_map output_name_map; + auto &subgraph_nodes = *Agent(node).subgraph(); + + // The following procedure is used to rename all the intermediate + // variables and the output variables of the subgraph. + RenameAndGetOutputs(subgraph_nodes, &block_desc, input_names_with_id, + &output_names_with_id, &output_names, &output_name_map, + false); + + // When anakin engine runs at the end of the operation, + // output_mapping help us copy the data from the renamed ITensor + // to Tensor. + std::vector output_mapping; + for (auto name : output_names) { + PADDLE_ENFORCE(output_name_map.count(name) != 0); + output_mapping.push_back(output_name_map[name]); + } + + auto *vars = block_desc.Proto()->mutable_vars(); + for (framework::ir::Node *node : graph->Nodes()) { + if (node->IsVar() && node->Var()) { + *vars->Add() = *node->Var()->Proto(); + } + } + + PADDLE_ENFORCE(!block_desc.Proto()->vars().empty(), + "the block has no var-desc"); + PADDLE_ENFORCE(!output_mapping.empty()); + op_desc->SetBlockAttr("sub_block", new_block); + SetAttr(op_desc->Proto(), "subgraph", + block_desc.Proto()->SerializeAsString()); + // Set attrs + SetAttr(op_desc->Proto(), "parameters", params); + SetAttr(op_desc->Proto(), "output_name_mapping", output_mapping); + int predictor_id = Get("predictor_id"); + auto engine_key = GenerateAnakinEngineKey( + input_names_with_id, output_names_with_id, std::to_string(predictor_id)); + + SetAttr(op_desc->Proto(), "engine_key", engine_key); + auto max_input_shape = + Get>>("max_input_shape"); + auto max_batch_size = Get("max_batch_size"); + + auto *anakin_engine = + inference::Singleton::Global().Create( + true, Get("gpu_device_id"), max_batch_size, max_input_shape, + engine_key); + + auto *scope = param_scope(); + std::unordered_set param_set(params.begin(), params.end()); + framework::BlockDesc block_desc_temp(nullptr, block_desc.Proto()); + + inference::Singleton::Global() + .ConvertBlockToAnakinEngine( + &block_desc_temp, scope, + std::vector(input_names.begin(), input_names.end()), + param_set, output_mapping, anakin_engine); +} + +} // namespace analysis +} // namespace inference +} // namespace paddle + +REGISTER_PASS(anakin_subgraph_pass, + paddle::inference::analysis::AnakinSubgraphPass); diff --git a/paddle/fluid/inference/analysis/ir_passes/anakin_subgraph_pass.h b/paddle/fluid/inference/analysis/ir_passes/anakin_subgraph_pass.h new file mode 100644 index 0000000000000000000000000000000000000000..c13b9ecda42336a79187185070104ba9ac4b67bc --- /dev/null +++ b/paddle/fluid/inference/analysis/ir_passes/anakin_subgraph_pass.h @@ -0,0 +1,44 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once +#include +#include +#include +#include +#include "paddle/fluid/framework/ir/pass.h" +#include "paddle/fluid/inference/anakin/engine.h" +#include "paddle/fluid/inference/analysis/ir_passes/subgraph_util.h" + +using anakin::Precision; +using anakin::saber::NV; +namespace paddle { +namespace inference { +namespace analysis { + +class AnakinSubgraphPass : public framework::ir::FusePassBase { + public: + std::unique_ptr ApplyImpl( + std::unique_ptr graph) const override; + + private: + void CreateAnakinOp(framework::ir::Node *x, framework::ir::Graph *graph, + const std::vector &graph_params, + std::vector *repetitive_params) const; + void CleanIntermediateOutputs(framework::ir::Node *node); +}; + +} // namespace analysis +} // namespace inference +} // namespace paddle diff --git a/paddle/fluid/inference/analysis/ir_passes/subgraph_detector.cc b/paddle/fluid/inference/analysis/ir_passes/subgraph_detector.cc index 96befe7f8a5d16402338ac337daa96d714b4d310..76b1671601eec95d64b36effc5727481dcd070e2 100644 --- a/paddle/fluid/inference/analysis/ir_passes/subgraph_detector.cc +++ b/paddle/fluid/inference/analysis/ir_passes/subgraph_detector.cc @@ -14,6 +14,8 @@ limitations under the License. */ #include "paddle/fluid/inference/analysis/ir_passes/subgraph_detector.h" #include +#include +#include #include #include "paddle/fluid/framework/ir/graph_helper.h" #include "paddle/fluid/framework/ir/graph_pattern_detector.h" @@ -418,7 +420,7 @@ void SubGraphFuser::ReplaceNodesWithSubGraphs() { // Node that contains this subgraph 2. Mark the nodes inside the sub-graph // as deleted. 3. Replace the deleted node with the new Block Node. framework::OpDesc empty_desc; - empty_desc.SetType("tensorrt_engine"); + empty_desc.SetType("anakin_engine"); auto *block_node = graph_->CreateOpNode(&empty_desc); Agent(block_node).set_subgraph({}); auto io = ExtractInputAndOutputOfSubGraph(subgraph); diff --git a/paddle/fluid/inference/analysis/ir_passes/subgraph_util.cc b/paddle/fluid/inference/analysis/ir_passes/subgraph_util.cc new file mode 100644 index 0000000000000000000000000000000000000000..a17ee1b707a7f950cddc62373a9a57c793d5528f --- /dev/null +++ b/paddle/fluid/inference/analysis/ir_passes/subgraph_util.cc @@ -0,0 +1,152 @@ +/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +/* + * This file defines the the class to partition a graph. + */ + +#include "paddle/fluid/inference/analysis/ir_passes/subgraph_util.h" +#include +#include + +namespace paddle { +namespace inference { +namespace analysis { +using framework::ir::Node; + +std::vector ExtractParameters( + const std::unordered_set &nodes) { + // We can judge whether a variable is a parameter by + // its presistable property, but sometimes the presistable + // of the feed op output is true, so we have to identify it. + std::vector feed_outputs; + for (const auto &node : nodes) { + if (!node->IsOp()) continue; + std::string op_type = node->Op()->Type(); + if (op_type == "feed" || op_type == "fetch") { + std::vector output_names = node->Op()->OutputArgumentNames(); + std::copy(output_names.begin(), output_names.end(), + std::back_inserter(feed_outputs)); + } + } + + std::vector parameters; + for (const auto &node : nodes) { + if (!node->IsVar()) continue; + if (node->Var()->Persistable() && + std::find(feed_outputs.begin(), feed_outputs.end(), node->Name()) == + feed_outputs.end()) { + parameters.push_back(node->Name()); + } + } + return parameters; +} + +void RenameAndGetOutputs( + const std::vector &subgraph_nodes, + framework::BlockDesc *block_desc, + const std::set &input_names_with_id, + std::set *output_names_with_id, + std::set *output_names, + std::unordered_map *output_name_map, + bool is_trt) { + //// In the normal case, the paddle-trt exists bug when runing the googlenet. + // When there are more than two convolutions of 1 * 1 with the same input, the + // paddle-tensorrt will do the merging optimization, which fuse those conv + // into one conv, and then trigger bug. So, We should use strategy to avoid + // this optimization for the time being. This bug will be fixed in the future. + std::unordered_map + same_hierarchy_conv2d_num_map; + + for (size_t index = 0; index < block_desc->OpSize(); ++index) { + framework::proto::OpDesc *op = block_desc->Op(index)->Proto(); + framework::OpDesc op_desc(*op, nullptr); + auto correspond_node = subgraph_nodes[index]; + PADDLE_ENFORCE_EQ(correspond_node->Name(), op->type()); + + std::unordered_map var2id; + std::unordered_map in_vars; + for (auto *in_var : correspond_node->inputs) { + var2id[in_var->Name()] = in_var->id(); + in_vars[in_var->Name()] = in_var; + } + // rename for the input variables of op inside subgraph + for (int i = 0; i < op->inputs_size(); i++) { + // one input + auto *in_var = op->mutable_inputs(i); + std::vector replaced_names; + for (int k = 0; k < in_var->arguments_size(); k++) { // all the arguments + std::string arg_value = in_var->arguments(k); + std::string arg_value_with_id = + arg_value + std::to_string(var2id[arg_value]); + if (input_names_with_id.count(arg_value_with_id)) { + replaced_names.push_back(arg_value); + } else { + replaced_names.push_back(arg_value_with_id); + } + } + in_var->clear_arguments(); + for (size_t k = 0; k < replaced_names.size(); k++) { + in_var->add_arguments(replaced_names[k]); + } + } + var2id.clear(); + for (auto out_var : correspond_node->outputs) { + var2id[out_var->Name()] = out_var->id(); + } + + if (op_desc.Type() == "conv2d" && is_trt) { + auto input_var_name = op_desc.Input("Input").front(); + auto filter_var_name = op_desc.Input("Filter").front(); + auto out_var_name = op_desc.Output("Output").front(); + auto filter_shape = in_vars[filter_var_name]->Var()->GetShape(); + const std::vector strides = + boost::get>(op_desc.GetAttr("strides")); + const std::vector paddings = + boost::get>(op_desc.GetAttr("paddings")); + if (same_hierarchy_conv2d_num_map[input_var_name] > 0) { + (*output_names_with_id) + .insert(out_var_name + std::to_string(var2id[out_var_name])); + (*output_names).insert(out_var_name); + } else if (filter_shape[2] == 1 && filter_shape[3] == 1 && + strides[0] == 1 && strides[1] == 1 && paddings[0] == 0 && + paddings[1] == 0) { + same_hierarchy_conv2d_num_map[input_var_name] += 1; + } + } + + // rename for the output variables of op inside subgraph + for (int i = 0; i < op->outputs_size(); i++) { + framework::proto::OpDesc_Var *out_var = op->mutable_outputs(i); + std::vector replaced_names; + for (int k = 0; k < out_var->arguments_size(); k++) { + std::string arg_value = out_var->arguments(k); + std::string arg_value_with_id = + arg_value + std::to_string(var2id[arg_value]); + if (output_names_with_id->count(arg_value_with_id)) { + (*output_name_map)[arg_value] = arg_value_with_id; + } + replaced_names.push_back(arg_value_with_id); + } + out_var->clear_arguments(); + for (size_t k = 0; k < replaced_names.size(); k++) { + out_var->add_arguments(replaced_names[k]); + } + } + } +} + +} // namespace analysis +} // namespace inference +} // namespace paddle diff --git a/paddle/fluid/inference/analysis/ir_passes/subgraph_util.h b/paddle/fluid/inference/analysis/ir_passes/subgraph_util.h new file mode 100644 index 0000000000000000000000000000000000000000..3cf21bf5f426a7142626e6ae1db6ee478418d08a --- /dev/null +++ b/paddle/fluid/inference/analysis/ir_passes/subgraph_util.h @@ -0,0 +1,49 @@ +/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +/* + * This file defines the the class to partition a graph. + */ + +#pragma once +#include +#include +#include +#include +#include + +#include "paddle/fluid/framework/ir/graph.h" +#include "paddle/fluid/framework/ir/graph_traits.h" +#include "paddle/fluid/framework/ir/node.h" + +namespace paddle { +namespace inference { +namespace analysis { +using framework::ir::Node; + +std::vector ExtractParameters( + const std::unordered_set &nodes); + +void RenameAndGetOutputs( + const std::vector &subgraph_nodes, + framework::BlockDesc *block_desc, + const std::set &input_names_with_id, + std::set *output_names_with_id, + std::set *output_names, + std::unordered_map *output_name_map, + bool is_trt = true); + +} // namespace analysis +} // namespace inference +} // namespace paddle diff --git a/paddle/fluid/inference/analysis/ir_passes/tensorrt_subgraph_pass.cc b/paddle/fluid/inference/analysis/ir_passes/tensorrt_subgraph_pass.cc index d4e2da8957f2057b21460d00b71e9717c63ed054..59399403276b59c143fc3e06a53643e0a85cf559 100644 --- a/paddle/fluid/inference/analysis/ir_passes/tensorrt_subgraph_pass.cc +++ b/paddle/fluid/inference/analysis/ir_passes/tensorrt_subgraph_pass.cc @@ -13,6 +13,7 @@ // limitations under the License. #include +#include #include #include "paddle/fluid/framework/ir/graph_pattern_detector.h" @@ -30,17 +31,6 @@ namespace analysis { using framework::ir::Node; -std::vector ExtractParameters( - const std::unordered_set &nodes); - -void RenameAndGetOutputs( - const std::vector &subgraph_nodes, - framework::BlockDesc *block_desc, - const std::set &input_names_with_id, - std::set *output_names_with_id, - std::set *output_names, - std::unordered_map *output_name_map); - std::unique_ptr analysis::TensorRtSubgraphPass::ApplyImpl( std::unique_ptr graph) const { framework::ir::FusePassBase::Init("tensorrt_subgraph_pass", graph.get()); @@ -209,186 +199,86 @@ void TensorRtSubgraphPass::CreateTensorRTOp( SetAttr(op_desc->Proto(), "parameters", params); auto enable_int8 = Get("enable_int8"); + auto use_static_engine = Get("use_static_engine"); auto engine_key = GenerateEngineKey(input_names_with_id, output_names_with_id, std::to_string(0)); // Get "" when there is no cached calibration table data. - std::string calibration_data = GetTrtCalibTableData( - Get("model_opt_cache_dir"), engine_key, enable_int8); + bool load_from_memory = Get("model_from_memory"); + std::string calibration_data = ""; + if (enable_int8) { + calibration_data = GetTrtCalibTableData( + Get("model_opt_cache_dir"), engine_key, enable_int8); + } SetAttr(op_desc->Proto(), "calibration_data", calibration_data); SetAttr(op_desc->Proto(), "enable_int8", enable_int8); SetAttr(op_desc->Proto(), "engine_key", engine_key); - SetAttr(op_desc->Proto(), "engine_serialized_data", std::string("")); + std::string trt_engine_serialized_data = ""; + + SetAttr(op_desc->Proto(), "engine_serialized_data", + trt_engine_serialized_data); std::unique_ptr calibrator; if (enable_int8 && calibration_data.size() != 0) { calibrator.reset(new tensorrt::TRTInt8Calibrator(calibration_data)); } - - bool use_static_engine = Get("use_static_engine"); // When in int8 mode and calibration_mode, the program just produce the // calibration table data. bool calibration_mode = (enable_int8 && calibration_data.size() == 0); - if (!calibration_mode && use_static_engine) { - std::copy(params.begin(), params.end(), - std::back_inserter(*repetitive_params)); - std::string trt_engine_serialized_data = GetTrtEngineSerializedData( - Get("model_opt_cache_dir"), engine_key); + if (calibration_mode) { + // calibraion mode means generate int8 calibration table data process. + return; + } - if (trt_engine_serialized_data.empty()) { - LOG(INFO) << "Prepare TRT engine (Optimize model structure, Select OP " - "kernel etc). This process may cost a lot of time."; - std::unique_ptr trt_engine( - new tensorrt::TensorRTEngine( - Get("max_batch_size"), Get("workspace_size"), - enable_int8, calibrator.get(), Get("gpu_device_id"))); - auto *scope = param_scope(); - framework::BlockDesc block_desc_temp(nullptr, block_desc.Proto()); - std::unordered_set param_set(params.begin(), params.end()); - inference::Singleton::Global() - .ConvertBlockToTRTEngine( - &block_desc_temp, *scope, - std::vector(input_names.begin(), input_names.end()), - param_set, output_mapping, trt_engine.get()); - nvinfer1::IHostMemory *serialized_engine_data = trt_engine->Serialize(); - trt_engine_serialized_data = - std::string((const char *)serialized_engine_data->data(), - serialized_engine_data->size()); - SaveTrtEngineSerializedDataToFile( - GetTrtEngineSerializedPath(Get("model_opt_cache_dir"), - engine_key), - trt_engine_serialized_data); - } else { + std::copy(params.begin(), params.end(), + std::back_inserter(*repetitive_params)); + bool need_serialize = (use_static_engine && !load_from_memory); + + if (need_serialize) { + trt_engine_serialized_data = GetTrtEngineSerializedData( + Get("model_opt_cache_dir"), engine_key); + // we can load the engine info serialized before from the disk. + if (!trt_engine_serialized_data.empty()) { + SetAttr(op_desc->Proto(), "engine_serialized_data", + trt_engine_serialized_data); LOG(INFO) << "Load TRT Optimized Info from " << GetTrtEngineSerializedPath( Get("model_opt_cache_dir"), engine_key); - } - - SetAttr(op_desc->Proto(), "engine_serialized_data", - trt_engine_serialized_data); - } -} - -std::vector ExtractParameters( - const std::unordered_set &nodes) { - // We can judge whether a variable is a parameter by - // its presistable property, but sometimes the presistable - // of the feed op output is true, so we have to identify it. - std::vector feed_outputs; - for (const auto &node : nodes) { - if (!node->IsOp()) continue; - std::string op_type = node->Op()->Type(); - if (op_type == "feed" || op_type == "fetch") { - std::vector output_names = node->Op()->OutputArgumentNames(); - std::copy(output_names.begin(), output_names.end(), - std::back_inserter(feed_outputs)); + return; } } - std::vector parameters; - for (const auto &node : nodes) { - if (!node->IsVar()) continue; - if (node->Var()->Persistable() && - std::find(feed_outputs.begin(), feed_outputs.end(), node->Name()) == - feed_outputs.end()) { - parameters.push_back(node->Name()); - } - } - return parameters; -} - -void RenameAndGetOutputs( - const std::vector &subgraph_nodes, - framework::BlockDesc *block_desc, - const std::set &input_names_with_id, - std::set *output_names_with_id, - std::set *output_names, - std::unordered_map *output_name_map) { - //// In the normal case, the paddle-trt exists bug when runing the googlenet. - // When there are more than two convolutions of 1 * 1 with the same input, the - // paddle-tensorrt will do the merging optimization, which fuse those conv - // into one conv, and then trigger bug. So, We should use strategy to avoid - // this optimization for the time being. This bug will be fixed in the future. - std::unordered_map - same_hierarchy_conv2d_num_map; - - for (size_t index = 0; index < block_desc->OpSize(); ++index) { - framework::proto::OpDesc *op = block_desc->Op(index)->Proto(); - framework::OpDesc op_desc(*op, nullptr); - auto correspond_node = subgraph_nodes[index]; - PADDLE_ENFORCE_EQ(correspond_node->Name(), op->type()); - - std::unordered_map var2id; - std::unordered_map in_vars; - for (auto *in_var : correspond_node->inputs) { - var2id[in_var->Name()] = in_var->id(); - in_vars[in_var->Name()] = in_var; - } - // rename for the input variables of op inside subgraph - for (int i = 0; i < op->inputs_size(); i++) { - // one input - auto *in_var = op->mutable_inputs(i); - std::vector replaced_names; - for (int k = 0; k < in_var->arguments_size(); k++) { // all the arguments - std::string arg_value = in_var->arguments(k); - std::string arg_value_with_id = - arg_value + std::to_string(var2id[arg_value]); - if (input_names_with_id.count(arg_value_with_id)) { - replaced_names.push_back(arg_value); - } else { - replaced_names.push_back(arg_value_with_id); - } - } - in_var->clear_arguments(); - for (size_t k = 0; k < replaced_names.size(); k++) { - in_var->add_arguments(replaced_names[k]); - } - } - var2id.clear(); - for (auto out_var : correspond_node->outputs) { - var2id[out_var->Name()] = out_var->id(); - } - - if (op_desc.Type() == "conv2d") { - auto input_var_name = op_desc.Input("Input").front(); - auto filter_var_name = op_desc.Input("Filter").front(); - auto out_var_name = op_desc.Output("Output").front(); - auto filter_shape = in_vars[filter_var_name]->Var()->GetShape(); - const std::vector strides = - boost::get>(op_desc.GetAttr("strides")); - const std::vector paddings = - boost::get>(op_desc.GetAttr("paddings")); - if (same_hierarchy_conv2d_num_map[input_var_name] > 0) { - (*output_names_with_id) - .insert(out_var_name + std::to_string(var2id[out_var_name])); - (*output_names).insert(out_var_name); - } else if (filter_shape[2] == 1 && filter_shape[3] == 1 && - strides[0] == 1 && strides[1] == 1 && paddings[0] == 0 && - paddings[1] == 0) { - same_hierarchy_conv2d_num_map[input_var_name] += 1; - } - } - - // rename for the output variables of op inside subgraph - for (int i = 0; i < op->outputs_size(); i++) { - framework::proto::OpDesc_Var *out_var = op->mutable_outputs(i); - std::vector replaced_names; - for (int k = 0; k < out_var->arguments_size(); k++) { - std::string arg_value = out_var->arguments(k); - std::string arg_value_with_id = - arg_value + std::to_string(var2id[arg_value]); - if (output_names_with_id->count(arg_value_with_id)) { - (*output_name_map)[arg_value] = arg_value_with_id; - } - replaced_names.push_back(arg_value_with_id); - } - out_var->clear_arguments(); - for (size_t k = 0; k < replaced_names.size(); k++) { - out_var->add_arguments(replaced_names[k]); - } - } + // the following code will NOT run in following situation: + // 1. calibraion mode (generate trt int8 calibraiton table data) + // 2. already load serialized trt engine info. + LOG(INFO) << "Prepare TRT engine (Optimize model structure, Select OP " + "kernel etc). This process may cost a lot of time."; + std::unique_ptr trt_engine( + new tensorrt::TensorRTEngine( + Get("max_batch_size"), Get("workspace_size"), enable_int8, + calibrator.get(), Get("gpu_device_id"))); + auto *scope = param_scope(); + framework::BlockDesc block_desc_temp(nullptr, block_desc.Proto()); + std::unordered_set param_set(params.begin(), params.end()); + inference::Singleton::Global() + .ConvertBlockToTRTEngine( + &block_desc_temp, *scope, + std::vector(input_names.begin(), input_names.end()), + param_set, output_mapping, trt_engine.get()); + nvinfer1::IHostMemory *serialized_engine_data = trt_engine->Serialize(); + trt_engine_serialized_data = + std::string((const char *)serialized_engine_data->data(), + serialized_engine_data->size()); + + if (need_serialize) { + SaveTrtEngineSerializedDataToFile( + GetTrtEngineSerializedPath(Get("model_opt_cache_dir"), + engine_key), + trt_engine_serialized_data); } + SetAttr(op_desc->Proto(), "engine_serialized_data", + trt_engine_serialized_data); } } // namespace analysis diff --git a/paddle/fluid/inference/analysis/ir_passes/tensorrt_subgraph_pass.h b/paddle/fluid/inference/analysis/ir_passes/tensorrt_subgraph_pass.h index 6689a668fc9313df4105875477424f1426637226..f043670c5af39c1bdf8d4f00c7294fb53a4c9039 100644 --- a/paddle/fluid/inference/analysis/ir_passes/tensorrt_subgraph_pass.h +++ b/paddle/fluid/inference/analysis/ir_passes/tensorrt_subgraph_pass.h @@ -20,6 +20,7 @@ #include #include "paddle/fluid/framework/ir/fuse_pass_base.h" #include "paddle/fluid/framework/ir/pass.h" +#include "paddle/fluid/inference/analysis/ir_passes/subgraph_util.h" namespace paddle { namespace inference { diff --git a/paddle/fluid/inference/api/CMakeLists.txt b/paddle/fluid/inference/api/CMakeLists.txt index 85755fc471ae3d37ec5d005882668ccf0c35b354..90f09505c023c656a0b4ffaf9e3ef52152c0f0e7 100644 --- a/paddle/fluid/inference/api/CMakeLists.txt +++ b/paddle/fluid/inference/api/CMakeLists.txt @@ -27,6 +27,10 @@ if(WITH_GPU AND TENSORRT_FOUND) set(inference_deps ${inference_deps} tensorrt_engine tensorrt_converter) endif() +if (ANAKIN_FOUND) + set(inference_deps ${inference_deps} anakin_op_converter anakin_engine) +endif() + add_subdirectory(details) cc_library(analysis_config SRCS analysis_config.cc DEPS lod_tensor paddle_pass_builder) diff --git a/paddle/fluid/inference/api/analysis_config.cc b/paddle/fluid/inference/api/analysis_config.cc index 1be25de497346913f24eec147a2db58b0f7065f4..7bfdada49664544c829b1f4fc886292b29717c32 100644 --- a/paddle/fluid/inference/api/analysis_config.cc +++ b/paddle/fluid/inference/api/analysis_config.cc @@ -21,6 +21,7 @@ #include "paddle/fluid/platform/gpu_info.h" namespace paddle { +extern const std::vector kAnakinSubgraphPasses; PassStrategy *AnalysisConfig::pass_builder() const { if (!pass_builder_.get()) { @@ -108,6 +109,10 @@ AnalysisConfig::AnalysisConfig(const AnalysisConfig &other) { CP_MEMBER(use_mkldnn_); CP_MEMBER(mkldnn_enabled_op_types_); + CP_MEMBER(use_anakin_); + CP_MEMBER(anakin_max_batchsize_); + CP_MEMBER(anakin_max_input_shape_); + // Ir related. CP_MEMBER(enable_ir_optim_); CP_MEMBER(use_feed_fetch_ops_); @@ -230,6 +235,20 @@ void AnalysisConfig::Update() { } } + if (use_anakin_) { + PADDLE_ENFORCE(!use_tensorrt_, + "Anakin sub-graph and TensorRT sub-graph are not allowed to " + "run at the same time!"); + PADDLE_ENFORCE( + use_gpu_, + "Anakin sub-graph engine need gpu, please use the EnableGpu API."); + + pass_builder()->ClearPasses(); + for (const auto &pass : kAnakinSubgraphPasses) { + pass_builder()->AppendPass(pass); + } + } + if (ir_debug_) { pass_builder()->TurnOnDebug(); } @@ -266,7 +285,7 @@ std::string AnalysisConfig::SerializeInfoCache() { ss << specify_input_name_; ss << cpu_math_library_num_threads_; - + ss << use_anakin_; return ss.str(); } @@ -316,6 +335,11 @@ void AnalysisConfig::SetModelBuffer(const char *prog_buffer, Update(); } +void AnalysisConfig::SetEngineOptInfo( + std::map engine_opt_info) { + engine_opt_info_ = engine_opt_info; +} + NativeConfig AnalysisConfig::ToNativeConfig() const { NativeConfig config; config.model_dir = model_dir_; @@ -332,5 +356,12 @@ void AnalysisConfig::SwitchIrDebug(int x) { ir_debug_ = x; Update(); } - +void AnalysisConfig::EnableAnakinEngine( + int max_batch_size, + std::map> max_input_shape) { + anakin_max_batchsize_ = max_batch_size; + anakin_max_input_shape_ = max_input_shape; + use_anakin_ = true; + Update(); +} } // namespace paddle diff --git a/paddle/fluid/inference/api/analysis_predictor.cc b/paddle/fluid/inference/api/analysis_predictor.cc index b58c60e96a0bd6695b827e7063fa7a07f42fe586..001e8e66d5560f631dab8dd7c13bbaaef1e6195a 100644 --- a/paddle/fluid/inference/api/analysis_predictor.cc +++ b/paddle/fluid/inference/api/analysis_predictor.cc @@ -40,7 +40,10 @@ #if PADDLE_WITH_TENSORRT #include "paddle/fluid/inference/tensorrt/convert/op_converter.h" #include "paddle/fluid/inference/tensorrt/trt_int8_calibrator.h" +#endif +#if PADDLE_WITH_ANAKIN +#include "paddle/fluid/inference/anakin/convert/op_converter.h" #endif DECLARE_bool(profile); @@ -349,7 +352,10 @@ void AnalysisPredictor::OptimizeInferenceProgram() { argument_.SetStaticMemoryOptimForceUpdate( config_.static_memory_optim_force_update_); argument_.SetModelFromMemory(config_.model_from_memory_); + argument_.SetEngineOptInfo(config_.engine_opt_info_); // Analyze inference_program + argument_.SetUseAnakin(config_.anakin_engine_enabled()); + argument_.SetPredictorID(predictor_id_); if (!config_.model_dir().empty()) { argument_.SetModelDir(config_.model_dir()); } else { @@ -373,6 +379,12 @@ void AnalysisPredictor::OptimizeInferenceProgram() { argument_.SetTensorRtUseStaticEngine(config_.trt_use_static_engine_); } + if (config_.use_gpu() && config_.anakin_engine_enabled()) { + argument_.SetAnakinMaxBatchSize(config_.anakin_max_batchsize_); + argument_.SetAnakinMaxInputShape(config_.anakin_max_input_shape_); + LOG(INFO) << "Anakin subgraph engine is enabled"; + } + if (config_.use_mkldnn_) { LOG(INFO) << "MKLDNN is enabled"; argument_.SetMKLDNNEnabledOpTypes(config_.mkldnn_enabled_op_types_); @@ -402,7 +414,7 @@ std::unique_ptr CreatePaddlePredictor< VLOG(3) << "create AnalysisConfig"; if (config.use_gpu()) { // 1. GPU memory - PADDLE_ENFORCE_GT(config.memory_pool_init_size_mb(), 0.f); + PADDLE_ENFORCE_GE(config.memory_pool_init_size_mb(), 0.f); PADDLE_ENFORCE_GE(config.gpu_device_id(), 0, "Invalid device id %d", config.gpu_device_id()); std::vector flags; @@ -805,3 +817,27 @@ USE_TRT_CONVERTER(prelu); USE_TRT_CONVERTER(conv2d_transpose); USE_TRT_CONVERTER(leaky_relu); #endif + +#if PADDLE_WITH_ANAKIN +USE_ANAKIN_CONVERTER(mul); +USE_ANAKIN_CONVERTER(fc); +USE_ANAKIN_CONVERTER(conv2d); +USE_ANAKIN_CONVERTER(conv2d_fusion); +USE_ANAKIN_CONVERTER(concat); +USE_ANAKIN_CONVERTER(split); +USE_ANAKIN_CONVERTER(relu); +USE_ANAKIN_CONVERTER(sigmoid); +USE_ANAKIN_CONVERTER(tanh); +USE_ANAKIN_CONVERTER(pool2d); +USE_ANAKIN_CONVERTER(elementwise_add); +USE_ANAKIN_CONVERTER(elementwise_mul); +USE_ANAKIN_CONVERTER(batch_norm); +USE_ANAKIN_CONVERTER(flatten); +USE_ANAKIN_CONVERTER(reshape); +USE_ANAKIN_CONVERTER(transpose); +USE_ANAKIN_CONVERTER(softmax); +USE_ANAKIN_CONVERTER(detection_out); +USE_ANAKIN_CONVERTER(density_prior_box); +USE_ANAKIN_CONVERTER(dropout); +USE_ANAKIN_CONVERTER(sum); +#endif diff --git a/paddle/fluid/inference/api/analysis_predictor.h b/paddle/fluid/inference/api/analysis_predictor.h index 5c0535d63e00c32ef82aa6d804459542d7da3e50..087bfbd002658da28e3097a10933d5be2312ab4f 100644 --- a/paddle/fluid/inference/api/analysis_predictor.h +++ b/paddle/fluid/inference/api/analysis_predictor.h @@ -45,7 +45,9 @@ using framework::NaiveExecutor; */ class AnalysisPredictor : public PaddlePredictor { public: - explicit AnalysisPredictor(const AnalysisConfig &config) : config_(config) {} + explicit AnalysisPredictor(const AnalysisConfig &config) : config_(config) { + predictor_id_ = inference::GetUniqueId(); + } ~AnalysisPredictor(); bool Init(const std::shared_ptr &parent_scope, @@ -152,6 +154,7 @@ class AnalysisPredictor : public PaddlePredictor { const size_t max_shape_collect_count_{1000}; int need_collect_var_shapes_{-1}; // -1 for default, 0 for false, 1 for true. std::vector>> batch_var_shapes_; + int predictor_id_; private: // Some status here that help to determine the status inside the predictor. diff --git a/paddle/fluid/inference/api/demo_ci/run.sh b/paddle/fluid/inference/api/demo_ci/run.sh index 963986f245cdafa737d76953f0e5323e4f74e669..bf2e3593c2beadaea2cb08aa3dcc2370c3e06bf4 100755 --- a/paddle/fluid/inference/api/demo_ci/run.sh +++ b/paddle/fluid/inference/api/demo_ci/run.sh @@ -27,7 +27,7 @@ if [ -d "$TENSORRT_INCLUDE_DIR" -a -d "$TENSORRT_LIB_DIR" ]; then fi PREFIX=inference-vis-demos%2F -URL_ROOT=http://paddlemodels.cdn.bcebos.com/${PREFIX} +URL_ROOT=http://paddlemodels.bj.bcebos.com/${PREFIX} # download vis_demo data function download() { diff --git a/paddle/fluid/inference/api/details/zero_copy_tensor.cc b/paddle/fluid/inference/api/details/zero_copy_tensor.cc index 9a40cf4b60a64c3d0452a4367ccb7ac36de6b3b8..937b6398f8131a6cf4e8b0002e38f4513f0f884f 100644 --- a/paddle/fluid/inference/api/details/zero_copy_tensor.cc +++ b/paddle/fluid/inference/api/details/zero_copy_tensor.cc @@ -74,6 +74,21 @@ T *ZeroCopyTensor::data(PaddlePlace *place, int *size) const { return res; } +PaddleDType ZeroCopyTensor::type() const { + EAGER_GET_TENSOR; + auto type = tensor->type(); + if (type == framework::proto::VarType::FP32) { + return PaddleDType::FLOAT32; + } else if (type == framework::proto::VarType::INT64) { + return PaddleDType::INT64; + } else if (type == framework::proto::VarType::INT32) { + return PaddleDType::INT32; + } else { + LOG(ERROR) << "unknown type, only support float32 and int64 now."; + } + return PaddleDType::FLOAT32; +} + template void ZeroCopyTensor::copy_from_cpu(const T *data) { EAGER_GET_TENSOR; @@ -119,6 +134,7 @@ void ZeroCopyTensor::copy_to_cpu(T *data) { static_cast(pool.Get(gpu_place)); memory::Copy(platform::CPUPlace(), static_cast(data), gpu_place, t_data, ele_num * sizeof(T), dev_ctx->stream()); + cudaDeviceSynchronize(); #else PADDLE_THROW("Not compile with CUDA, should not reach here."); #endif diff --git a/paddle/fluid/inference/api/paddle_analysis_config.h b/paddle/fluid/inference/api/paddle_analysis_config.h index 9b05c335047d7f9a0c50004e4ff6817ddd53d80f..23df507aa609c018e54524159494b08b7e42c75c 100644 --- a/paddle/fluid/inference/api/paddle_analysis_config.h +++ b/paddle/fluid/inference/api/paddle_analysis_config.h @@ -14,9 +14,11 @@ #pragma once #include +#include #include #include #include +#include #include /*! \file */ @@ -136,10 +138,20 @@ struct AnalysisConfig { void EnableTensorRtEngine(int workspace_size = 1 << 20, int max_batch_size = 1, int min_subgraph_size = 3, Precision precision = Precision::kFloat32, - bool use_static = true); + bool use_static = false); /** A boolean state telling whether the TensorRT engine is used. */ bool tensorrt_engine_enabled() const { return use_tensorrt_; } + /** + * \brief Turn on the usage of Anakin sub-graph engine. + */ + void EnableAnakinEngine( + int max_batch_size = 1, + std::map> max_input_shape = {}); + + /** A boolean state indicating whether the Anakin sub-graph engine is used. + */ + bool anakin_engine_enabled() const { return use_anakin_; } /** \brief Control whether to debug IR graph analysis phase. * @@ -185,6 +197,7 @@ struct AnalysisConfig { /** A boolean state telling whether the model is set from the CPU memory. */ bool model_from_memory() const { return model_from_memory_; } + void SetEngineOptInfo(std::map engine_opt_info); /** Turn on memory optimize * NOTE still in development, will release latter. @@ -258,6 +271,10 @@ struct AnalysisConfig { std::string serialized_info_cache_; mutable std::unique_ptr pass_builder_; + bool use_anakin_{false}; + int anakin_max_batchsize_; + std::map> anakin_max_input_shape_; + std::map engine_opt_info_; }; } // namespace paddle diff --git a/paddle/fluid/inference/api/paddle_api.h b/paddle/fluid/inference/api/paddle_api.h index 703fd18069474f28b29c6f16c6308fc19bd3527f..87f40f09eb9bb552bd246cb39bbbd41abac1c9ac 100644 --- a/paddle/fluid/inference/api/paddle_api.h +++ b/paddle/fluid/inference/api/paddle_api.h @@ -177,6 +177,8 @@ class ZeroCopyTensor { device_ = device; } + PaddleDType type() const; + protected: explicit ZeroCopyTensor(void* scope) : scope_{scope} {} void SetName(const std::string& name) { name_ = name; } @@ -191,6 +193,7 @@ class ZeroCopyTensor { // performance. mutable void* tensor_{nullptr}; PaddlePlace place_; + PaddleDType dtype_; int device_; }; diff --git a/paddle/fluid/inference/api/paddle_pass_builder.cc b/paddle/fluid/inference/api/paddle_pass_builder.cc index d413a418c88241a15808474f753a3900e0a5293e..35dd1176718a3d7e4f3867ce048216ea45e5ba7f 100644 --- a/paddle/fluid/inference/api/paddle_pass_builder.cc +++ b/paddle/fluid/inference/api/paddle_pass_builder.cc @@ -68,10 +68,26 @@ void GpuPassStrategy::EnableMKLDNN() { LOG(ERROR) << "GPU not support MKLDNN yet"; } +// The following passes works for Anakin sub-graph engine. +const std::vector kAnakinSubgraphPasses({ + "infer_clean_graph_pass", // + "simplify_anakin_detection_pattern_pass5", // + "simplify_anakin_detection_pattern_pass4", // + "simplify_anakin_detection_pattern_pass3", // + "simplify_anakin_detection_pattern_pass2", // + "anakin_fillconstant_elementwisemul_fuse", // + "fc_fuse_pass", // + "conv_elementwise_add_fuse_pass", // + "conv_bn_fuse_pass", // + "conv_elementwise_add_fuse_pass", // + "fc_gru_fuse_pass", // + "anakin_subgraph_pass", +}); + GpuPassStrategy::GpuPassStrategy() : PassStrategy({}) { passes_.assign({ - "infer_clean_graph_pass", // - "identity_scale_op_clean_pass", // + "infer_clean_graph_pass", // + // "identity_scale_op_clean_pass", // "conv_affine_channel_fuse_pass", // "conv_eltwiseadd_affine_channel_fuse_pass", // "conv_bn_fuse_pass", // @@ -84,7 +100,7 @@ GpuPassStrategy::GpuPassStrategy() : PassStrategy({}) { #endif }); - for (int i = 6; i >= 3; i--) { + for (int i = 6; i >= 2; i--) { passes_.push_back("transpose_flatten" + std::to_string(i) + "_concat_fuse_pass"); } @@ -124,4 +140,5 @@ CpuPassStrategy::CpuPassStrategy() : PassStrategy({}) { }); use_gpu_ = false; } +void PaddlePassBuilder::ClearPasses() { passes_.clear(); } } // namespace paddle diff --git a/paddle/fluid/inference/api/paddle_pass_builder.h b/paddle/fluid/inference/api/paddle_pass_builder.h index 84645fef018ce41ee2cba7ae25d2b0c13e49dfc0..65403e790e93b1a4f99803472d77b0e7a02a4fec 100644 --- a/paddle/fluid/inference/api/paddle_pass_builder.h +++ b/paddle/fluid/inference/api/paddle_pass_builder.h @@ -45,6 +45,7 @@ class PaddlePassBuilder { /** Delete all the passes that has type `pass_type`. */ void DeletePass(const std::string &pass_type); + void ClearPasses(); /** Append an analysis pass. */ void AppendAnalysisPass(const std::string &pass); @@ -157,4 +158,6 @@ class GpuPassStrategy : public PassStrategy { virtual ~GpuPassStrategy() = default; }; +extern const std::vector kAnakinSubgraphPasses; + } // namespace paddle diff --git a/paddle/fluid/inference/tensorrt/convert/io_converter.h b/paddle/fluid/inference/tensorrt/convert/io_converter.h index 71c48e085d25d2bc6720d93735f661f9e3af7b40..5daa242f6ab802a50fa6105f0102b817b700f461 100644 --- a/paddle/fluid/inference/tensorrt/convert/io_converter.h +++ b/paddle/fluid/inference/tensorrt/convert/io_converter.h @@ -45,7 +45,7 @@ class EngineIOConverter { static void ConvertInput(const std::string& op_type, const LoDTensor& in, void* out, size_t max_size, cudaStream_t* stream) { PADDLE_ENFORCE(stream != nullptr); - auto* converter = Registry::Lookup( + auto* converter = Registry::Global().Lookup( op_type, "default" /* default_type */); PADDLE_ENFORCE_NOT_NULL(converter); converter->SetStream(stream); @@ -56,7 +56,7 @@ class EngineIOConverter { LoDTensor* out, size_t max_size, cudaStream_t* stream) { PADDLE_ENFORCE(stream != nullptr); - auto* converter = Registry::Lookup( + auto* converter = Registry::Global().Lookup( op_type, "default" /* default_type */); PADDLE_ENFORCE_NOT_NULL(converter); converter->SetStream(stream); @@ -69,12 +69,12 @@ class EngineIOConverter { cudaStream_t* stream_{nullptr}; }; -#define REGISTER_TENSORRT_IO_CONVERTER(op_type__, Converter__) \ - struct trt_io_##op_type__##_converter { \ - trt_io_##op_type__##_converter() { \ - Registry::Register(#op_type__); \ - } \ - }; \ +#define REGISTER_TENSORRT_IO_CONVERTER(op_type__, Converter__) \ + struct trt_io_##op_type__##_converter { \ + trt_io_##op_type__##_converter() { \ + Registry::Global().Register(#op_type__); \ + } \ + }; \ trt_io_##op_type__##_converter trt_io_##op_type__##_converter__; } // namespace tensorrt diff --git a/paddle/fluid/inference/tensorrt/convert/op_converter.h b/paddle/fluid/inference/tensorrt/convert/op_converter.h index 90ed90b1e2907cc4be6f507890bae8df5a44ee38..55515569ead6e40c9b1b45fe31189dab7e2f2bb4 100644 --- a/paddle/fluid/inference/tensorrt/convert/op_converter.h +++ b/paddle/fluid/inference/tensorrt/convert/op_converter.h @@ -86,7 +86,7 @@ class OpConverter { PADDLE_ENFORCE_EQ(op_desc.Input("Y").size(), 1UL); std::string Y = op_desc.Input("Y")[0]; if (parameters.count(Y)) { - it = Registry::Lookup("fc"); + it = Registry::Global().Lookup("fc"); } } if (op_desc.Type().find("elementwise") != std::string::npos) { @@ -103,28 +103,28 @@ class OpConverter { if (parameters.count(Y)) { PADDLE_ENFORCE(add_weight_op_set.count(op_type) > 0, "Unsupported elementwise type" + op_type); - it = - Registry::Lookup("elementwise_" + op_type + "_weight"); + it = Registry::Global().Lookup("elementwise_" + op_type + + "_weight"); PADDLE_ENFORCE_NOT_NULL(it, "no OpConverter for optype [%s]", op_desc.Type()); } else { PADDLE_ENFORCE(add_tensor_op_set.count(op_type) > 0, "Unsupported elementwise type" + op_type); - it = - Registry::Lookup("elementwise_" + op_type + "_tensor"); + it = Registry::Global().Lookup("elementwise_" + op_type + + "_tensor"); } PADDLE_ENFORCE_NOT_NULL(it, "no OpConverter for optype [%s]", op_desc.Type()); } if (op_desc.Type() == "depthwise_conv2d") { - it = Registry::Lookup("conv2d"); + it = Registry::Global().Lookup("conv2d"); PADDLE_ENFORCE_NOT_NULL(it, "no OpConverter for optype [%s]", op_desc.Type()); } if (!it) { - it = Registry::Lookup(op_desc.Type()); + it = Registry::Global().Lookup(op_desc.Type()); } PADDLE_ENFORCE_NOT_NULL(it, "no OpConverter for optype [%s]", op_desc.Type()); @@ -198,9 +198,9 @@ class OpConverter { #define REGISTER_TRT_OP_CONVERTER(op_type__, Converter__) \ struct trt_##op_type__##_converter : public ::paddle::framework::Registrar { \ trt_##op_type__##_converter() { \ - ::paddle::inference:: \ - Registry::Register< \ - ::paddle::inference::tensorrt::Converter__>(#op_type__); \ + ::paddle::inference::Registry< \ + paddle::inference::tensorrt::OpConverter>::Global() \ + .Register<::paddle::inference::tensorrt::Converter__>(#op_type__); \ } \ }; \ trt_##op_type__##_converter trt_##op_type__##_converter__; \ diff --git a/paddle/fluid/inference/tests/api/CMakeLists.txt b/paddle/fluid/inference/tests/api/CMakeLists.txt index d9ac73b0638ad356501a9883b49e65f8f3e32245..2f17a44e0c08ef7d9204a115512a1cd76790efdf 100644 --- a/paddle/fluid/inference/tests/api/CMakeLists.txt +++ b/paddle/fluid/inference/tests/api/CMakeLists.txt @@ -115,14 +115,14 @@ inference_analysis_test(test_analyzer_transformer SRCS analyzer_transformer_test # ocr set(OCR_INSTALL_DIR "${INFERENCE_DEMO_INSTALL_DIR}/ocr") if (NOT EXISTS ${OCR_INSTALL_DIR}) - inference_download_and_uncompress(${OCR_INSTALL_DIR} "http://paddlemodels.cdn.bcebos.com/" "inference-vis-demos%2Focr.tar.gz") + inference_download_and_uncompress(${OCR_INSTALL_DIR} "http://paddlemodels.bj.bcebos.com/" "inference-vis-demos%2Focr.tar.gz") endif() inference_analysis_api_test_with_refer_result(test_analyzer_ocr ${OCR_INSTALL_DIR} analyzer_vis_tester.cc SERIAL) # mobilenet with transpose op set(MOBILENET_INSTALL_DIR "${INFERENCE_DEMO_INSTALL_DIR}/mobilenet") if (NOT EXISTS ${MOBILENET_INSTALL_DIR}) - inference_download_and_uncompress(${MOBILENET_INSTALL_DIR} "http://paddlemodels.cdn.bcebos.com/" "inference-vis-demos%2Fmobilenet.tar.gz") + inference_download_and_uncompress(${MOBILENET_INSTALL_DIR} "http://paddlemodels.bj.bcebos.com/" "inference-vis-demos%2Fmobilenet.tar.gz") endif() inference_analysis_api_test_with_refer_result(test_analyzer_mobilenet_transpose ${MOBILENET_INSTALL_DIR} analyzer_vis_tester.cc SERIAL) diff --git a/paddle/fluid/inference/tests/api/analyzer_transformer_tester.cc b/paddle/fluid/inference/tests/api/analyzer_transformer_tester.cc index f765f556112915bcfa07b5361a473d39292f711a..a925da312cde30380b4997b8b76a0d425a71e817 100644 --- a/paddle/fluid/inference/tests/api/analyzer_transformer_tester.cc +++ b/paddle/fluid/inference/tests/api/analyzer_transformer_tester.cc @@ -214,23 +214,28 @@ TEST(Analyzer_Transformer, fuse_statis) { } // Compare result of NativeConfig and AnalysisConfig -void compare(bool use_mkldnn = false) { - AnalysisConfig cfg; - SetConfig(&cfg); - if (use_mkldnn) { - cfg.EnableMKLDNN(); - } - - std::vector> input_slots_all; - SetInput(&input_slots_all); - CompareNativeAndAnalysis( - reinterpret_cast(&cfg), input_slots_all); -} - -TEST(Analyzer_Transformer, compare) { compare(); } -#ifdef PADDLE_WITH_MKLDNN -TEST(Analyzer_Transformer, compare_mkldnn) { compare(true /* use_mkldnn */); } -#endif +// void compare(bool use_mkldnn = false) { +// AnalysisConfig cfg; +// SetConfig(&cfg); +// if (use_mkldnn) { +// cfg.EnableMKLDNN(); +// } +// +// std::vector> input_slots_all; +// SetInput(&input_slots_all); +// CompareNativeAndAnalysis( +// reinterpret_cast(&cfg), +// input_slots_all); +// } + +// TODO(yihuaxu): +// Disable compare and compare_mkldnn temporary, see +// https://github.com/paddlePaddle/Paddle/issues/16316 for details. +// TEST(Analyzer_Transformer, compare) { compare(); } +// #ifdef PADDLE_WITH_MKLDNN +// TEST(Analyzer_Transformer, compare_mkldnn) { compare(true /* use_mkldnn */); +// } +// #endif } // namespace inference } // namespace paddle diff --git a/paddle/fluid/inference/tests/test.cmake b/paddle/fluid/inference/tests/test.cmake index f551b322fe00892be79dd966235504bb4f54c718..df7af71d9b32ba11822e066f574146cfa5c50edd 100644 --- a/paddle/fluid/inference/tests/test.cmake +++ b/paddle/fluid/inference/tests/test.cmake @@ -1,5 +1,5 @@ include(ExternalProject) -set(INFERENCE_URL "http://paddle-inference-dist.cdn.bcebos.com" CACHE STRING "inference download url") +set(INFERENCE_URL "http://paddle-inference-dist.bj.bcebos.com" CACHE STRING "inference download url") set(INFERENCE_DEMO_INSTALL_DIR "${THIRD_PARTY_PATH}/inference_demo" CACHE STRING "A path setting inference demo download directories.") diff --git a/paddle/fluid/inference/utils/singleton.h b/paddle/fluid/inference/utils/singleton.h index cfb89e704457a11a3cd6e89dba5efad5acae0bce..990bef359499834c3a7cb025c3fb1d94ceea958e 100644 --- a/paddle/fluid/inference/utils/singleton.h +++ b/paddle/fluid/inference/utils/singleton.h @@ -45,13 +45,13 @@ struct Registry { } template - static void Register(const std::string& name) { + void Register(const std::string& name) { PADDLE_ENFORCE_EQ(items_.count(name), 0); items_[name] = new ItemChild; } - static ItemParent* Lookup(const std::string& name, - const std::string& default_name = "") { + ItemParent* Lookup(const std::string& name, + const std::string& default_name = "") { auto it = items_.find(name); if (it == items_.end()) { if (default_name == "") @@ -70,11 +70,8 @@ struct Registry { private: Registry() = default; - static std::unordered_map items_; + std::unordered_map items_; }; -template -std::unordered_map Registry::items_; - } // namespace inference } // namespace paddle diff --git a/paddle/fluid/operators/CMakeLists.txt b/paddle/fluid/operators/CMakeLists.txt index 651c5e6e75834c27313abd79a33bedb62ecd2632..afac8e4d2a39cae37e33b02135bc2c41b5ceb0c3 100644 --- a/paddle/fluid/operators/CMakeLists.txt +++ b/paddle/fluid/operators/CMakeLists.txt @@ -34,6 +34,10 @@ if (WITH_GPU AND TENSORRT_FOUND) add_subdirectory(tensorrt) endif() +if (ANAKIN_FOUND) + add_subdirectory(anakin) +endif() + SET(OP_HEADER_DEPS xxhash) if (WITH_GPU) SET(OP_HEADER_DEPS ${OP_HEADER_DEPS} cub) diff --git a/paddle/fluid/operators/add_position_encoding_op.cc b/paddle/fluid/operators/add_position_encoding_op.cc index 8127e554bed1aae7a5ce8837bcadf1b7f13f1ac2..3882bbedaa0be0ba14bca9c4fcb626d5ecaab129 100644 --- a/paddle/fluid/operators/add_position_encoding_op.cc +++ b/paddle/fluid/operators/add_position_encoding_op.cc @@ -13,6 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/fluid/operators/add_position_encoding_op.h" +#include namespace paddle { namespace operators { @@ -39,13 +40,8 @@ class AddPositionEncodingOpGrad : public framework::OperatorWithKernel { using framework::OperatorWithKernel::OperatorWithKernel; void InferShape(framework::InferShapeContext* ctx) const override { - PADDLE_ENFORCE(ctx->HasInput("X"), "X(Input) must not be null."); - PADDLE_ENFORCE(ctx->HasInput("Out"), "Out must not be null."); - PADDLE_ENFORCE(ctx->HasInput(framework::GradVarName("Out")), - "Out@GRAD must not be null."); - - auto out_dims = ctx->GetInputDim("Out"); if (ctx->HasOutput(framework::GradVarName("X"))) { + auto out_dims = ctx->GetInputDim(framework::GradVarName("Out")); ctx->SetOutputDim(framework::GradVarName("X"), out_dims); } } @@ -75,6 +71,22 @@ class AddPositionEncodingOpMaker : public framework::OpProtoAndCheckerMaker { } }; +class AddPositionEncodingGradOpDescMaker + : public framework::SingleGradOpDescMaker { + public: + using framework::SingleGradOpDescMaker::SingleGradOpDescMaker; + + protected: + std::unique_ptr Apply() const override { + std::unique_ptr op(new framework::OpDesc()); + op->SetType("add_position_encoding_grad"); + op->SetInput(framework::GradVarName("Out"), OutputGrad("Out")); + op->SetOutput(framework::GradVarName("X"), InputGrad("X")); + op->SetAttrMap(Attrs()); + return op; + } +}; + } // namespace operators } // namespace paddle @@ -83,7 +95,7 @@ namespace plt = paddle::platform; REGISTER_OPERATOR(add_position_encoding, ops::AddPositionEncodingOp, ops::AddPositionEncodingOpMaker, - paddle::framework::DefaultGradOpDescMaker); + ops::AddPositionEncodingGradOpDescMaker); REGISTER_OPERATOR(add_position_encoding_grad, ops::AddPositionEncodingOpGrad); REGISTER_OP_CPU_KERNEL( diff --git a/paddle/fluid/operators/anakin/CMakeLists.txt b/paddle/fluid/operators/anakin/CMakeLists.txt new file mode 100644 index 0000000000000000000000000000000000000000..5eacefc645bab288da7c289a5d7701abbcbef03d --- /dev/null +++ b/paddle/fluid/operators/anakin/CMakeLists.txt @@ -0,0 +1,2 @@ +op_library(anakin_engine_op DEPS anakin_engine anakin_op_converter) +# file(APPEND ${pybind_file} "USE_NO_KERNEL_OP(anakin_engine);\n") diff --git a/paddle/fluid/operators/anakin/anakin_engine_op.cc b/paddle/fluid/operators/anakin/anakin_engine_op.cc new file mode 100644 index 0000000000000000000000000000000000000000..58db16ea0c1347a366a4d5927e414d76864cb6ab --- /dev/null +++ b/paddle/fluid/operators/anakin/anakin_engine_op.cc @@ -0,0 +1,53 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#ifdef PADDLE_WITH_CUDA + +#include +#include + +#include "paddle/fluid/operators/anakin/anakin_engine_op.h" + +namespace paddle { + +namespace operators { + +class AnakinEngineOpMaker : public framework::OpProtoAndCheckerMaker { + public: + void Make() override { + AddInput("Xs", "A list of inputs.").AsDuplicable(); + AddOutput("Ys", "A list of outputs").AsDuplicable(); + AddAttr("subgraph", "the subgraph."); + AddAttr( + "engine_key", + "The engine_key here is used to distinguish different TRT Engines"); + AddAttr("sub_block", "the trt block"); + AddComment("Anakin engine operator."); + } +}; + +class AnakinEngineInferVarType : public framework::VarTypeInference { + public: + void operator()(framework::InferVarTypeContext *ctx) const override {} +}; + +} // namespace operators +} // namespace paddle + +namespace ops = paddle::operators; + +REGISTER_OPERATOR(anakin_engine, ops::AnakinEngineOp, ops::AnakinEngineOpMaker, + ops::AnakinEngineOpMaker); + +#endif // PADDLE_WITH_CUDA diff --git a/paddle/fluid/operators/anakin/anakin_engine_op.h b/paddle/fluid/operators/anakin/anakin_engine_op.h new file mode 100644 index 0000000000000000000000000000000000000000..9d5b4f6f54ccfc9802cef6abac428e28a72ac293 --- /dev/null +++ b/paddle/fluid/operators/anakin/anakin_engine_op.h @@ -0,0 +1,162 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#pragma once + +#ifdef PADDLE_WITH_CUDA + +#include +#include +#include +#include +#include +#include +#include + +#include "paddle/fluid/framework/executor.h" +#include "paddle/fluid/framework/op_registry.h" +#include "paddle/fluid/framework/operator.h" +#include "paddle/fluid/inference/anakin/convert/op_converter.h" +#include "paddle/fluid/inference/anakin/engine.h" +#include "paddle/fluid/inference/analysis/helper.h" + +namespace paddle { +namespace operators { + +using FluidDT = framework::proto::VarType_Type; +using inference::Singleton; + +using anakin::graph::GraphGlobalMem; +using anakin::AK_FLOAT; +using anakin::Precision; +using anakin::saber::NV; +using anakin::saber::X86; +using anakin::saber::Shape; +using anakin::PBlock; +using anakin::PTuple; +using inference::anakin::AnakinEngine; + +class AnakinEngineOp : public framework::OperatorBase { + using AnakinNvEngineT = AnakinEngine; + + private: + std::vector input_names_; + std::unordered_set param_names_; + mutable AnakinNvEngineT *anakin_engine_; + std::string engine_key_; + std::string engine_serialized_data_; + + public: + AnakinEngineOp(const std::string &type, + const framework::VariableNameMap &inputs, + const framework::VariableNameMap &outputs, + const framework::AttributeMap &attrs) + : framework::OperatorBase(type, inputs, outputs, attrs) { + input_names_ = Inputs("Xs"); + engine_key_ = Attr("engine_key"); + auto params = Attr>("parameters"); + for (const auto ¶m : params) { + param_names_.insert(param); + } + anakin_engine_ = nullptr; + } + + protected: + void RunImpl(const framework::Scope &scope, + const platform::Place &dev_place) const override { + RunAnakin(scope, dev_place); + } + + void RunAnakin(const framework::Scope &scope, + const platform::Place &dev_place) const { + auto *engine = GetEngine(scope, dev_place); + platform::DeviceContextPool &pool = platform::DeviceContextPool::Instance(); + auto &dev_ctx = *pool.Get(dev_place); + auto stream = + reinterpret_cast(dev_ctx).stream(); + + PADDLE_ENFORCE(!input_names_.empty(), "should pass more than one inputs"); + + std::vector output_maps = + Attr>("output_name_mapping"); + + std::map inputs; + // Convert input tensor from fluid to engine. + for (const auto &x : Inputs("Xs")) { + if (param_names_.count(x)) continue; + auto &t = + inference::analysis::GetFromScope(scope, x); + + inputs.insert({x, &t}); + } + + std::map outputs; + int output_index = 0; + for (const auto &y : Outputs("Ys")) { + auto *fluid_v = scope.FindVar(y); + PADDLE_ENFORCE_NOT_NULL(fluid_v, "no output variable called %s", y); + auto *fluid_t = fluid_v->GetMutable(); + outputs.insert({output_maps[output_index], fluid_t}); + output_index += 1; + } + engine->Execute(inputs, outputs, stream); + } + + AnakinNvEngineT *GetEngine(const framework::Scope &scope, + const platform::Place &dev_place) const { + if (anakin_engine_ == nullptr) { + anakin_engine_ = + inference::Singleton::Global() + .Get(engine_key_); + } + + return anakin_engine_; + } + + void Prepare(const framework::Scope &scope, const platform::Place &dev_place, + AnakinNvEngineT *engine) const { + LOG(INFO) << "Prepare Anakin engine (Optimize model structure, Select OP " + "kernel etc). This process may cost a lot of time."; + framework::proto::BlockDesc block_desc; + block_desc.ParseFromString(Attr("subgraph")); + + std::vector output_maps = + Attr>("output_name_mapping"); + + inference::Singleton::Global() + .ConvertBlock(block_desc, param_names_, scope, engine); + engine->Freeze(); + for (const auto &x : Inputs("Xs")) { + if (param_names_.count(x)) continue; + auto &t = + inference::analysis::GetFromScope(scope, x); + auto t_shape = framework::vectorize2int(t.dims()); + // all input shape should be 4 dims + if (t_shape.size() == 2) { + t_shape.push_back(1); + t_shape.push_back(1); + } + engine->SetInputShape(x, t_shape); + } + + engine->Optimize(); + + engine->InitGraph(); + } +}; + +} // namespace operators +} // namespace paddle + +#endif // PADDLE_WITH_CUDA diff --git a/paddle/fluid/operators/batch_norm_op.cc b/paddle/fluid/operators/batch_norm_op.cc index c0ad959309a7036639c4bc15621a2bd0296526f5..494d26f58f23ad1e445bbe8d7f8ce1037e5aa598 100644 --- a/paddle/fluid/operators/batch_norm_op.cc +++ b/paddle/fluid/operators/batch_norm_op.cc @@ -586,14 +586,10 @@ std::unique_ptr BatchNormGradMaker::Apply() const { return std::unique_ptr(op); } -class BatchNormInplaceInToOut : public framework::InplaceInToOut { +class BatchNormInplaceInToOut : public framework::InplaceOpInference { public: - using InplaceInToOut::InplaceInToOut; - - protected: - std::unordered_map Apply( - const framework::OpDesc &op_desc, - framework::BlockDesc *block) const override { + std::unordered_map operator()( + const framework::OpDesc &op_desc) const override { std::unordered_map inplace_in_to_out = { {"Mean", "MeanOut"}, {"Variance", "VarianceOut"}, {"X", "Y"}, }; @@ -601,14 +597,10 @@ class BatchNormInplaceInToOut : public framework::InplaceInToOut { } }; -class BatchNormGradInplaceInToOut : public framework::InplaceInToOut { +class BatchNormGradInplaceInToOut : public framework::InplaceOpInference { public: - using InplaceInToOut::InplaceInToOut; - - protected: - std::unordered_map Apply( - const framework::OpDesc &op_desc, - framework::BlockDesc *block) const override { + std::unordered_map operator()( + const framework::OpDesc &op_desc) const override { std::unordered_map inplace_in_to_out = { // Scale, Bias, SavedMean, SavedVariance shape is [batch_size, C] {framework::GradVarName("Y"), framework::GradVarName("X")}, diff --git a/paddle/fluid/operators/clip_op.cc b/paddle/fluid/operators/clip_op.cc index a679f7e2536a0a44148193f423f5ffe11b5e35fc..4fc6ae365ec61326670775ab13b854235f19266f 100644 --- a/paddle/fluid/operators/clip_op.cc +++ b/paddle/fluid/operators/clip_op.cc @@ -13,6 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/fluid/operators/clip_op.h" +#include namespace paddle { namespace operators { @@ -76,12 +77,28 @@ class ClipOpGrad : public framework::OperatorWithKernel { } }; +class ClipGradOpDescMaker : public framework::SingleGradOpDescMaker { + public: + using framework::SingleGradOpDescMaker::SingleGradOpDescMaker; + + protected: + std::unique_ptr Apply() const override { + std::unique_ptr op(new framework::OpDesc()); + op->SetType("clip_grad"); + op->SetInput("X", Input("X")); + op->SetInput(framework::GradVarName("Out"), OutputGrad("Out")); + op->SetOutput(framework::GradVarName("X"), InputGrad("X")); + op->SetAttrMap(Attrs()); + return op; + } +}; + } // namespace operators } // namespace paddle namespace ops = paddle::operators; REGISTER_OPERATOR(clip, ops::ClipOp, ops::ClipOpMaker, - paddle::framework::DefaultGradOpDescMaker); + ops::ClipGradOpDescMaker); REGISTER_OPERATOR(clip_grad, ops::ClipOpGrad); REGISTER_OP_CPU_KERNEL( clip, ops::ClipKernel); diff --git a/paddle/fluid/operators/concat_op.cc b/paddle/fluid/operators/concat_op.cc index 6e3c9f28649b9f15a2a78fc832ab5e52986fcf46..1f71555180361a1522b7a1c8383fe128bc4edcd0 100644 --- a/paddle/fluid/operators/concat_op.cc +++ b/paddle/fluid/operators/concat_op.cc @@ -13,6 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/fluid/operators/concat_op.h" +#include #include #include @@ -120,11 +121,7 @@ Examples: class ConcatOpGrad : public framework::OperatorWithKernel { public: - ConcatOpGrad(const std::string &type, - const framework::VariableNameMap &inputs, - const framework::VariableNameMap &outputs, - const framework::AttributeMap &attrs) - : OperatorWithKernel(type, inputs, outputs, attrs) {} + using framework::OperatorWithKernel::OperatorWithKernel; void InferShape(framework::InferShapeContext *ctx) const override { auto in_x = "X"; @@ -142,6 +139,33 @@ class ConcatOpGrad : public framework::OperatorWithKernel { } } } + + protected: + framework::OpKernelType GetExpectedKernelType( + const framework::ExecutionContext &ctx) const override { + return framework::OpKernelType( + ctx.Input(framework::GradVarName("Out"))->type(), + ctx.GetPlace()); + } +}; + +DECLARE_NO_NEED_BUFFER_VARS_INFERENCE(ConcatOpGradNoNeedBufferVarInference, + "X"); + +class ConcatGradOpDescMaker : public framework::SingleGradOpDescMaker { + public: + using framework::SingleGradOpDescMaker::SingleGradOpDescMaker; + + protected: + std::unique_ptr Apply() const override { + std::unique_ptr op(new framework::OpDesc()); + op->SetType("concat_grad"); + op->SetInput("X", Input("X")); + op->SetInput(framework::GradVarName("Out"), OutputGrad("Out")); + op->SetOutput(framework::GradVarName("X"), InputGrad("X", false)); + op->SetAttrMap(Attrs()); + return op; + } }; } // namespace operators @@ -149,9 +173,9 @@ class ConcatOpGrad : public framework::OperatorWithKernel { namespace ops = paddle::operators; REGISTER_OPERATOR(concat, ops::ConcatOp, ops::ConcatOpMaker, - paddle::framework::DefaultGradOpDescMaker< - false> /* set false to disable empty grad */); -REGISTER_OPERATOR(concat_grad, ops::ConcatOpGrad); + ops::ConcatGradOpDescMaker); +REGISTER_OPERATOR(concat_grad, ops::ConcatOpGrad, + ops::ConcatOpGradNoNeedBufferVarInference); REGISTER_OP_CPU_KERNEL( concat, ops::ConcatKernel, ops::ConcatKernel, diff --git a/paddle/fluid/operators/controlflow/while_op.cc b/paddle/fluid/operators/controlflow/while_op.cc index deb8ec3bb2d5682e8733365fb865daebbf8405e0..b3219208825cd1aea4c869064ff8f5fa8d3300fd 100644 --- a/paddle/fluid/operators/controlflow/while_op.cc +++ b/paddle/fluid/operators/controlflow/while_op.cc @@ -51,6 +51,7 @@ class WhileOp : public framework::OperatorBase { void RunImpl(const framework::Scope &scope, const platform::Place &dev_place) const override { PADDLE_ENFORCE_NOT_NULL(scope.FindVar(Input(kCondition))); + auto &cond = scope.FindVar(Input(kCondition))->Get(); PADDLE_ENFORCE_EQ(cond.dims(), paddle::framework::make_ddim({1})); @@ -70,13 +71,34 @@ class WhileOp : public framework::OperatorBase { VLOG(2) << GetSkipEagerDeletionVarsDebugString(skip_vars); auto ctx = executor.Prepare(*program, block->ID(), skip_vars); - while (cond.data()[0]) { + if (!is_test) { + while (cond.data()[0]) { + auto ¤t_scope = scope.NewScope(); + step_scopes->push_back(¤t_scope); + executor.RunPreparedContext(ctx.get(), ¤t_scope, false, true, + true); + } + } else { auto ¤t_scope = scope.NewScope(); - step_scopes->push_back(¤t_scope); - executor.RunPreparedContext(ctx.get(), ¤t_scope, false, true, true); - if (is_test) { - scope.DeleteScope(¤t_scope); + executor.CreateVariables(*program, ¤t_scope, block->ID()); + while (cond.data()[0]) { + for (auto &name : current_scope.LocalVarNames()) { + auto *var = current_scope.Var(name); + if (var->IsType()) { + // Clear all lod information for all lod_tensors. + auto *t = var->GetMutable(); + framework::LoD empty_lod; + t->set_lod(empty_lod); + } else if (var->IsType()) { + // Clear elements of all tensor arrays. + auto *t = var->GetMutable(); + t->clear(); + } + } + executor.RunPreparedContext(ctx.get(), ¤t_scope, false, false, + false); } + scope.DeleteScope(¤t_scope); } } }; diff --git a/paddle/fluid/operators/conv_op.cc b/paddle/fluid/operators/conv_op.cc index c6121d00dae4007f2fcaf57b0945d3f34233781d..619e12e6ba7c73e46beafadd50770aedfb52c964 100644 --- a/paddle/fluid/operators/conv_op.cc +++ b/paddle/fluid/operators/conv_op.cc @@ -455,13 +455,13 @@ framework::OpKernelType ConvOpGrad::GetExpectedKernelType( return type; } -class Conv2dGradMaker : public framework::SingleGradOpDescMaker { +class Conv2DGradMaker : public framework::SingleGradOpDescMaker { public: using framework::SingleGradOpDescMaker::SingleGradOpDescMaker; std::unique_ptr Apply() const override { auto* op = new framework::OpDesc(); - op->SetType(GradOpType()); + op->SetType(this->ForwardOpType() + "_grad"); op->SetInput("Input", Input("Input")); op->SetInput("Filter", Input("Filter")); op->SetInput("Bias", Input("Bias")); @@ -470,14 +470,33 @@ class Conv2dGradMaker : public framework::SingleGradOpDescMaker { op->SetOutput(framework::GradVarName("Input"), InputGrad("Input")); op->SetOutput(framework::GradVarName("Filter"), InputGrad("Filter")); op->SetOutput(framework::GradVarName("Bias"), InputGrad("Bias")); - op->SetAttrMap(Attrs()); return std::unique_ptr(op); } +}; + +class Conv3DGradMaker : public framework::SingleGradOpDescMaker { + public: + using framework::SingleGradOpDescMaker::SingleGradOpDescMaker; - virtual std::string GradOpType() const { - return this->ForwardOpType() + "_grad"; + std::unique_ptr Apply() const override { + auto* op = new framework::OpDesc(); + op->SetType(this->ForwardOpType() + "_grad"); + op->SetInput("Input", Input("Input")); + op->SetInput("Filter", Input("Filter")); + op->SetInput(framework::GradVarName("Output"), OutputGrad("Output")); + + op->SetOutput(framework::GradVarName("Input"), InputGrad("Input")); + op->SetOutput(framework::GradVarName("Filter"), InputGrad("Filter")); + + if (ForwardOp().Inputs().count("ResidualData") != 0) { + op->SetInput("ResidualData", Input("ResidualData")); + } + + op->SetAttrMap(Attrs()); + + return std::unique_ptr(op); } }; @@ -486,17 +505,16 @@ class Conv2dGradMaker : public framework::SingleGradOpDescMaker { namespace ops = paddle::operators; REGISTER_OPERATOR(conv2d, ops::ConvOp, ops::Conv2DOpMaker, - ops::ConvOpInferVarType, ops::Conv2dGradMaker); + ops::ConvOpInferVarType, ops::Conv2DGradMaker); REGISTER_OPERATOR(conv2d_grad, ops::ConvOpGrad); // depthwise convolution op REGISTER_OPERATOR(depthwise_conv2d, ops::ConvOp, ops::Conv2DOpMaker, - ops::ConvOpInferVarType, ops::Conv2dGradMaker); + ops::ConvOpInferVarType, ops::Conv2DGradMaker); REGISTER_OPERATOR(depthwise_conv2d_grad, ops::ConvOpGrad); REGISTER_OPERATOR(conv3d, ops::ConvOp, ops::Conv3DOpMaker, - ops::ConvOpInferVarType, - paddle::framework::DefaultGradOpDescMaker); + ops::ConvOpInferVarType, ops::Conv3DGradMaker); REGISTER_OPERATOR(conv3d_grad, ops::ConvOpGrad); // depthwise conv kernel diff --git a/paddle/fluid/operators/crop_op.cc b/paddle/fluid/operators/crop_op.cc index 97d20681b8136c13d512c0b86a7ff15b24367db2..78fcd07e1df8d590ad2a4508bbc82477d928c6e9 100644 --- a/paddle/fluid/operators/crop_op.cc +++ b/paddle/fluid/operators/crop_op.cc @@ -13,7 +13,9 @@ See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/fluid/operators/crop_op.h" -#include +#include +#include +#include namespace paddle { namespace operators { @@ -178,12 +180,31 @@ class CropOpGrad : public framework::OperatorWithKernel { } }; +class CropGradOpDescMaker : public framework::SingleGradOpDescMaker { + public: + using framework::SingleGradOpDescMaker::SingleGradOpDescMaker; + + protected: + std::unique_ptr Apply() const override { + std::unique_ptr op(new framework::OpDesc()); + op->SetType("crop_grad"); + op->SetInput(framework::GradVarName("Out"), OutputGrad("Out")); + op->SetInput("X", Input("X")); + if (ForwardOp().Inputs().count("Offsets") > 0) { + op->SetInput("Offsets", Input("Offsets")); + } + op->SetOutput(framework::GradVarName("X"), InputGrad("X")); + op->SetAttrMap(Attrs()); + return op; + } +}; + } // namespace operators } // namespace paddle namespace ops = paddle::operators; REGISTER_OPERATOR(crop, ops::CropOp, ops::CropOpMaker, - paddle::framework::DefaultGradOpDescMaker); + ops::CropGradOpDescMaker); REGISTER_OPERATOR(crop_grad, ops::CropOpGrad); REGISTER_OP_CPU_KERNEL( crop, ops::CropKernel); diff --git a/paddle/fluid/operators/cross_entropy_op.cc b/paddle/fluid/operators/cross_entropy_op.cc index a617b9fb1d948340d25853252be79fdd08fe0438..ad32de53e7019b438b7106ddd031a8f00bd79b5d 100644 --- a/paddle/fluid/operators/cross_entropy_op.cc +++ b/paddle/fluid/operators/cross_entropy_op.cc @@ -238,6 +238,23 @@ class CrossEntropyGradientOp : public CrossEntropyGradientOpBase { } }; +class CrossEntropyGradOpDescMaker : public framework::SingleGradOpDescMaker { + public: + using framework::SingleGradOpDescMaker::SingleGradOpDescMaker; + + protected: + std::unique_ptr Apply() const override { + std::unique_ptr op(new framework::OpDesc()); + op->SetType("cross_entropy_grad"); + op->SetInput("X", Input("X")); + op->SetInput("Label", Input("Label")); + op->SetInput(framework::GradVarName("Y"), OutputGrad("Y")); + op->SetOutput(framework::GradVarName("X"), InputGrad("X")); + op->SetAttrMap(Attrs()); + return op; + } +}; + class CrossEntropyOp2 : public CrossEntropyOpBase { public: using CrossEntropyOpBase::CrossEntropyOpBase; @@ -354,7 +371,7 @@ using CPUCtx = paddle::platform::CPUDeviceContext; REGISTER_OPERATOR(cross_entropy, ops::CrossEntropyOpBase, ops::CrossEntropyOpMaker, ops::CrossEntropyOpInferVarType, - paddle::framework::DefaultGradOpDescMaker); + ops::CrossEntropyGradOpDescMaker); REGISTER_OPERATOR(cross_entropy_grad, ops::CrossEntropyGradientOp); REGISTER_OP_CPU_KERNEL(cross_entropy, ops::CrossEntropyOpKernel, ops::CrossEntropyOpKernel); diff --git a/paddle/fluid/operators/cudnn_lstm_op.cc b/paddle/fluid/operators/cudnn_lstm_op.cc index e63d57be57a66e8e02f7ef88acd01246302bc53c..134f84d59cafa661fce727adc3303444c4ef483e 100644 --- a/paddle/fluid/operators/cudnn_lstm_op.cc +++ b/paddle/fluid/operators/cudnn_lstm_op.cc @@ -12,6 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ +#include #include #include "paddle/fluid/framework/op_registry.h" @@ -170,11 +171,6 @@ class CudnnLSTMGradOp : public framework::OperatorWithKernel { PADDLE_ENFORCE(ctx->HasInput("Input"), "Input(Input) of LSTM should not be null."); PADDLE_ENFORCE(ctx->HasInput("W"), "Input(W) of LSTM should not be null."); - PADDLE_ENFORCE(ctx->HasInput("last_h"), - "Input(last_h) of LSTM should not be null."); - PADDLE_ENFORCE(ctx->HasInput("last_c"), - "Input(last_c) of LSTM should not be null."); - PADDLE_ENFORCE(ctx->HasInput("Cache"), "Input(last_c) of LSTM should not be null."); PADDLE_ENFORCE(ctx->HasInput("InitH"), @@ -197,6 +193,35 @@ class CudnnLSTMGradOp : public framework::OperatorWithKernel { } }; +class CudnnLSTMGradOpDescMaker : public framework::SingleGradOpDescMaker { + public: + using framework::SingleGradOpDescMaker::SingleGradOpDescMaker; + + protected: + std::unique_ptr Apply() const override { + std::unique_ptr op(new framework::OpDesc()); + op->SetType("cudnn_lstm_grad"); + op->SetInput("Input", Input("Input")); + op->SetInput("InitH", Input("InitH")); + op->SetInput("InitC", Input("InitC")); + op->SetInput("W", Input("W")); + if (ForwardOp().Inputs().count("Cache") > 0) { + op->SetInput("Cache", Input("Cache")); + } + op->SetInput("Out", Output("Out")); + op->SetInput(framework::GradVarName("Out"), OutputGrad("Out")); + op->SetInput(framework::GradVarName("last_c"), OutputGrad("last_c")); + op->SetInput(framework::GradVarName("last_h"), OutputGrad("last_h")); + + op->SetOutput(framework::GradVarName("Input"), InputGrad("Input")); + op->SetOutput(framework::GradVarName("W"), InputGrad("W")); + op->SetOutput(framework::GradVarName("InitH"), InputGrad("InitH")); + op->SetOutput(framework::GradVarName("InitC"), InputGrad("InitC")); + op->SetAttrMap(Attrs()); + return op; + } +}; + template class NotImpleKernel : public framework::OpKernel { public: @@ -211,7 +236,7 @@ class NotImpleKernel : public framework::OpKernel { namespace ops = paddle::operators; REGISTER_OPERATOR(cudnn_lstm, ops::CudnnLSTMOp, ops::CudnnLSTMOpMaker, - paddle::framework::DefaultGradOpDescMaker); + ops::CudnnLSTMGradOpDescMaker); REGISTER_OPERATOR(cudnn_lstm_grad, ops::CudnnLSTMGradOp); REGISTER_OP_CPU_KERNEL(cudnn_lstm, ops::NotImpleKernel); diff --git a/paddle/fluid/operators/distributed/parameter_prefetch.cc b/paddle/fluid/operators/distributed/parameter_prefetch.cc index c63d65348880ebb4085d83059d9fead6456216d7..65295c2c103ceca50d9de3ae314246256497d084 100644 --- a/paddle/fluid/operators/distributed/parameter_prefetch.cc +++ b/paddle/fluid/operators/distributed/parameter_prefetch.cc @@ -14,6 +14,7 @@ #include #include +#include #include #include "paddle/fluid/operators/distributed/parameter_prefetch.h" @@ -218,7 +219,7 @@ void prefetch(const std::string& id_name, const std::string& out_name, boost::get(id_tensor.place()), id_tensor.data(), sizeof(int64_t) * id_tensor.numel(), stream); - for (size_t i = 0; i < cpu_tensor.numel(); ++i) { + for (int64_t i = 0; i < cpu_tensor.numel(); ++i) { ids_vector.push_back(cpu_tensor_data[i]); } #endif diff --git a/paddle/fluid/operators/elementwise/elementwise_add_op.cc b/paddle/fluid/operators/elementwise/elementwise_add_op.cc index c6c658236c235f0a6767924026b0a7610071e918..2b3fc06dcb79b8c6b46de7abf51bdb2c47acca1c 100644 --- a/paddle/fluid/operators/elementwise/elementwise_add_op.cc +++ b/paddle/fluid/operators/elementwise/elementwise_add_op.cc @@ -16,8 +16,7 @@ limitations under the License. */ #include "paddle/fluid/operators/elementwise/elementwise_op.h" namespace ops = paddle::operators; REGISTER_ELEMWISE_GRAD_MAKER(elementwise_add, Add); -REGISTER_ELEMWISE_EXPLICIT_OP(elementwise_add, "Add", "Out = X + Y", "Out", - "X"); +REGISTER_ELEMWISE_EXPLICIT_OP(elementwise_add, "Add", "Out = X + Y"); REGISTER_OP_CPU_KERNEL( elementwise_add, diff --git a/paddle/fluid/operators/elementwise/elementwise_op.h b/paddle/fluid/operators/elementwise/elementwise_op.h index 91e44152658d87750f0b6d5826c481904085e086..6dbb9072495f743a4df1ff05e029a227c2cf618b 100644 --- a/paddle/fluid/operators/elementwise/elementwise_op.h +++ b/paddle/fluid/operators/elementwise/elementwise_op.h @@ -14,7 +14,9 @@ limitations under the License. */ #pragma once +#include #include +#include #include "paddle/fluid/framework/data_layout.h" #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/framework/operator.h" @@ -250,43 +252,31 @@ class ElemwiseGradKernel : public framework::OpKernel { } }; -class ElementwiseOpInplace : public framework::InplaceInToOut { +class ElementwiseOpInplace : public framework::InplaceOpInference { public: - using framework::InplaceInToOut::InplaceInToOut; - - protected: - std::unordered_map Apply( - const framework::OpDesc &op_desc, - framework::BlockDesc *block) const override { + std::unordered_map operator()( + const framework::OpDesc &op_desc) const override { return std::unordered_map{ {"X", "Out"}, }; } }; -class ElementwiseGradOpInplace : public framework::InplaceInToOut { +class ElementwiseGradOpInplace : public framework::InplaceOpInference { public: - using framework::InplaceInToOut::InplaceInToOut; - - protected: - std::unordered_map Apply( - const framework::OpDesc &op_desc, - framework::BlockDesc *block) const override { - std::unordered_map ret; - if (block->HasVar(framework::GradVarName("X")) && - block->HasVar(framework::GradVarName("Out"))) { - ret[framework::GradVarName("Out")] = framework::GradVarName("X"); - } - return ret; + std::unordered_map operator()( + const framework::OpDesc &op_desc) const override { + return std::unordered_map{ + {framework::GradVarName("Out"), framework::GradVarName("X")}, + }; } }; +DECLARE_NO_NEED_BUFFER_VARS_INFERENCE(ElementwiseGradNoBufVarsInference, "Y"); + } // namespace operators } // namespace paddle -/* -*/ - #define REGISTER_ELEMWISE_GRAD_MAKER(kernel_type, op_name) \ class kernel_type##GradMaker \ : public paddle::framework::SingleGradOpDescMaker { \ @@ -320,18 +310,19 @@ class ElementwiseGradOpInplace : public framework::InplaceInToOut { ::paddle::framework::DefaultGradOpDescMaker); \ REGISTER_OPERATOR(op_type##_grad, ::paddle::operators::ElementwiseOpGrad) -#define REGISTER_ELEMWISE_EXPLICIT_OP(op_type, op_name, equation, ...) \ - class __ElemwiseOp##op_type##Maker__ \ - : public ::paddle::operators::ElementwiseOpMaker { \ - protected: \ - virtual std::string GetName() const { return op_name; } \ - virtual std::string GetEquation() const { return equation; } \ - }; \ - REGISTER_OPERATOR(op_type, ::paddle::operators::ElementwiseOp, \ - __ElemwiseOp##op_type##Maker__, \ - ::paddle::operators::ElementwiseOpInferVarType, \ - op_type##GradMaker, \ - ::paddle::operators::ElementwiseOpInplace); \ - REGISTER_OPERATOR(op_type##_grad, \ - ::paddle::operators::ElementwiseOpExplicitGrad, \ - ::paddle::operators::ElementwiseGradOpInplace) +#define REGISTER_ELEMWISE_EXPLICIT_OP(op_type, op_name, equation) \ + class __ElemwiseOp##op_type##Maker__ \ + : public ::paddle::operators::ElementwiseOpMaker { \ + protected: \ + virtual std::string GetName() const { return op_name; } \ + virtual std::string GetEquation() const { return equation; } \ + }; \ + REGISTER_OPERATOR(op_type, ::paddle::operators::ElementwiseOp, \ + __ElemwiseOp##op_type##Maker__, \ + ::paddle::operators::ElementwiseOpInferVarType, \ + op_type##GradMaker, \ + ::paddle::operators::ElementwiseOpInplace); \ + REGISTER_OPERATOR(op_type##_grad, \ + ::paddle::operators::ElementwiseOpExplicitGrad, \ + ::paddle::operators::ElementwiseGradOpInplace, \ + ::paddle::operators::ElementwiseGradNoBufVarsInference) diff --git a/paddle/fluid/operators/elementwise/elementwise_sub_op.cc b/paddle/fluid/operators/elementwise/elementwise_sub_op.cc index efc66374c812cbd07adef6ac25c9616b880ec383..04c87c1b2ac398f8f75265c80bef5326aea15dce 100644 --- a/paddle/fluid/operators/elementwise/elementwise_sub_op.cc +++ b/paddle/fluid/operators/elementwise/elementwise_sub_op.cc @@ -16,8 +16,7 @@ limitations under the License. */ #include "paddle/fluid/operators/elementwise/elementwise_op.h" namespace ops = paddle::operators; REGISTER_ELEMWISE_GRAD_MAKER(elementwise_sub, Sub); -REGISTER_ELEMWISE_EXPLICIT_OP(elementwise_sub, "Sub", "Out = X - Y", "Out", - "X"); +REGISTER_ELEMWISE_EXPLICIT_OP(elementwise_sub, "Sub", "Out = X - Y"); REGISTER_OP_CPU_KERNEL( elementwise_sub, diff --git a/paddle/fluid/operators/flatten_op.cc b/paddle/fluid/operators/flatten_op.cc index bb904166c4a19997a57723d9f2e50cc839aae960..7f43a1cfe977a63b5ffb6bd8dc96bf696ed15282 100644 --- a/paddle/fluid/operators/flatten_op.cc +++ b/paddle/fluid/operators/flatten_op.cc @@ -267,14 +267,10 @@ class Flatten2GradOp : public framework::OperatorBase { } }; -class FlattenOpInplaceInToOut : public framework::InplaceInToOut { +class FlattenOpInplaceInToOut : public framework::InplaceOpInference { public: - using InplaceInToOut::InplaceInToOut; - - protected: - std::unordered_map Apply( - const framework::OpDesc &op_desc, - framework::BlockDesc *block) const override { + std::unordered_map operator()( + const framework::OpDesc &op_desc) const override { std::unordered_map inplace_in_to_out = { {"X", "Out"}, }; @@ -282,13 +278,10 @@ class FlattenOpInplaceInToOut : public framework::InplaceInToOut { } }; -class FlattenGradInplaceinToOut : public framework::InplaceInToOut { - using InplaceInToOut::InplaceInToOut; - - protected: - std::unordered_map Apply( - const framework::OpDesc &op_desc, - framework::BlockDesc *block) const override { +class FlattenGradInplaceinToOut : public framework::InplaceOpInference { + public: + std::unordered_map operator()( + const framework::OpDesc &op_desc) const override { std::unordered_map inplace_in_to_out = { {framework::GradVarName("Out"), framework::GradVarName("X")}, }; diff --git a/paddle/fluid/operators/fsp_op.cc b/paddle/fluid/operators/fsp_op.cc new file mode 100644 index 0000000000000000000000000000000000000000..fbe8e56a6160219175bd573a2ff186eb35e56fdf --- /dev/null +++ b/paddle/fluid/operators/fsp_op.cc @@ -0,0 +1,128 @@ +/* Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#include "paddle/fluid/operators/fsp_op.h" + +namespace paddle { +namespace operators { + +class FSPOp : public framework::OperatorWithKernel { + public: + using framework::OperatorWithKernel::OperatorWithKernel; + + void InferShape(framework::InferShapeContext* ctx) const override { + PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) of FSPOp should not be null."); + PADDLE_ENFORCE(ctx->HasInput("Y"), "Input(Y) of FSPOp should not be null."); + PADDLE_ENFORCE(ctx->HasOutput("Out"), + "Output(Out) of FSPOp should not be null."); + + auto x_dims = ctx->GetInputDim("X"); + auto y_dims = ctx->GetInputDim("Y"); + + PADDLE_ENFORCE( + x_dims.size() == 4, + "The Input(X) must have shape [batch_size, channel, height, width]."); + PADDLE_ENFORCE( + y_dims.size() == 4, + "The Input(Y) must have shape [batch_size, channel, height, width]."); + PADDLE_ENFORCE( + (x_dims[2] == y_dims[2]) && (x_dims[3] == y_dims[3]), + "The Input(X) and Input(Y) should have the same height and width."); + + ctx->SetOutputDim("Out", {x_dims[0], x_dims[1], y_dims[1]}); + ctx->ShareLoD("X", "Out"); + } + + protected: + framework::OpKernelType GetExpectedKernelType( + const framework::ExecutionContext& ctx) const override { + framework::LibraryType library_{framework::LibraryType::kPlain}; + framework::DataLayout layout_ = framework::DataLayout::kAnyLayout; + return framework::OpKernelType(ctx.Input("X")->type(), + ctx.device_context(), layout_, library_); + } +}; + +class FSPOpMaker : public framework::OpProtoAndCheckerMaker { + public: + void Make() override { + AddInput("X", + "(Tensor) The input of FSP op with shape [batch_size, x_channel, " + "height, width]"); + AddInput("Y", + "(Tensor) The input of FSP op with shape" + "[batch_size, y_channel, height, width]." + "The y_channel can be different with the x_channel of Input(X)" + " while the other dimensions must be the same with Input(X)'s."); + AddOutput( + "Out", + "(Tensor) The output of FSP op with shape " + "[batch_size, x_channel, y_channel]. The x_channel is the channel " + "of Input(X) and the y_channel is the channel of Input(Y)."); + AddComment(R"DOC( + This op is used to calculate the flow of solution procedure (FSP) matrix of two feature maps. + Given feature map x with shape [x_channel, h, w] and feature map y with shape + [y_channel, h, w], we can get the fsp matrix of x and y in two steps: + + step 1: reshape x into matrix with shape [x_channel, h * w] and reshape and + transpose y into matrix with shape [h * w, y_channel] + step 2: multiply x and y to get fsp matrix with shape [x_channel, y_channel] + + The output is a batch of fsp matrices. + )DOC"); + } +}; + +class FSPOpGrad : public framework::OperatorWithKernel { + public: + using framework::OperatorWithKernel::OperatorWithKernel; + + void InferShape(framework::InferShapeContext* ctx) const override { + PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) should not be null"); + PADDLE_ENFORCE(ctx->HasInput("Y"), "Input(Y) should not be null"); + PADDLE_ENFORCE(ctx->HasInput(framework::GradVarName("Out")), + "Input(Out@GRAD) should not be null"); + auto x_dims = ctx->GetInputDim("X"); + auto y_dims = ctx->GetInputDim("Y"); + auto x_grad_name = framework::GradVarName("X"); + auto y_grad_name = framework::GradVarName("Y"); + if (ctx->HasOutput(x_grad_name)) { + ctx->SetOutputDim(x_grad_name, x_dims); + } + if (ctx->HasOutput(y_grad_name)) { + ctx->SetOutputDim(y_grad_name, y_dims); + } + } + + framework::OpKernelType GetExpectedKernelType( + const framework::ExecutionContext& ctx) const override { + return framework::OpKernelType( + ctx.Input(framework::GradVarName("Out"))->type(), + ctx.device_context()); + } +}; + +} // namespace operators +} // namespace paddle + +namespace ops = paddle::operators; +REGISTER_OPERATOR(fsp, ops::FSPOp, ops::FSPOpMaker, + paddle::framework::DefaultGradOpDescMaker); +REGISTER_OPERATOR(fsp_grad, ops::FSPOpGrad); +REGISTER_OP_CPU_KERNEL( + fsp, ops::FSPOpKernel, + ops::FSPOpKernel); +REGISTER_OP_CPU_KERNEL( + fsp_grad, ops::FSPGradOpKernel, + ops::FSPGradOpKernel); diff --git a/paddle/fluid/operators/fsp_op.cu b/paddle/fluid/operators/fsp_op.cu new file mode 100644 index 0000000000000000000000000000000000000000..4fd7ba04ff9af1806963427ad58c68fc216e82ac --- /dev/null +++ b/paddle/fluid/operators/fsp_op.cu @@ -0,0 +1,24 @@ +/* Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#include "paddle/fluid/framework/op_registry.h" +#include "paddle/fluid/operators/fsp_op.h" + +namespace ops = paddle::operators; +namespace plat = paddle::platform; +REGISTER_OP_CUDA_KERNEL(fsp, ops::FSPOpKernel, + ops::FSPOpKernel); +REGISTER_OP_CUDA_KERNEL(fsp_grad, + ops::FSPGradOpKernel, + ops::FSPGradOpKernel); diff --git a/paddle/fluid/operators/fsp_op.h b/paddle/fluid/operators/fsp_op.h new file mode 100644 index 0000000000000000000000000000000000000000..544af2b7d9b9729fe5dce08793da6c983fbcc6fa --- /dev/null +++ b/paddle/fluid/operators/fsp_op.h @@ -0,0 +1,136 @@ +/* Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#pragma once +#include "paddle/fluid/framework/op_registry.h" +#include "paddle/fluid/operators/math/blas.h" +#include "paddle/fluid/operators/math/math_function.h" + +namespace paddle { +namespace operators { + +using Tensor = framework::Tensor; + +template +class FSPOpKernel : public framework::OpKernel { + public: + void Compute(const framework::ExecutionContext& context) const override { + auto* x = context.Input("X"); + auto* y = context.Input("Y"); + auto* output = context.Output("Out"); + output->mutable_data(context.GetPlace()); + auto x_dims = x->dims(); + auto y_dims = y->dims(); + + auto batch_size = x_dims[0]; + auto x_channel = x_dims[1]; + auto y_channel = y_dims[1]; + auto height = x_dims[2]; + auto width = x_dims[3]; + + auto blas = math::GetBlas(context); + + math::MatDescriptor x_mat_desc; + x_mat_desc.height_ = x_channel; + x_mat_desc.width_ = height * width; + x_mat_desc.batch_size_ = batch_size; + x_mat_desc.stride_ = x_channel * height * width; + + math::MatDescriptor y_mat_desc; + y_mat_desc.height_ = height * width; + y_mat_desc.width_ = y_channel; + y_mat_desc.batch_size_ = batch_size; + y_mat_desc.stride_ = y_channel * height * width; + y_mat_desc.trans_ = true; + + blas.MatMul(*x, x_mat_desc, *y, y_mat_desc, + static_cast(1.0 / (height * width)), output, + static_cast(0.0)); + } +}; + +template +class FSPGradOpKernel : public framework::OpKernel { + public: + void Compute(const framework::ExecutionContext& context) const override { + auto* d_x = context.Output(framework::GradVarName("X")); + auto* d_y = context.Output(framework::GradVarName("Y")); + if (d_x == nullptr && d_y == nullptr) { + return; + } + auto* d_out = context.Input(framework::GradVarName("Out")); + auto d_out_dims = d_out->dims(); + auto batch_size = d_out_dims[0]; + auto x_channel = d_out_dims[1]; + auto y_channel = d_out_dims[2]; + int64_t h = 0; + int64_t w = 0; + + auto blas = math::GetBlas(context); + math::SetConstant set_zero; + if (d_x != nullptr) { + d_x->mutable_data(context.GetPlace()); + set_zero(context.template device_context(), d_x, + static_cast(0)); + auto* y = context.Input("Y"); + auto y_dims = y->dims(); + h = y_dims[2]; + w = y_dims[3]; + + math::MatDescriptor d_out_mat_desc; + d_out_mat_desc.height_ = x_channel; + d_out_mat_desc.width_ = y_channel; + d_out_mat_desc.batch_size_ = batch_size; + d_out_mat_desc.stride_ = x_channel * y_channel; + + math::MatDescriptor y_mat_desc; + y_mat_desc.height_ = y_channel; + y_mat_desc.width_ = h * w; + y_mat_desc.batch_size_ = batch_size; + y_mat_desc.stride_ = y_channel * h * w; + + blas.MatMul(*d_out, d_out_mat_desc, *y, y_mat_desc, + static_cast(1.0 / (h * w)), d_x, static_cast(0.0)); + } + + if (d_y != nullptr) { + d_y->mutable_data(context.GetPlace()); + set_zero(context.template device_context(), d_y, + static_cast(0)); + auto* x = context.Input("X"); + auto x_dims = x->dims(); + h = x_dims[2]; + w = x_dims[3]; + + math::MatDescriptor d_out_mat_desc; + d_out_mat_desc.height_ = y_channel; + d_out_mat_desc.width_ = x_channel; + d_out_mat_desc.batch_size_ = batch_size; + d_out_mat_desc.stride_ = x_channel * y_channel; + d_out_mat_desc.trans_ = true; + + math::MatDescriptor x_mat_desc; + x_mat_desc.height_ = x_channel; + x_mat_desc.width_ = h * w; + x_mat_desc.batch_size_ = batch_size; + x_mat_desc.stride_ = x_channel * h * w; + + blas.MatMul(*d_out, d_out_mat_desc, *x, x_mat_desc, + static_cast(1.0 / (h * w)), d_y, static_cast(0.0)); + } + } +}; + +} // namespace operators +} // namespace paddle diff --git a/paddle/fluid/operators/gather.cu.h b/paddle/fluid/operators/gather.cu.h index e4df59c5d51c390cf593add0c5562665c91f33f6..5bc2e63757f19c1dc8a7d41fae9621a2816ff31b 100644 --- a/paddle/fluid/operators/gather.cu.h +++ b/paddle/fluid/operators/gather.cu.h @@ -64,6 +64,7 @@ void GPUGather(const platform::DeviceContext& ctx, const Tensor& src, for (int i = 1; i < src_dims.size(); ++i) slice_size *= src_dims[i]; const T* p_src = src.data(); + // why must be int? const int* p_index = index.data(); T* p_output = output->data(); diff --git a/paddle/fluid/operators/gather_op.cc b/paddle/fluid/operators/gather_op.cc index 55cef93aacd43174edefbb8aa740bcbea3d8feef..91f3818f2165c91eef88921859afe5703bd65685 100644 --- a/paddle/fluid/operators/gather_op.cc +++ b/paddle/fluid/operators/gather_op.cc @@ -13,6 +13,9 @@ See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/fluid/operators/gather_op.h" +#include +#include +#include #include "paddle/fluid/framework/ddim.h" namespace paddle { @@ -59,8 +62,9 @@ class GatherGradOp : public framework::OperatorWithKernel { protected: framework::OpKernelType GetExpectedKernelType( const framework::ExecutionContext& ctx) const override { - return framework::OpKernelType(ctx.Input("X")->type(), - ctx.device_context()); + return framework::OpKernelType( + ctx.Input(framework::GradVarName("Out"))->type(), + ctx.device_context()); } }; @@ -94,13 +98,34 @@ Out = [[3, 4], )DOC"); } }; + +class GatherGradOpDescMaker : public framework::SingleGradOpDescMaker { + public: + using framework::SingleGradOpDescMaker::SingleGradOpDescMaker; + + protected: + std::unique_ptr Apply() const override { + std::unique_ptr op(new framework::OpDesc()); + op->SetType("gather_grad"); + op->SetInput("Index", Input("Index")); + op->SetInput("X", Input("X")); + op->SetInput(framework::GradVarName("Out"), OutputGrad("Out")); + op->SetOutput(framework::GradVarName("X"), InputGrad("X")); + op->SetAttrMap(Attrs()); + return op; + } +}; + +DECLARE_NO_NEED_BUFFER_VARS_INFERENCE(GatherGradNoNeedBufferVarInference, "X"); + } // namespace operators } // namespace paddle namespace ops = paddle::operators; REGISTER_OPERATOR(gather, ops::GatherOp, ops::GatherOpMaker, - paddle::framework::DefaultGradOpDescMaker); -REGISTER_OPERATOR(gather_grad, ops::GatherGradOp); + ops::GatherGradOpDescMaker); +REGISTER_OPERATOR(gather_grad, ops::GatherGradOp, + ops::GatherGradNoNeedBufferVarInference); REGISTER_OP_CPU_KERNEL(gather, ops::GatherOpKernel, ops::GatherOpKernel, ops::GatherOpKernel, ops::GatherOpKernel, diff --git a/paddle/fluid/operators/group_norm_op.cc b/paddle/fluid/operators/group_norm_op.cc index cbdffa0db8277dbf7257c3b3c1d03c1b459d5b2b..2ab40f482d7a1463703085037bcb94fd4aecf377 100644 --- a/paddle/fluid/operators/group_norm_op.cc +++ b/paddle/fluid/operators/group_norm_op.cc @@ -13,7 +13,9 @@ See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/fluid/operators/group_norm_op.h" +#include #include +#include namespace paddle { namespace operators { @@ -170,26 +172,18 @@ class GroupNormGradMaker : public framework::SingleGradOpDescMaker { } }; -class GroupNormInplaceInToOut : public framework::InplaceInToOut { +class GroupNormInplaceInToOut : public framework::InplaceOpInference { public: - using InplaceInToOut::InplaceInToOut; - - protected: - std::unordered_map Apply( - const framework::OpDesc &op_desc, - framework::BlockDesc *block) const override { + std::unordered_map operator()( + const framework::OpDesc &op_desc) const override { return {{"X", "Y"}}; } }; -class GroupNormGradInplaceInToOut : public framework::InplaceInToOut { +class GroupNormGradInplaceInToOut : public framework::InplaceOpInference { public: - using InplaceInToOut::InplaceInToOut; - - protected: - std::unordered_map Apply( - const framework::OpDesc &op_desc, - framework::BlockDesc *block) const override { + std::unordered_map operator()( + const framework::OpDesc &op_desc) const override { return {{framework::GradVarName("Y"), framework::GradVarName("X")}}; } }; diff --git a/paddle/fluid/operators/jit/README.en.md b/paddle/fluid/operators/jit/README.en.md index 8670ec2ff28ac8353217e0ee2f8c9b784e488ac7..7d4dc6d47a512ee7ed75d99800968a38de98f090 100644 --- a/paddle/fluid/operators/jit/README.en.md +++ b/paddle/fluid/operators/jit/README.en.md @@ -1,7 +1,7 @@ # JIT Kernel JIT(Just In Time) Kernel contains actually generated code and some other implemenations with the same logic. -Each implementations has its own condition to use, defined in `UseMe`. +Each implementation has its own condition to use, defined in `CanBeUsed`. They are combined together to get the best performance of one single independent function. They could be some very simple functions like vector multiply, or some complicated functions like LSTM. And they can be composed with some other exited jit kernels to build up a complex function. @@ -42,35 +42,62 @@ All basical definations of jit kernels are addressed in `paddle/fluid/operators/ ## How to use -One simple function `jit::Get`, which is very easy to use, is supported to get the kernel. -It can automatically return the expected function with best performance under the given attributes. -All kernels are inlcuded in `paddle/fluid/operators/jit/kernels.h`, you can only include this one header to get all the registered kernels. +We present these methods to get the functions: +- `GetAllCandidateFuncs`. It can return all the implementations supported. All of the implementations can get the same result. You can do some runtime benchmark to choose which should actually be used. +- `GetDefaultBestFunc`. It only return one default function pointer, which is tuning offline with some genenal configures and attributes. This should cover most situations. +- `KernelFuncs::Cache()`. It can get the default functions and save it for next time with the same attribute. +- `GetReferFunc`. It can only get the reference code in CPU, and all the others implementations have same logic with this reference code. + +And here are some examples: + +Get from cache: + +```cpp + using T = float; + jit::seq_pool_attr_t attr(width, jit::SeqPoolType::kSum); + auto seqpool_func = jit::KernelFuncs, platform::CPUPlace>::Cache().At(attr); + seqpool_func(src_data, dst_data, &attr); +``` + +Get all implementations and run once: + +```cpp + using T = float; + jit::seq_pool_attr_t attr(width, jit::SeqPoolType::kSum); + auto funcs = jit::GetAllCandidateFuncsWithTypes, platform::CPUPlace>(attr); + for (auto f : funcs) { + LOG(INFO) << "Kernel implementation type: " << f.first; + f.second(src_data, dst_data, &attr); + } +``` + +All kernels are inlcuded in `paddle/fluid/operators/jit/kernels.h`, which is automatically generated in compile time, you can only include this one header to get all the registered kernels. ## Solid Test - Unit Test All functions should be compared with the corresponding reference functions, including data tyep `float` and `double`. - Benchmark - All functions should be tested, and make sure the `jit::Get` function obtain the best performance with all attributes. + All functions should be tested, and make sure the `jit::GetDefaultBestFunc` function obtain the best performance with all attributes. # How to add new kernel ## Required 1. Add `your_key` at `KernelType`. -2. Add reference function of `your_key`. +2. Add your new `KernelTuple` which must include `your_key`. It should be a combination of the data type, attribute type and function type. You can refer `SeqPoolTuple`. +3. Add reference function of `your_key`. Note: - this should be run on CPU and do not depend on any third-party. - Add `USE_JITKERNEL_REFER(your_key)` in `refer/CmakeLists.txt` to make sure this code can be used. -3. Add unit test in `test.cc`, and verfiy at least `float` and `double`. +4. Add unit test in `test.cc`, and verfiy at least `float` and `double`. Test more data type for some special functions if necessary, for example `int8`. -4. Add functions in `benchmark.cc` to test all function of same `KernelType`. Make sure `jit::Get` always get the best one. +5. Add functions in `benchmark.cc` to test all function of same `KernelType`. Make sure `GetDefaultBestFunc` always get the best one. ## Optional Add more implementations of `your_kery` for performance enhancement. -1. Add functions based on generated code in `gen`. It should be derived from `JitCode` and should have corepsonding creator from `JitCodeCreator` which will be registered on the `your_key`. -Note: Add new `KernelTuples` if necessary,your can refer to `XYZNTuples`. -Specialie method `JitCodeKey` when add new attribute type。 -2. Add more functions in `more`,you can use any third party you wish, like mkl, mkldnn or intrinsic code to reach the best performance. +1. Add functions based on generated code in `gen`. It should be derived from `JitCode` and should have correpsonding creator from `JitCodeCreator` which will be registered on the `your_key`. +2. If new attribute type is added, you should specialize `JitCodeKey` of this type. +3. Add more functions in `more`,you can use any third party you wish, like mkl, mkldnn or intrinsic code to reach the best performance. diff --git a/paddle/fluid/operators/jit/README.md b/paddle/fluid/operators/jit/README.md index cc19f09f56ddf6a7c74d6605ab3f1bd059f19bb8..770548c5260f73f038f52e0b06b77ba698851997 100644 --- a/paddle/fluid/operators/jit/README.md +++ b/paddle/fluid/operators/jit/README.md @@ -1,7 +1,7 @@ # JIT Kernel 结合函数模板和JIT生成需要的kernel函数。 -这里的kernel是比Operator中kernel更小级别的算子单元,更侧重的是在不同硬件上的性能。可以有多重第三方库的实现,每种实现有自己的`UseMe`函数负责什么条件下可以被调用。 +这里的kernel是比Operator中kernel更小级别的算子单元,更侧重的是在不同硬件上的性能。可以有多重第三方库的实现,每种实现有自己的`CanBeUsed`函数负责什么条件下可以被调用。 这里实现的函数可以非常细粒度的函数方法,比如Vector MUL, 也可以是一个复杂的逻辑比如LSTM等。复杂的逻辑也可以由自己的底层函数拼接而成。 目前仅支持CPU上的高性能计算。 @@ -39,27 +39,55 @@ PaddlePaddle/Paddle/paddle/fluid/ ## 动态获取 -提供一个`jit::Get`方法,根据kernel类别获取,每种实现都有自己的使用范围,根据范围动态和当前条件选择需要的kernel函数。 +- 提供`GetAllCandidateFuncs`方法,根据输入的kernel类别,获取满足要求的所有函数实现。所有实现保证结果一致,但是速度不一致,可以根据具体输入属性大小,动态测试得到当前最优实现,手动选择最优函数。 +- 提供`GetDefaultBestFunc`方法,返回一个默认最优的函数实现。该函数是根据一些通用配置离线tuning之后的结果,能覆盖大多数情况下最优结果。 +- 提供`KernelFuncs::Cache()`方法,该方法会返回默认最优的函数,同时会缓存该函数指针,如果出现属性一致的情况,直接返回上次的函数指针,如果不存在则根据属性新建。 +- 提供`GetReferFunc` 方法,返回该kernel最原始的逻辑函数。该方法与kernel的输入大小和属性没有任何关系,有且并只有一个在CPU上的实现。该方法表征了kernel的原始逻辑,其他所有实现的逻辑与它保持一致。 + +### 例子 + +所有kernel的调用只需要在头文件中包含`"paddle/fluid/operators/jit/kernels.h"`, 该文件是编译时自动生成的。 + +直接从缓存中获取默认最优的函数。 + +```cpp + using T = float; + jit::seq_pool_attr_t attr(width, jit::SeqPoolType::kSum); + auto seqpool_func = jit::KernelFuncs, platform::CPUPlace>::Cache().At(attr); + seqpool_func(src_data, dst_data, &attr); +``` + +跑一遍所有实现,并输出实现类别。 + +```cpp + using T = float; + jit::seq_pool_attr_t attr(width, jit::SeqPoolType::kSum); + auto funcs = jit::GetAllCandidateFuncsWithTypes, platform::CPUPlace>(attr); + for (auto f : funcs) { + LOG(INFO) << "Kernel implementation type: " << f.first; + f.second(src_data, dst_data, &attr); + } +``` ## 测试 - 逻辑测试 所有实现都要与refer的code对比,需要满足精度要求, 包括float和double的数据类型 - 性能测试 - 所有实现的性能对比,并且与最终的`jit::Get`方法对比,该方法拿到的性能需要在各种条件下都是最好的。 + 所有实现的性能对比,并且与最终的`jit::GetDefaultBestFunc`方法对比,该方法拿到的性能需要在各种条件下都是最好的。 # 如何添加新的算子 -- 在`KernelType` 中添加 `your_key` . -- 实现Reference 的逻辑,这个是必须是在CPU上的实现,并且不能依赖任何第三方库。实现后在`refer/CmakeLists.txt`中添加`USE_JITKERNEL_REFER(your_key)`来使用该kernel. -- (optional) 实现更多的算法在`more`目录下,可以依赖mkl,intrinsic或者mkldnn等第三方库。 -- (optional) 实现基于Xbyak的生成code,在`gen`目下。 jitcode需要实现自己的`JitCodeCreator`,并注册在与refer相同的`KernelType`上。 -- 必要时可以添加新的`KernelTuples`,可以参考`XYZNTuples`,新加的Attr类型需要特例化`JitCodeKey`方法。 -- 在`test.cc`中添加unit test,至少需要测试`float`和`double`两种数据类型,如有必要需要支持额外的数据类型,比如`int8`的相关函数。 -- 在`benchmark.cc`中添加相应的性能对比,同一种kernel需要对比所有实现,并且确保`jit::Get`得到的实现一直是速度最快的。 +1. 在`KernelType` 中添加 `your_key` 。 +2. 实现Reference 的逻辑,这个是必须是在CPU上的实现,并且不能依赖任何第三方库。实现后在`refer/CmakeLists.txt`中添加`USE_JITKERNEL_REFER(your_key)`来使用该kernel。 +3. (optional) 实现更多的算法在`more`目录下,可以依赖mkl,intrinsic或者mkldnn等第三方库。 +4. (optional) 实现基于Xbyak的生成code,在`gen`目下。 jitcode需要实现自己的`JitCodeCreator`,并注册在与refer相同的`KernelType`上。 +5. 添加新的`KernelTuple`,需要与`KernelType`一一对应,是所有类型的一个打包,包括数据类型,属性的类型,以及返回的函数类型。可以参考`SeqPoolTuple`,新加的Attr类型需要特例化`JitCodeKey`方法。 +6. 在`test.cc`中添加unit test,至少需要测试`float`和`double`两种数据类型,如有必要需要支持额外的数据类型,比如`int8`的相关函数。 +7. 在`benchmark.cc`中添加相应的性能对比,同一种kernel需要对比所有实现,并且确保`GetDefaultBestFunc`得到的实现一直是速度最快的。 # 优点 -- 统一的Get方法,接口简单。 +- 接口方便,灵活调用。 - 同一套逻辑可以有多套实现,可以依赖多套第三方库,互不影响。 - 目录结构清晰,不会在某个文件中有多个宏定义,导致的可读性差问题。 - 优化方便,可以直接针对某种属性针对性优化,并不影响其他属性下的性能。 diff --git a/paddle/fluid/operators/load_combine_op.cc b/paddle/fluid/operators/load_combine_op.cc index 2948cf71a911b296f8cee7ff9a2fb75f644dbe71..63d3f809f263588bc1fbcd9ee4305e2ce9321e38 100644 --- a/paddle/fluid/operators/load_combine_op.cc +++ b/paddle/fluid/operators/load_combine_op.cc @@ -88,4 +88,5 @@ REGISTER_OP_CPU_KERNEL( ops::LoadCombineOpKernel, ops::LoadCombineOpKernel, ops::LoadCombineOpKernel, + ops::LoadCombineOpKernel, ops::LoadCombineOpKernel); diff --git a/paddle/fluid/operators/load_op.cc b/paddle/fluid/operators/load_op.cc index 2d8e6ca854b55e01dacd1e0e7898ba59ea6078dc..656728c609eb19f90390d9dec72d9e30fd3040fd 100644 --- a/paddle/fluid/operators/load_op.cc +++ b/paddle/fluid/operators/load_op.cc @@ -64,4 +64,5 @@ REGISTER_OP_CPU_KERNEL( load, ops::LoadOpKernel, ops::LoadOpKernel, ops::LoadOpKernel, + ops::LoadOpKernel, ops::LoadOpKernel); diff --git a/paddle/fluid/operators/lod_reset_op.cc b/paddle/fluid/operators/lod_reset_op.cc index a814c365d70ae91490e7fb50a0baf8fec05d97ef..e0ab02cd90cdee848250a6aba882b0cb0c17abd7 100644 --- a/paddle/fluid/operators/lod_reset_op.cc +++ b/paddle/fluid/operators/lod_reset_op.cc @@ -13,6 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/fluid/operators/lod_reset_op.h" +#include namespace paddle { namespace operators { @@ -146,18 +147,39 @@ class LoDResetGradOp : public framework::OperatorWithKernel { protected: framework::OpKernelType GetExpectedKernelType( const framework::ExecutionContext &ctx) const override { - return framework::OpKernelType(ctx.Input("X")->type(), - ctx.device_context()); + return framework::OpKernelType( + ctx.Input(framework::GradVarName("Out"))->type(), + ctx.device_context()); } }; +class LoDResetGradDescMaker : public framework::SingleGradOpDescMaker { + public: + using framework::SingleGradOpDescMaker::SingleGradOpDescMaker; + + protected: + std::unique_ptr Apply() const override { + std::unique_ptr op(new framework::OpDesc()); + op->SetType("lod_reset_grad"); + op->SetInput(framework::GradVarName("Out"), OutputGrad("Out")); + op->SetInput("X", Input("X")); + op->SetOutput(framework::GradVarName("X"), InputGrad("X")); + op->SetAttrMap(Attrs()); + return op; + } +}; + +DECLARE_NO_NEED_BUFFER_VARS_INFERENCE(LoDResetGradNoNeedBufferVarInference, + "X"); + } // namespace operators } // namespace paddle namespace ops = paddle::operators; REGISTER_OPERATOR(lod_reset, ops::LoDResetOp, ops::LoDResetOpMaker, - paddle::framework::DefaultGradOpDescMaker); -REGISTER_OPERATOR(lod_reset_grad, ops::LoDResetGradOp); + ops::LoDResetGradDescMaker); +REGISTER_OPERATOR(lod_reset_grad, ops::LoDResetGradOp, + ops::LoDResetGradNoNeedBufferVarInference); REGISTER_OP_CPU_KERNEL( lod_reset, ops::LoDResetKernel, ops::LoDResetKernel, diff --git a/paddle/fluid/operators/lookup_table_op.cc b/paddle/fluid/operators/lookup_table_op.cc index 8d1ebe6b1ce3374d21f0cdfff21ca27929398e8e..d635fc617bc63e1f625e93d21886f6ad134947f6 100644 --- a/paddle/fluid/operators/lookup_table_op.cc +++ b/paddle/fluid/operators/lookup_table_op.cc @@ -33,7 +33,7 @@ class LookupTableOp : public framework::OperatorWithKernel { auto table_dims = ctx->GetInputDim("W"); auto ids_dims = ctx->GetInputDim("Ids"); int ids_rank = ids_dims.size(); - + VLOG(5) << "ids rank is " << ids_rank << std::endl; PADDLE_ENFORCE_EQ(table_dims.size(), 2); PADDLE_ENFORCE_EQ(ids_dims[ids_rank - 1], 1, "The last dimension of the 'Ids' tensor must be 1."); diff --git a/paddle/fluid/operators/reader/CMakeLists.txt b/paddle/fluid/operators/reader/CMakeLists.txt index 7c284312df912ad758f6fffc44f111dfe765feb8..5ee1206175600cd668ccbbf5b98053708a4406d3 100644 --- a/paddle/fluid/operators/reader/CMakeLists.txt +++ b/paddle/fluid/operators/reader/CMakeLists.txt @@ -17,7 +17,9 @@ function(reader_library TARGET_NAME) PARENT_SCOPE) endfunction() +cc_library(py_reader SRCS py_reader.cc DEPS reader) cc_library(buffered_reader SRCS buffered_reader.cc DEPS reader simple_threadpool) + reader_library(open_files_op SRCS open_files_op.cc DEPS buffered_reader) reader_library(create_random_data_generator_op SRCS create_random_data_generator_op.cc) reader_library(create_shuffle_reader_op SRCS create_shuffle_reader_op.cc) @@ -26,7 +28,7 @@ reader_library(create_recordio_file_reader_op SRCS create_recordio_file_reader_o reader_library(create_double_buffer_reader_op SRCS create_double_buffer_reader_op.cc DEPS buffered_reader) reader_library(create_multi_pass_reader_op SRCS create_multi_pass_reader_op.cc) reader_library(create_custom_reader_op SRCS create_custom_reader_op.cc) -reader_library(create_py_reader_op SRCS create_py_reader_op.cc) +reader_library(create_py_reader_op SRCS create_py_reader_op.cc DEPS py_reader) if (NOT WIN32 AND NOT ON_INFER) cc_library(ctr_reader SRCS ctr_reader.cc DEPS gzstream reader zlib) @@ -38,7 +40,7 @@ cc_test(reader_blocking_queue_test SRCS reader_blocking_queue_test.cc) # Export local libraries to parent # set(READER_LIBRARY ${LOCAL_READER_LIBS} PARENT_SCOPE) -op_library(read_op) +op_library(read_op DEPS py_reader buffered_reader) foreach(src ${LOCAL_READER_LIBS}) set(OP_LIBRARY ${src} ${OP_LIBRARY} CACHE INTERNAL "op libs") diff --git a/paddle/fluid/operators/reader/blocking_queue.h b/paddle/fluid/operators/reader/blocking_queue.h index 51b980acb5a08d431d96a3a92479dec09119c27e..78d238aa6115265023d5d87c01048a87180448d0 100644 --- a/paddle/fluid/operators/reader/blocking_queue.h +++ b/paddle/fluid/operators/reader/blocking_queue.h @@ -16,6 +16,7 @@ #include // NOLINT #include +#include #include "paddle/fluid/platform/enforce.h" @@ -34,7 +35,7 @@ class BlockingQueue { explicit BlockingQueue(size_t capacity, bool speed_test_mode = false) : capacity_(capacity), speed_test_mode_(speed_test_mode), closed_(false) { PADDLE_ENFORCE_GT( - capacity_, 0, + capacity_, static_cast(0), "The capacity of a reader::BlockingQueue must be greater than 0."); } diff --git a/paddle/fluid/operators/reader/buffered_reader.cc b/paddle/fluid/operators/reader/buffered_reader.cc index 134807092d59329ce93381da67a98b8230db5767..c24e9aedc4ebd8f4fa9e483b1c1cc71fe0bf0aa7 100644 --- a/paddle/fluid/operators/reader/buffered_reader.cc +++ b/paddle/fluid/operators/reader/buffered_reader.cc @@ -30,8 +30,10 @@ BufferedReader::~BufferedReader() { #ifdef PADDLE_WITH_CUDA if (platform::is_gpu_place(place_)) { platform::SetDeviceId(boost::get(place_).device); - PADDLE_ENFORCE(cudaStreamDestroy(stream)); - for (auto &event : events) PADDLE_ENFORCE(cudaEventDestroy(event)); + PADDLE_ENFORCE(cudaStreamDestroy(stream_)); + for (auto &event : events_) { + PADDLE_ENFORCE(cudaEventDestroy(event)); + } } #endif } @@ -46,15 +48,15 @@ BufferedReader::BufferedReader( #ifdef PADDLE_WITH_CUDA if (platform::is_gpu_place(place_)) { platform::SetDeviceId(boost::get(place_).device); - compute_stream = + compute_stream_ = ((platform::CUDADeviceContext *)(platform::DeviceContextPool::Instance() .Get(place_))) ->stream(); - events.resize(buffer_size); - for (auto &event : events) { + events_.resize(buffer_size); + for (auto &event : events_) { PADDLE_ENFORCE(cudaEventCreateWithFlags(&event, cudaEventDisableTiming)); } - PADDLE_ENFORCE(cudaStreamCreateWithFlags(&stream, cudaStreamNonBlocking)); + PADDLE_ENFORCE(cudaStreamCreateWithFlags(&stream_, cudaStreamNonBlocking)); } #endif cpu_buffer_.resize(buffer_size); @@ -73,7 +75,7 @@ void BufferedReader::ReadAsync(size_t i) { #ifdef PADDLE_WITH_CUDA if (platform::is_gpu_place(place_)) { platform::SetDeviceId(boost::get(place_).device); - PADDLE_ENFORCE(cudaEventRecord(events[i], compute_stream)); + PADDLE_ENFORCE(cudaEventRecord(events_[i], compute_stream_)); } #endif position_.emplace(thread_pool_.enqueue([this, i]() -> size_t { @@ -91,7 +93,7 @@ void BufferedReader::ReadAsync(size_t i) { // commands from different streams cannot run concurrently. if (platform::is_gpu_place(place_)) { platform::SetDeviceId(boost::get(place_).device); - PADDLE_ENFORCE(cudaStreamWaitEvent(stream, events[i], 0)); + PADDLE_ENFORCE(cudaStreamWaitEvent(stream_, events_[i], 0)); TensorVec &gpu = gpu_buffer_[i]; gpu.resize(cpu.size()); platform::RecordEvent record_event("BufferedReader:MemoryCopy"); @@ -106,12 +108,14 @@ void BufferedReader::ReadAsync(size_t i) { if (platform::is_cuda_pinned_place(cpu_place)) { memory::Copy(boost::get(place_), gpu_ptr, boost::get(cpu_place), - cpu_ptr, size, stream); + cpu_ptr, size, stream_); } else if ((platform::is_gpu_place(cpu_place))) { memory::Copy(boost::get(place_), gpu_ptr, boost::get(cpu_place), cpu_ptr, - size, stream); + size, stream_); } else { + // if cpu place is not pinned, async copy is slower than sync copy, + // so we use sync copy instead. // TODO(zcd): The default stream should not be used here. memory::Copy(boost::get(place_), gpu_ptr, boost::get(cpu_place), cpu_ptr, size, @@ -119,7 +123,7 @@ void BufferedReader::ReadAsync(size_t i) { } gpu[i].set_lod(cpu[i].lod()); } - PADDLE_ENFORCE(cudaStreamSynchronize(stream)); + PADDLE_ENFORCE(cudaStreamSynchronize(stream_)); } #endif return i; diff --git a/paddle/fluid/operators/reader/buffered_reader.h b/paddle/fluid/operators/reader/buffered_reader.h index 87680da01a1f51cfdfe4d100508440eda9d1877f..5f8b2d47c22d0a15d53c8d30d39608fd64d4bddd 100644 --- a/paddle/fluid/operators/reader/buffered_reader.h +++ b/paddle/fluid/operators/reader/buffered_reader.h @@ -15,6 +15,7 @@ #pragma once #include +#include #include #include #include "ThreadPool.h" @@ -63,9 +64,9 @@ class BufferedReader : public framework::DecoratedReader { std::vector gpu_buffer_; size_t prev_pos_{-1UL}; #ifdef PADDLE_WITH_CUDA - cudaStream_t stream; - cudaStream_t compute_stream; - std::vector events; + cudaStream_t stream_; + cudaStream_t compute_stream_; + std::vector events_; #endif }; diff --git a/paddle/fluid/operators/reader/create_py_reader_op.cc b/paddle/fluid/operators/reader/create_py_reader_op.cc index 901a92ab5b5c74b071be8b57a7653d90e2a4fb29..4a6581bbbd00019db33896371adac6d4e420e48c 100644 --- a/paddle/fluid/operators/reader/create_py_reader_op.cc +++ b/paddle/fluid/operators/reader/create_py_reader_op.cc @@ -12,37 +12,13 @@ // See the License for the specific language governing permissions and // limitations under the License. -#include "paddle/fluid/operators/reader/lod_tensor_blocking_queue.h" +#include "paddle/fluid/operators/reader/py_reader.h" #include "paddle/fluid/operators/reader/reader_op_registry.h" namespace paddle { namespace operators { namespace reader { -class PyReader : public framework::FileReader { - public: - explicit PyReader(const std::shared_ptr& queue) - : framework::FileReader() { - PADDLE_ENFORCE(queue != nullptr, "LoDTensorBlockingQueue must not be null"); - queue_ = queue; - } - - void ReadNext(std::vector* out) override { - bool success; - *out = queue_->Pop(&success); - if (!success) out->clear(); - } - - ~PyReader() { queue_->Close(); } - - void Shutdown() override { queue_->Close(); } - - void Start() override { queue_->ReOpen(); } - - private: - std::shared_ptr queue_; -}; - class CreatePyReaderOp : public framework::OperatorBase { public: using framework::OperatorBase::OperatorBase; diff --git a/paddle/fluid/operators/reader/ctr_reader.h b/paddle/fluid/operators/reader/ctr_reader.h index 740cd5219c70331d1f71d832adef084c148a2408..0860fb845976c02562a181139e27bd1912a7c179 100644 --- a/paddle/fluid/operators/reader/ctr_reader.h +++ b/paddle/fluid/operators/reader/ctr_reader.h @@ -21,6 +21,7 @@ #include #include #include +#include #include #include #include @@ -152,7 +153,7 @@ class CTRReader : public framework::FileReader { queue_->ReOpen(); VLOG(3) << "reopen success"; VLOG(3) << "thread_num " << thread_num_; - for (int thread_id = 0; thread_id < thread_num_; thread_id++) { + for (size_t thread_id = 0; thread_id < thread_num_; thread_id++) { read_threads_.emplace_back(new std::thread(std::bind( &ReadThread, file_groups_[thread_id], data_desc_, static_cast(thread_id), &read_thread_status_, queue_))); diff --git a/paddle/fluid/operators/reader/py_reader.cc b/paddle/fluid/operators/reader/py_reader.cc new file mode 100644 index 0000000000000000000000000000000000000000..155ae859defcf20a5e226a4abfb99dc308dfb23c --- /dev/null +++ b/paddle/fluid/operators/reader/py_reader.cc @@ -0,0 +1,42 @@ +// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "paddle/fluid/operators/reader/py_reader.h" +#include + +namespace paddle { +namespace operators { +namespace reader { + +PyReader::PyReader(const std::shared_ptr& queue) + : framework::FileReader() { + PADDLE_ENFORCE(queue != nullptr, "LoDTensorBlockingQueue must not be null"); + queue_ = queue; +} + +void PyReader::ReadNext(std::vector* out) { + bool success; + *out = queue_->Pop(&success); + if (!success) out->clear(); +} + +PyReader::~PyReader() { queue_->Close(); } + +void PyReader::Shutdown() { queue_->Close(); } + +void PyReader::Start() { queue_->ReOpen(); } + +} // namespace reader +} // namespace operators +} // namespace paddle diff --git a/paddle/fluid/operators/reader/py_reader.h b/paddle/fluid/operators/reader/py_reader.h new file mode 100644 index 0000000000000000000000000000000000000000..43079075142e8db22c0e3b7c86de4249d447f961 --- /dev/null +++ b/paddle/fluid/operators/reader/py_reader.h @@ -0,0 +1,45 @@ +// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once + +#include +#include +#include +#include "paddle/fluid/framework/reader.h" +#include "paddle/fluid/operators/reader/lod_tensor_blocking_queue.h" + +namespace paddle { +namespace operators { +namespace reader { + +class PyReader : public framework::FileReader { + public: + explicit PyReader(const std::shared_ptr& queue); + + void ReadNext(std::vector* out) override; + + ~PyReader(); + + void Shutdown() override; + + void Start() override; + + private: + std::shared_ptr queue_; +}; + +} // namespace reader +} // namespace operators +} // namespace paddle diff --git a/paddle/fluid/operators/reshape_op.cc b/paddle/fluid/operators/reshape_op.cc index 2b429380fbfc007f5936bff96e0924d93abc81f5..5165af6a253e7f57c1e27cc017f2a0cbc1f70f38 100644 --- a/paddle/fluid/operators/reshape_op.cc +++ b/paddle/fluid/operators/reshape_op.cc @@ -322,14 +322,10 @@ class Reshape2GradOp : public framework::OperatorWithKernel { } }; -class ReshapeOpInplaceInToOut : public framework::InplaceInToOut { +class ReshapeOpInplaceInToOut : public framework::InplaceOpInference { public: - using InplaceInToOut::InplaceInToOut; - - protected: - std::unordered_map Apply( - const framework::OpDesc &op_desc, - framework::BlockDesc *block) const override { + std::unordered_map operator()( + const framework::OpDesc &op_desc) const override { std::unordered_map inplace_in_to_out = { {"X", "Out"}, }; @@ -337,13 +333,10 @@ class ReshapeOpInplaceInToOut : public framework::InplaceInToOut { } }; -class ReshapeGradInplaceInToOut : public framework::InplaceInToOut { - using InplaceInToOut::InplaceInToOut; - - protected: - std::unordered_map Apply( - const framework::OpDesc &op_desc, - framework::BlockDesc *block) const override { +class ReshapeGradInplaceInToOut : public framework::InplaceOpInference { + public: + std::unordered_map operator()( + const framework::OpDesc &op_desc) const override { std::unordered_map inplace_in_to_out = { {framework::GradVarName("Out"), framework::GradVarName("X")}, }; diff --git a/paddle/fluid/operators/save_combine_op.cc b/paddle/fluid/operators/save_combine_op.cc index 62b1e09737a4af4d0fe08eafcb3b2999d97032c1..953e2655d13328b986a67398dca54f8a5e3aedcf 100644 --- a/paddle/fluid/operators/save_combine_op.cc +++ b/paddle/fluid/operators/save_combine_op.cc @@ -19,11 +19,27 @@ limitations under the License. */ namespace paddle { namespace operators { +using Tensor = framework::Tensor; + class SaveCombineOp : public framework::OperatorWithKernel { public: using framework::OperatorWithKernel::OperatorWithKernel; - void InferShape(framework::InferShapeContext *ctx) const override {} + void InferShape(framework::InferShapeContext* ctx) const override {} + + protected: + framework::OpKernelType GetExpectedKernelType( + const framework::ExecutionContext& ctx) const override { + return framework::OpKernelType(framework::proto::VarType::FP32, + ctx.GetPlace()); + } + // TODO(lujun): The override here is just to bypass transform + // in operator impl, which is not elegant enough. + framework::OpKernelType GetKernelTypeForVar( + const std::string& var_name, const Tensor& tensor, + const framework::OpKernelType& expected_kernel_type) const override { + return expected_kernel_type; + } }; class SaveCombineOpProtoMaker : public framework::OpProtoAndCheckerMaker { @@ -54,7 +70,7 @@ to a file on disk. "(string)" "The \"file_path\" where the LoDTensor variables will be saved.") .AddCustomChecker( - [](const std::string &path) { return !path.empty(); }); + [](const std::string& path) { return !path.empty(); }); } }; @@ -70,5 +86,4 @@ REGISTER_OP_CPU_KERNEL( save_combine, ops::SaveCombineOpKernel, ops::SaveCombineOpKernel, - ops::SaveCombineOpKernel, - ops::SaveCombineOpKernel); + ops::SaveCombineOpKernel); diff --git a/paddle/fluid/operators/save_combine_op.cu b/paddle/fluid/operators/save_combine_op.cu index bc4478b51b111518439fe250a70b8dee0df53ad9..78607823a0368d216310bbbb390fd7face002839 100644 --- a/paddle/fluid/operators/save_combine_op.cu +++ b/paddle/fluid/operators/save_combine_op.cu @@ -20,6 +20,4 @@ REGISTER_OP_CUDA_KERNEL( save_combine, ops::SaveCombineOpKernel, ops::SaveCombineOpKernel, - ops::SaveCombineOpKernel, - ops::SaveCombineOpKernel, - ops::SaveCombineOpKernel); + ops::SaveCombineOpKernel); diff --git a/paddle/fluid/operators/softmax_op.cc b/paddle/fluid/operators/softmax_op.cc index 8fbf299a7c056aff3bfd4cbd3e3cc28fd3c6ccf2..db44bd394a2ce280c06274f728dcf95d266f94cf 100644 --- a/paddle/fluid/operators/softmax_op.cc +++ b/paddle/fluid/operators/softmax_op.cc @@ -14,7 +14,9 @@ limitations under the License. */ #include "paddle/fluid/operators/softmax_op.h" +#include #include +#include #ifdef PADDLE_WITH_CUDA #include "paddle/fluid/platform/cudnn_helper.h" @@ -199,14 +201,10 @@ class SoftmaxOpGradMaker : public framework::SingleGradOpDescMaker { } }; -class SoftmaxInplaceInToOut : public framework::InplaceInToOut { +class SoftmaxInplaceInToOut : public framework::InplaceOpInference { public: - using framework::InplaceInToOut::InplaceInToOut; - - protected: - std::unordered_map Apply( - const framework::OpDesc& op_desc, - framework::BlockDesc* block) const override { + std::unordered_map operator()( + const framework::OpDesc& op_desc) const override { return std::unordered_map{ {"X", "Out"}, }; diff --git a/paddle/fluid/platform/device_context.cc b/paddle/fluid/platform/device_context.cc index d54a3e8670e892f4e0d9ebb60ab26714ac8c0c68..48002a7620221aad90926ccd3eb89319d6e516b4 100644 --- a/paddle/fluid/platform/device_context.cc +++ b/paddle/fluid/platform/device_context.cc @@ -212,6 +212,7 @@ class EigenCudaStreamDevice : public Eigen::StreamInterface { CudnnHolder::CudnnHolder(const cudaStream_t* stream, const CUDAPlace& place) : workspace_(nullptr), stream_(stream), place_(place) { + PADDLE_ENFORCE(cudaSetDevice(place_.device)); PADDLE_ENFORCE(dynload::cudnnCreate(&cudnn_handle_)); PADDLE_ENFORCE(dynload::cudnnSetStream(cudnn_handle_, *stream_)); } @@ -252,10 +253,6 @@ CUDADeviceContext::CUDADeviceContext(CUDAPlace place) #endif } - if (dynload::HasCUDNN()) { - cudnn_holder_.reset(new CudnnHolder(&stream_, place)); - } - driver_version_ = GetCUDADriverVersion(place_.device); runtime_version_ = GetCUDARuntimeVersion(place_.device); @@ -348,12 +345,21 @@ bool CUDADeviceContext::tensor_core_available() const { return cublas_tensor_core_handle_ != nullptr; } +CudnnHolder* CUDADeviceContext::cudnn_holder() const { + std::call_once(init_cudnn_, [&]() { + if (dynload::HasCUDNN()) { + cudnn_holder_.reset(new CudnnHolder(&stream_, place_)); + } + }); + return cudnn_holder_.get(); +} + cudnnHandle_t CUDADeviceContext::cudnn_handle() const { - return cudnn_holder_->cudnn_handle(); + return cudnn_holder()->cudnn_handle(); } CudnnWorkspaceHandle CUDADeviceContext::cudnn_workspace_handle() const { - return CudnnWorkspaceHandle(cudnn_holder_.get()); + return CudnnWorkspaceHandle(cudnn_holder()); } cudaStream_t CUDADeviceContext::stream() const { return stream_; } diff --git a/paddle/fluid/platform/device_context.h b/paddle/fluid/platform/device_context.h index 9dbc72f561b04b3005e2ef029e0c4ea6c2c312b1..778f6613bd49dfbc46e8888cd53b1a4de5fe923d 100644 --- a/paddle/fluid/platform/device_context.h +++ b/paddle/fluid/platform/device_context.h @@ -292,9 +292,11 @@ class CUDADeviceContext : public DeviceContext { private: CUDAPlace place_; + mutable std::once_flag init_cudnn_; + std::unique_ptr eigen_device_; std::unique_ptr eigen_stream_; - std::unique_ptr cudnn_holder_; + mutable std::unique_ptr cudnn_holder_; cudaStream_t stream_; std::unique_ptr cublas_handle_; @@ -317,6 +319,7 @@ class CUDADeviceContext : public DeviceContext { // StreamCallbackManager is thread-safe std::unique_ptr callback_manager_; + CudnnHolder* cudnn_holder() const; DISABLE_COPY_AND_ASSIGN(CUDADeviceContext); }; diff --git a/paddle/fluid/pybind/CMakeLists.txt b/paddle/fluid/pybind/CMakeLists.txt index f1385f57184eceec49b791cf6c89641b098f036a..0991eff0fdaaca80ada2d8dd3c68eba72fd3f6e6 100644 --- a/paddle/fluid/pybind/CMakeLists.txt +++ b/paddle/fluid/pybind/CMakeLists.txt @@ -5,7 +5,7 @@ set(PYBIND_DEPS pybind python proto_desc memory executor async_executor prune if(WITH_PYTHON) list(APPEND PYBIND_DEPS py_func_op) endif() -set(PYBIND_SRCS pybind.cc exception.cc protobuf.cc const_value.cc recordio.cc async_executor_py.cc imperative.cc ir.cc inference_api.cc) +set(PYBIND_SRCS pybind.cc exception.cc protobuf.cc const_value.cc recordio.cc reader_py.cc async_executor_py.cc imperative.cc ir.cc inference_api.cc) if(WITH_PYTHON) if(WITH_AMD_GPU) diff --git a/paddle/fluid/pybind/pybind.cc b/paddle/fluid/pybind/pybind.cc index a57083a1444a164cdeecf7e3e6eff6dc0e1e7be7..7bf089637862c969d27f957a630469d1644222bf 100644 --- a/paddle/fluid/pybind/pybind.cc +++ b/paddle/fluid/pybind/pybind.cc @@ -24,6 +24,7 @@ limitations under the License. */ #include "paddle/fluid/framework/executor.h" #include "paddle/fluid/framework/feed_fetch_method.h" #include "paddle/fluid/framework/framework.pb.h" +#include "paddle/fluid/framework/garbage_collector.h" #include "paddle/fluid/framework/ir/pass_builder.h" #include "paddle/fluid/framework/lod_rank_table.h" #include "paddle/fluid/framework/lod_tensor.h" @@ -55,6 +56,7 @@ limitations under the License. */ #include "paddle/fluid/pybind/ir.h" #include "paddle/fluid/pybind/protobuf.h" #include "paddle/fluid/pybind/pybind.h" // NOLINT +#include "paddle/fluid/pybind/reader_py.h" #include "paddle/fluid/pybind/recordio.h" #include "paddle/fluid/pybind/tensor_py.h" @@ -128,11 +130,17 @@ static inline bool IsSamePlace(const PlaceType1 &p1, const PlaceType2 &p2) { return paddle::platform::Place(p1) == paddle::platform::Place(p2); } +template +static inline int PlaceIndex(const PlaceType &p) { + return static_cast(paddle::platform::Place(p).which()); +} + PYBIND11_MODULE(core, m) { // Not used, just make sure cpu_info.cc is linked. paddle::platform::CpuTotalPhysicalMemory(); paddle::memory::allocation::UseAllocatorStrategyGFlag(); + m.doc() = "C++ core of PaddlePaddle"; // using framework in this function. Since it is inside a function, it will @@ -147,6 +155,11 @@ PYBIND11_MODULE(core, m) { return paddle::operators::AppendPythonCallableObjectAndReturnId(py_obj); }); + // NOTE(zjl): ctest would load environment variables at the beginning even + // though we have not `import paddle.fluid as fluid`. So we add this API + // to enable eager deletion mode in unittest. + m.def("_set_eager_deletion_mode", &paddle::framework::SetEagerDeletionMode); + m.add_object("_cleanup", py::capsule([]() { ScopePool::Instance().Clear(); })); @@ -229,6 +242,7 @@ PYBIND11_MODULE(core, m) { self.forward_id_ = forward_id; }, py::return_value_policy::reference) + .def_property_readonly("type", &imperative::OpBase::Type) .def_property( "backward_id", [](const imperative::OpBase &self) { return self.backward_id_; }, @@ -274,6 +288,8 @@ PYBIND11_MODULE(core, m) { py::class_(m, "Tensor", py::buffer_protocol()) .def_buffer( [](Tensor &self) -> py::buffer_info { return CastToPyBuffer(self); }) + .def("_is_initialized", + [](const Tensor &self) { return self.IsInitialized(); }) .def("_get_dims", [](const Tensor &self) { return vectorize(self.dims()); }) .def("_set_dims", @@ -340,7 +356,8 @@ PYBIND11_MODULE(core, m) { .def("_set_double_element", TensorSetElement) .def("_get_double_element", TensorGetElement) .def("_place", [](Tensor &self) { return self.place(); }) - .def("_dtype", [](Tensor &self) { return self.type(); }); + .def("_dtype", [](Tensor &self) { return self.type(); }) + .def("__getitem__", PySliceTensor, py::return_value_policy::reference); py::class_(m, "LoDTensor", R"DOC( LoDTensor is a Tensor with optional LoD information. @@ -492,6 +509,13 @@ PYBIND11_MODULE(core, m) { Returns: out (bool): whether the lod is valid. + )DOC") + .def("__getitem__", PySliceTensor, py::return_value_policy::reference, + R"DOC( + Slice the original Tensor, and remove the LoD information. + + Returns: + out (Tensor): new Tensor(NOT LoDTensor). )DOC"); py::class_(m, "SelectedRows") @@ -531,6 +555,7 @@ PYBIND11_MODULE(core, m) { All parameter, weight, gradient are variables in Paddle. )DOC") + .def(py::init<>()) .def("is_int", [](const Variable &var) { return var.IsType(); }) .def("set_int", [](Variable &var, int val) -> void { *var.GetMutable() = val; }) @@ -572,14 +597,13 @@ All parameter, weight, gradient are variables in Paddle. }, py::return_value_policy::reference); - py::class_(m, "Reader", "") - .def("start", &framework::ReaderHolder::Start) - .def("reset", &framework::ReaderHolder::ResetAll); + BindReader(&m); using LoDTensorBlockingQueue = ::paddle::operators::reader::LoDTensorBlockingQueue; using LoDTensorBlockingQueueHolder = ::paddle::operators::reader::LoDTensorBlockingQueueHolder; + py::class_>( m, "LoDTensorBlockingQueue", "") .def("push", @@ -666,7 +690,8 @@ All parameter, weight, gradient are variables in Paddle. .def("drop_kids", &Scope::DropKids, R"DOC( Delete all sub-scopes of the current scope. - )DOC"); + )DOC") + .def("_kids", &Scope::kids); m.def("Scope", []() -> Scope * { @@ -763,7 +788,11 @@ All parameter, weight, gradient are variables in Paddle. #if (defined(PADDLE_WITH_CUDA) && !defined(_WIN32)) py::class_(m, "Communicator").def(py::init<>()); #endif - py::class_(m, "CUDAPlace") + py::class_(m, "CUDAPlace", R"DOC( + CUDAPlace is a descriptor of a device. It represents a GPU, and each CUDAPlace + has a dev_id to indicate the number of cards represented by the current CUDAPlace. + The memory of CUDAPlace with different dev_id is not accessible. + )DOC") .def("__init__", [](platform::CUDAPlace &self, int dev_id) { #ifdef PADDLE_WITH_CUDA @@ -776,6 +805,7 @@ All parameter, weight, gradient are variables in Paddle. PADDLE_THROW("Cannot use CUDAPlace in CPU only version"); #endif }) + .def("_type", &PlaceIndex) .def("_equals", &IsSamePlace) .def("_equals", &IsSamePlace) .def("_equals", &IsSamePlace) @@ -783,8 +813,12 @@ All parameter, weight, gradient are variables in Paddle. &IsSamePlace) .def("__str__", string::to_string); - py::class_(m, "CPUPlace") + py::class_(m, "CPUPlace", R"DOC( + CPUPlace is a descriptor of a device. It represents a CPU, and the memory + CPUPlace can be accessed by CPU. + )DOC") .def(py::init<>()) + .def("_type", &PlaceIndex) .def("_equals", &IsSamePlace) .def("_equals", &IsSamePlace) .def("_equals", &IsSamePlace) @@ -792,7 +826,10 @@ All parameter, weight, gradient are variables in Paddle. &IsSamePlace) .def("__str__", string::to_string); - py::class_(m, "CUDAPinnedPlace") + py::class_(m, "CUDAPinnedPlace", R"DOC( + CUDAPinnedPlace is a descriptor of a device. The memory of CUDAPinnedPlace + can be accessed by GPU and CPU. + )DOC") .def("__init__", [](platform::CUDAPinnedPlace &self) { #ifndef PADDLE_WITH_CUDA @@ -800,6 +837,7 @@ All parameter, weight, gradient are variables in Paddle. #endif new (&self) platform::CUDAPinnedPlace(); }) + .def("_type", &PlaceIndex) .def("_equals", &IsSamePlace) .def("_equals", &IsSamePlace) @@ -811,16 +849,25 @@ All parameter, weight, gradient are variables in Paddle. py::class_(m, "Place") .def(py::init<>()) + .def("_type", &PlaceIndex) .def("_equals", &IsSamePlace) .def("_equals", &IsSamePlace) .def("_equals", &IsSamePlace) .def("_equals", &IsSamePlace) .def("is_gpu_place", [](platform::Place &self) { return platform::is_gpu_place(self); }) + .def("is_cpu_place", + [](platform::Place &self) { return platform::is_cpu_place(self); }) + .def("is_cuda_pinned_place", + [](platform::Place &self) { + return platform::is_cuda_pinned_place(self); + }) .def("gpu_device_id", [](platform::Place &self) { return boost::get(self).device; }) + .def("set_place", [](platform::Place &self, + const platform::Place &other) { self = other; }) .def("set_place", [](platform::Place &self, const platform::CPUPlace &cpu_place) { self = cpu_place; diff --git a/paddle/fluid/pybind/reader_py.cc b/paddle/fluid/pybind/reader_py.cc new file mode 100644 index 0000000000000000000000000000000000000000..af7d30552ed47c0fbe26090b328cc7128b90f84d --- /dev/null +++ b/paddle/fluid/pybind/reader_py.cc @@ -0,0 +1,161 @@ +// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "paddle/fluid/pybind/reader_py.h" +#include +#include +#include +#include +#include +#include "paddle/fluid/framework/reader.h" +#include "paddle/fluid/operators/reader/buffered_reader.h" +#include "paddle/fluid/operators/reader/py_reader.h" +#include "paddle/fluid/platform/place.h" +#include "pybind11/stl.h" + +namespace paddle { +namespace pybind { + +class MultiDeviceFeedReader { + public: + using ResultDictList = + std::vector>; + + MultiDeviceFeedReader( + const std::shared_ptr &queue, + const std::vector &names, + const std::vector &dst_places, bool use_double_buffer) + : queue_(queue), + names_(names), + pool_(new ::ThreadPool(dst_places.size())) { + std::shared_ptr reader( + new operators::reader::PyReader(queue)); + + readers_.reserve(dst_places.size()); + for (auto &p : dst_places) { + auto *holder = new framework::ReaderHolder(); + if (use_double_buffer) { + holder->Reset( + framework::MakeDecoratedReader( + reader, p, 2)); + } else { + if (platform::is_gpu_place(p)) { + PADDLE_THROW( + "Place cannot be CUDAPlace when use_double_buffer is False"); + } + holder->Reset(reader); + } + readers_.emplace_back(holder); + } + + futures_.resize(dst_places.size()); + ret_.resize(dst_places.size()); + ReadAsync(); + } + + ResultDictList ReadNext() { + bool success = WaitFutures(); + + if (!success) { + return {}; + } + + ResultDictList result(ret_.size()); + for (size_t i = 0; i < ret_.size(); ++i) { + for (size_t j = 0; j < names_.size(); ++j) { + result[i].emplace(names_[j], std::move(ret_[i][j])); + } + } + ReadAsync(); + return result; + } + + void Reset() { + Shutdown(); + Start(); + ReadAsync(); + } + + ~MultiDeviceFeedReader() { + queue_->Close(); + pool_.reset(); + } + + private: + bool WaitFutures() { + bool success = true; + for (auto &f : futures_) { + success &= f.get(); + } + return success; + } + + void Shutdown() { + for (auto &r : readers_) r->Shutdown(); + } + + void Start() { + for (auto &r : readers_) r->Start(); + } + + void ReadAsync() { + for (size_t i = 0; i < readers_.size(); ++i) { + futures_[i] = pool_->enqueue([this, i] { + readers_[i]->ReadNext(&ret_[i]); + return !ret_[i].empty(); + }); + } + } + + std::shared_ptr queue_; + std::vector names_; + std::unique_ptr<::ThreadPool> pool_; + + std::vector> readers_; + + std::vector> futures_; + std::vector> ret_; +}; + +namespace py = pybind11; + +void BindReader(py::module *module) { + auto &m = *module; + + namespace reader = ::paddle::operators::reader; + + py::class_(m, "Reader", "") + .def("start", &framework::ReaderHolder::Start) + .def("reset", &framework::ReaderHolder::ResetAll); + + py::class_(m, "MultiDeviceFeedReader", "") + .def("read_next", &MultiDeviceFeedReader::ReadNext, + py::call_guard()) + .def("reset", &MultiDeviceFeedReader::Reset, + py::call_guard()); + + m.def("create_py_reader", + [](const std::shared_ptr + &queue, + const std::vector &names, + const std::vector &dst_places, + bool use_double_buffer) { + return new MultiDeviceFeedReader(queue, names, dst_places, + use_double_buffer); + }, + py::return_value_policy::take_ownership); +} + +} // namespace pybind +} // namespace paddle diff --git a/paddle/fluid/pybind/reader_py.h b/paddle/fluid/pybind/reader_py.h new file mode 100644 index 0000000000000000000000000000000000000000..472ff65368f3fb206ae599ae5d9d11e9ae8195ae --- /dev/null +++ b/paddle/fluid/pybind/reader_py.h @@ -0,0 +1,25 @@ +// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once + +#include "pybind11/pybind11.h" + +namespace paddle { +namespace pybind { + +void BindReader(pybind11::module *module); + +} // namespace pybind +} // namespace paddle diff --git a/paddle/fluid/pybind/tensor_py.h b/paddle/fluid/pybind/tensor_py.h index ecdc8f3dc75cc8b72520e0fd1411e23d2dbb07e2..4a780f1cb53e8eba8826f6c737f19b537372bc5b 100644 --- a/paddle/fluid/pybind/tensor_py.h +++ b/paddle/fluid/pybind/tensor_py.h @@ -14,16 +14,22 @@ limitations under the License. */ #pragma once #include +#include +#include #include #include #include #include "paddle/fluid/framework/lod_tensor.h" #include "paddle/fluid/memory/memcpy.h" +#include "paddle/fluid/operators/math/concat_and_split.h" +#include "paddle/fluid/operators/strided_memcpy.h" #include "paddle/fluid/platform/device_context.h" #include "paddle/fluid/platform/float16.h" #include "pybind11/numpy.h" #include "pybind11/pybind11.h" +namespace py = pybind11; + namespace paddle { namespace pybind { namespace details { @@ -191,6 +197,253 @@ inline void PyCPUTensorSetFromArray( std::memcpy(dst, array.data(), sizeof(uint16_t) * array.size()); } +template +void _sliceCompute(const framework::Tensor *in, framework::Tensor *out, + const platform::CPUDeviceContext &ctx, + const std::vector &axes, + const std::vector &starts) { + auto &eigen_place = *ctx.eigen_device(); + auto place = in->place(); + auto out_dims = out->dims(); + auto in_dims = in->dims(); + + auto offsets = Eigen::array(); + auto extents = Eigen::array(); + for (size_t i = 0; i < D; ++i) { + offsets[i] = 0; + extents[i] = out_dims[i]; + } + int start; + for (size_t i = 0; i < axes.size(); ++i) { + start = starts[i]; + if (start < 0) { + start = (start + in_dims[axes[i]]); + } + start = std::max(start, 0); + offsets[axes[i]] = start; + } + auto in_t = + framework::EigenTensor::From( + *in); + auto out_t = + framework::EigenTensor::From( + *out); + out_t.device(eigen_place) = in_t.slice(offsets, extents); +} + +template +void _concatCompute(const std::vector &ins, + paddle::framework::Tensor *out, + const platform::CPUDeviceContext &ctx, int64_t axis) { + if (axis == 0 && ins.size() < 10) { + size_t output_offset = 0; + for (auto &in : ins) { + auto in_stride = framework::stride_numel(in.dims()); + auto out_stride = framework::stride_numel(out->dims()); + paddle::operators::StridedNumelCopyWithAxis( + ctx, axis, out->data() + output_offset, out_stride, in.data(), + in_stride, in_stride[axis]); + output_offset += in_stride[axis]; + } + } else { + paddle::operators::math::ConcatFunctor + concat_functor; + concat_functor(ctx, ins, static_cast(axis), out); + } +} + +void _getSliceinfo(const framework::Tensor &self, py::object obj, + const int64_t dim, int64_t *pstart, int64_t *pstop, + int64_t *pstep, int64_t *pslicelength) { + auto &start = *pstart; + auto &stop = *pstop; + auto &step = *pstep; + auto &slicelength = *pslicelength; + const framework::DDim &srcDDim = self.dims(); + if (dim < 0 || dim >= srcDDim.size()) { + throw py::index_error(); + } + if (py::isinstance(obj)) { + size_t lstart, lstop, lstep, lslicelength; + py::slice s = static_cast(obj); + if (!s.compute(srcDDim[dim], &lstart, &lstop, &lstep, &lslicelength)) { + throw py::index_error(); + } + start = static_cast(lstart); + stop = static_cast(lstop); + step = static_cast(lstep); + slicelength = static_cast(lslicelength); + } else if (py::isinstance(obj)) { + start = static_cast(static_cast(obj)); + if (std::abs(start) >= srcDDim[dim]) { + throw py::index_error(); + } + start = (start >= 0) ? start : srcDDim[dim] - start; + stop = start + 1; + step = 1; + slicelength = 1; + } else { + throw py::index_error(); + } +} + +inline framework::Tensor *_getTensor(const framework::Tensor &self, + const framework::DDim &ddim) { + framework::Tensor *output = new framework::Tensor(); + output->Resize(ddim); + auto place = self.place(); + if (platform::is_cpu_place(place)) { + output->mutable_data(boost::get(place), self.type()); +#ifdef PADDLE_WITH_CUDA + } else { + if (platform::is_cuda_pinned_place(place)) { + output->mutable_data(boost::get(place), + self.type()); + } else if ((platform::is_gpu_place(place))) { + output->mutable_data(boost::get(place), self.type()); + } +#endif + } + return output; +} + +template +void _sliceDapper(const framework::Tensor *in, framework::Tensor *out, + const platform::CPUDeviceContext &ctx, + const std::vector &axes, const std::vector &starts, + int size) { + switch (size) { + case 1: + _sliceCompute(in, out, ctx, axes, starts); + break; + case 2: + _sliceCompute(in, out, ctx, axes, starts); + break; + case 3: + _sliceCompute(in, out, ctx, axes, starts); + break; + case 4: + _sliceCompute(in, out, ctx, axes, starts); + break; + case 5: + _sliceCompute(in, out, ctx, axes, starts); + break; + case 6: + _sliceCompute(in, out, ctx, axes, starts); + break; + case 7: + _sliceCompute(in, out, ctx, axes, starts); + break; + case 8: + _sliceCompute(in, out, ctx, axes, starts); + break; + case 9: + _sliceCompute(in, out, ctx, axes, starts); + break; + default: + PADDLE_THROW("dim size not exepected, current is %d", size); + break; + } +} + +template +inline framework::Tensor *_sliceWrapper(const framework::Tensor &self, + const platform::CPUDeviceContext &ctx, + py::object obj, int dim, int64_t start, + int64_t slicelength) { + framework::DDim dstDDim = self.dims(); + dstDDim[dim] = static_cast(slicelength); + std::vector axes({dim}); + std::vector starts({static_cast(start)}); + framework::Tensor *output = _getTensor(self, dstDDim); + _sliceDapper(&self, output, ctx, axes, starts, dstDDim.size()); + return output; +} + +template +inline framework::Tensor *_sliceAndConcat(const framework::Tensor &self, + py::object obj, int dim) { + platform::CPUDeviceContext ctx; + int64_t start, stop, step, slicelength; + _getSliceinfo(self, obj, dim, &start, &stop, &step, &slicelength); + if (step == 1 || slicelength == 1) { + return _sliceWrapper(self, ctx, obj, dim, start, slicelength); + } else { + std::vector ins; + for (auto i = 0; i < slicelength; ++i, start += step) { + ins.emplace_back(*_sliceWrapper(self, ctx, obj, dim, start, 1)); + } + + // do the concat operation + framework::DDim dstDDim = self.dims(); + dstDDim[dim] = static_cast(slicelength); + framework::Tensor *output1 = _getTensor(self, dstDDim); + _concatCompute(ins, output1, ctx, dim); + return output1; + } +} + +inline framework::Tensor *_sliceTensor(const framework::Tensor &self, + py::object obj, int dim) { + auto src_type = self.type(); + switch (src_type) { + case framework::proto::VarType::FP16: + return _sliceAndConcat(self, obj, dim); + case framework::proto::VarType::FP32: + return _sliceAndConcat(self, obj, dim); + case framework::proto::VarType::FP64: + return _sliceAndConcat(self, obj, dim); + case framework::proto::VarType::INT32: + return _sliceAndConcat(self, obj, dim); + case framework::proto::VarType::INT64: + return _sliceAndConcat(self, obj, dim); + case framework::proto::VarType::BOOL: + return _sliceAndConcat(self, obj, dim); + case framework::proto::VarType::INT16: + return _sliceAndConcat(self, obj, dim); + case framework::proto::VarType::UINT8: + return _sliceAndConcat(self, obj, dim); + default: + PADDLE_THROW("Not support type %d", src_type); + } +} + +inline framework::Tensor *_pySliceTensor(const framework::Tensor &self, + py::object obj) { + if (py::isinstance(obj)) { + py::list l = static_cast(obj); + std::unique_ptr target; + framework::Tensor *src = const_cast(&self); + for (auto i = 0; i < static_cast(l.size()); ++i) { + src = _sliceTensor(*src, l[i], i); + if (i + 1 == static_cast(l.size())) { + return src; + } else { + target.reset(src); + } + } + return nullptr; + } else { + return _sliceTensor(self, obj, 0); + } +} + +inline framework::Tensor *PySliceTensor(const framework::Tensor &self, + py::object obj) { + if (platform::is_gpu_place(self.place())) { + std::unique_ptr holder; + framework::Tensor src; + framework::TensorCopySync(self, platform::CPUPlace(), &src); + framework::Tensor *output = _pySliceTensor(src, obj); + holder.reset(output); + framework::Tensor *dst = _getTensor(*output, output->dims()); + framework::TensorCopySync(*output, self.place(), dst); + return dst; + } else { + return _pySliceTensor(self, obj); + } +} + #ifdef PADDLE_WITH_CUDA template void PyCUDATensorSetFromArray( diff --git a/python/paddle/dataset/flowers.py b/python/paddle/dataset/flowers.py index 57c5e83c82d216f55a33e568849d87689f86270f..5728a37fc33467968ca68de316d963f31f66da03 100644 --- a/python/paddle/dataset/flowers.py +++ b/python/paddle/dataset/flowers.py @@ -46,9 +46,9 @@ import six from six.moves import cPickle as pickle __all__ = ['train', 'test', 'valid'] -DATA_URL = 'http://paddlemodels.cdn.bcebos.com/flowers/102flowers.tgz' -LABEL_URL = 'http://paddlemodels.cdn.bcebos.com/flowers/imagelabels.mat' -SETID_URL = 'http://paddlemodels.cdn.bcebos.com/flowers/setid.mat' +DATA_URL = 'http://paddlemodels.bj.bcebos.com/flowers/102flowers.tgz' +LABEL_URL = 'http://paddlemodels.bj.bcebos.com/flowers/imagelabels.mat' +SETID_URL = 'http://paddlemodels.bj.bcebos.com/flowers/setid.mat' DATA_MD5 = '52808999861908f626f3c1f4e79d11fa' LABEL_MD5 = 'e0620be6f572b9609742df49c70aed4d' SETID_MD5 = 'a5357ecc9cb78c4bef273ce3793fc85c' diff --git a/python/paddle/dataset/wmt14.py b/python/paddle/dataset/wmt14.py index adc0c1aac80cbdb0b0c04535fc39b6a172d23eec..450f159f9d10c282849e6e26fb595fb683b1a02e 100644 --- a/python/paddle/dataset/wmt14.py +++ b/python/paddle/dataset/wmt14.py @@ -15,7 +15,7 @@ WMT14 dataset. The original WMT14 dataset is too large and a small set of data for set is provided. This module will download dataset from -http://paddlepaddle.cdn.bcebos.com/demo/wmt_shrinked_data/wmt14.tgz and +http://paddlepaddle.bj.bcebos.com/demo/wmt_shrinked_data/wmt14.tgz and parse training set and test set into paddle reader creators. """ diff --git a/python/paddle/fluid/__init__.py b/python/paddle/fluid/__init__.py index 63b7b28948a783bc5910d53f6e65a8c09d77bdb1..18f01ca1374a24cec3bf882d347596dd38f4fd21 100644 --- a/python/paddle/fluid/__init__.py +++ b/python/paddle/fluid/__init__.py @@ -59,13 +59,14 @@ from .parallel_executor import * from . import compiler from .compiler import * from paddle.fluid.layers.math_op_patch import monkey_patch_variable +from . import install_check Tensor = LoDTensor __all__ = framework.__all__ + executor.__all__ + \ trainer.__all__ + inferencer.__all__ + transpiler.__all__ + \ parallel_executor.__all__ + lod_tensor.__all__ + \ - data_feed_desc.__all__ + async_executor.__all__ + compiler.__all__ + [ + data_feed_desc.__all__ + async_executor.__all__ + compiler.__all__ + [ 'io', 'initializer', 'layers', @@ -91,6 +92,7 @@ __all__ = framework.__all__ + executor.__all__ + \ 'unique_name', 'recordio_writer', 'Scope', + 'install_check', ] diff --git a/python/paddle/fluid/compiler.py b/python/paddle/fluid/compiler.py index 5732377bd60f849494ae7e463f40d4843ffa2c23..ac2a40a7c25f7c3ff0cc103647355da55d27fec3 100644 --- a/python/paddle/fluid/compiler.py +++ b/python/paddle/fluid/compiler.py @@ -17,9 +17,10 @@ import os import six import sys from .. import compat as cpt +from . import framework +from .framework import cuda_places, cpu_places from . import core -from . import framework __all__ = ['CompiledProgram', 'ExecutionStrategy', 'BuildStrategy'] @@ -44,21 +45,6 @@ def _is_pserver_mode(main_program): return False -def get_available_places(use_cuda): - if use_cuda: - gpus_env = os.getenv("FLAGS_selected_gpus") - if gpus_env: - gpus = [int(s) for s in gpus_env.split(",")] - else: - gpus = [i for i in six.moves.range(core.get_cuda_device_count())] - places = [core.CUDAPlace(i) for i in gpus] - else: - cpu_num = int(os.environ.get('CPU_NUM', multiprocessing.cpu_count())) - places = [core.CPUPlace() for _ in six.moves.range(cpu_num)] - assert places, "no place for execution" - return places - - class CompiledProgram(object): """ Compiles to Graph for execution. @@ -117,7 +103,8 @@ class CompiledProgram(object): loss_name=None, build_strategy=None, exec_strategy=None, - share_vars_from=None): + share_vars_from=None, + places=None): """Configs the program to run in data parallel way. Args: @@ -132,10 +119,18 @@ class CompiledProgram(object): threads are used, how many iterations to clean up the temp variables. For more information, please refer to fluid.ExecutionStrategy. Default None. - share_vars_from(CompiledProgram): If provide, this CompiledProgram + share_vars_from(CompiledProgram): If provided, this CompiledProgram will share variables from `share_vars_from`. `share_vars_from` must be run by the executor before this CompiledProgram so that vars are ready. + places(list(CUDAPlace)|list(CPUPlace)|None): If provided, only compile + program in the given places. Otherwise, the places used when compiled + is determined by the Executor, and the places used are controlled + by environment variables: FLAGS_selected_gpus or CUDA_VISIBLE_DEVICES + if using GPU; or CPU_NUM if using CPU. For example, if you want to + run on GPU 0 and 1, set places=[fluid.CUDAPlace(0), fluid.CUDAPlace(1)]. + If you want to run on 2 CPU cores, set places=[fluid.CPUPlace()]*2. + Returns: self """ @@ -150,6 +145,12 @@ class CompiledProgram(object): self._exec_strategy = ExecutionStrategy() if self._build_strategy is None: self._build_strategy = BuildStrategy() + if places is not None: + if not isinstance(places, (list, tuple)): + places = [places] + self._places = places + else: + self._places = None self._build_strategy.is_distribution = _is_pserver_mode(self._program) return self @@ -192,7 +193,15 @@ class CompiledProgram(object): self._local_scopes = [] self._exec_strategy.use_cuda = use_cuda - self._places = get_available_places(self._exec_strategy.use_cuda) + has_set_place = (self._places is not None) + if has_set_place: + for p in self._places: + assert p._type() == self._place._type(), \ + "Place type not match. You may set the wrong type of places" + else: + self._places = cuda_places( + ) if self._exec_strategy.use_cuda else cpu_places() + assert self._places, "no place for execution" if self._exec_strategy.num_threads == 0: if self._exec_strategy.use_cuda: @@ -200,9 +209,7 @@ class CompiledProgram(object): # performance. Worth tunning for other models in the future. self._exec_strategy.num_threads = len(self._places) * 4 else: - cpu_num = int( - os.environ.get('CPU_NUM', multiprocessing.cpu_count())) - self._exec_strategy.num_threads = cpu_num * 2 + self._exec_strategy.num_threads = len(self._places) * 2 # FIXME(dzhwinter): enable_inplace should be after memory_optimize # if turn on python memory optimize, turn off the inplace_pass. diff --git a/python/paddle/fluid/contrib/int8_inference/README.md b/python/paddle/fluid/contrib/int8_inference/README.md index 460ae393f158ae320c93601365a68b8cfe2ba50e..3228610f968c9bec86d6bf781585038ffd095bce 100644 --- a/python/paddle/fluid/contrib/int8_inference/README.md +++ b/python/paddle/fluid/contrib/int8_inference/README.md @@ -45,28 +45,41 @@ You can load INT8 model by load_inference_model [API](https://github.com/PaddleP ``` ## 3. Result -We provide the results of accuracy measurd on [Intel® Xeon® Platinum Gold Processor](https://ark.intel.com/products/120489/Intel-Xeon-Gold-6148-Processor-27-5M-Cache-2-40-GHz- "Intel® Xeon® Gold 6148 Processor") (also known as Intel® Xeon® Skylake6148). +We provide the results of accuracy and performance measured on Intel(R) Xeon(R) Gold 6271 (single core). + +**I. Top-1 Accuracy on Intel(R) Xeon(R) Gold 6271** | Model | Dataset | FP32 Accuracy | INT8 Accuracy | Accuracy Diff | -| ------------ | ------------ | ------------ | ------------ | ------------ | -| ResNet-50 | Small | 72.00% | 72.00% | 0.00% | -| MobileNet-V1 | Small | 62.00% | 62.00% | 0.00% | -| ResNet-50 | Full ImageNet Val | 76.63% | 76.17% | 0.46% | -| MobileNet-V1 | Full ImageNet Val | 70.78% | 70.49% | 0.29% | +| :------------: | :------------: | :------------: | :------------: | :------------: | +| ResNet-50 | Full ImageNet Val | 76.63% | 76.23% | 0.40% | +| MobileNet-V1 | Full ImageNet Val | 70.78% | 70.47% | 0.31% | + +**II. Throughput on Intel(R) Xeon(R) Gold 6271 (batch size 1 on single core)** + +| Model | Dataset | FP32 Throughput | INT8 Throughput | Ratio(INT8/FP32) | +| :------------: | :------------: | :------------: | :------------: | :------------: | +| ResNet-50 | Full ImageNet Val | 11.54 images/s | 32.2 images/s | 2.79 | +| MobileNet-V1 | Full ImageNet Val | 49.21 images/s | 108.37 images/s | 2.2 | -Please note that [Small](http://paddle-inference-dist.cdn.bcebos.com/int8/calibration_test_data.tar.gz "Small") is a subset of [full ImageNet validation dataset](http://www.image-net.org/challenges/LSVRC/2012/nnoupb/ILSVRC2012_img_val.tar "full ImageNet validation dataset"). +Please note that [full ImageNet validation dataset](http://www.image-net.org/challenges/LSVRC/2012/nnoupb/ILSVRC2012_img_val.tar "full ImageNet validation dataset") can be downloaded by script `test_calibration.py` with `DATASET=full`. Notes: * The accuracy measurement requires the model with `label`. -* The INT8 theoretical speedup is ~1.33X on Intel® Xeon® Skylake Server (please refer to `This allows for 4x more input at the cost of 3x more instructions or 33.33% more compute` in [Reference](https://software.intel.com/en-us/articles/lower-numerical-precision-deep-learning-inference-and-training "Reference")). +* The INT8 theoretical speedup is 4X on Intel® Xeon® Cascadelake Server (please refer to `The theoretical peak compute gains are 4x int8 OPS over fp32 OPS.` in [Reference](https://software.intel.com/en-us/articles/lower-numerical-precision-deep-learning-inference-and-training "Reference")). Therefore, op-level gain is 4X and topology-level is smaller. ## 4. How to reproduce the results -* Small dataset +* Small dataset (Single core) ```bash FLAGS_use_mkldnn=true python python/paddle/fluid/contrib/tests/test_calibration.py ``` -* Full dataset +* Full dataset (Single core) ```bash FLAGS_use_mkldnn=true DATASET=full python python/paddle/fluid/contrib/tests/test_calibration.py ``` + +* Full dataset (Multi-core) +```bash +FLAGS_use_mkldnn=true OMP_NUM_THREADS=20 DATASET=full python python/paddle/fluid/contrib/tests/test_calibration.py +``` +> Notes: This is an example command with 20 cores by using set `OMP_NUM_THREADS` value. diff --git a/python/paddle/fluid/contrib/slim/core/compressor.py b/python/paddle/fluid/contrib/slim/core/compressor.py index 832ade497c67ee16b6068cad4f0edace94128989..1547b6abbe660b6be7a681a4e270e3080a5dac36 100644 --- a/python/paddle/fluid/contrib/slim/core/compressor.py +++ b/python/paddle/fluid/contrib/slim/core/compressor.py @@ -271,7 +271,7 @@ class Compressor(object): self.eval_reader = eval_reader self.teacher_graphs = [] for teacher in teacher_programs: - self.teacher_graphs.append(ImitationGraph(teacher, scope=scope)) + self.teacher_graphs.append(GraphWrapper(teacher)) self.checkpoint = None self.checkpoint_path = checkpoint_path diff --git a/python/paddle/fluid/contrib/slim/core/config.py b/python/paddle/fluid/contrib/slim/core/config.py index 12df9fcd1b0042c26aabac88d6ecba5fb827cba0..9bb395aee95b5236850ca51096ed870ab1d27b62 100644 --- a/python/paddle/fluid/contrib/slim/core/config.py +++ b/python/paddle/fluid/contrib/slim/core/config.py @@ -19,6 +19,7 @@ from collections import OrderedDict from ..prune import * from ..quantization import * from .strategy import * +from ..distillation import * __all__ = ['ConfigFactory'] """This factory is used to create instances by loading and parsing configure file with yaml format. diff --git a/python/paddle/fluid/contrib/slim/distillation/__init__.py b/python/paddle/fluid/contrib/slim/distillation/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..455c7c563318daec42892e71dcf0a48f22f376a1 --- /dev/null +++ b/python/paddle/fluid/contrib/slim/distillation/__init__.py @@ -0,0 +1,21 @@ +# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserve. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from . import distiller +from .distiller import * +from . import distillation_strategy +from .distillation_strategy import * + +__all__ = distiller.__all__ +__all__ += distillation_strategy.__all__ diff --git a/python/paddle/fluid/contrib/slim/distillation/distillation_strategy.py b/python/paddle/fluid/contrib/slim/distillation/distillation_strategy.py new file mode 100644 index 0000000000000000000000000000000000000000..1f11f07a51e713d42cee5e63bd2a9a02d82232f7 --- /dev/null +++ b/python/paddle/fluid/contrib/slim/distillation/distillation_strategy.py @@ -0,0 +1,94 @@ +# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from ..core.strategy import Strategy +from ....framework import Program, program_guard +from .... import Executor +import logging + +__all__ = ['DistillationStrategy'] + +logging.basicConfig(format='%(asctime)s-%(levelname)s: %(message)s') +_logger = logging.getLogger(__name__) +_logger.setLevel(logging.INFO) + + +class DistillationStrategy(Strategy): + def __init__(self, distillers=None, start_epoch=0, end_epoch=0): + """ + Args: + distillers(list): A list of distiller used to combine student graph and teacher graph + by adding some loss. + start_epoch(int): The epoch when to merge student graph and teacher graph for + distillation training. default: 0 + end_epoch(int): The epoch when to finish distillation training. default: 0 + + """ + super(DistillationStrategy, self).__init__(start_epoch, end_epoch) + self.distillers = distillers + + def on_compression_begin(self, context): + # load from checkpoint + if context.epoch_id > 0: + if context.epoch_id > self.start_epoch and context.epoch_id < self.end_epoch: + _logger.info('Restore DistillationStrategy') + self._create_distillation_graph(context) + _logger.info('Restore DistillationStrategy finish.') + + def on_epoch_begin(self, context): + if self.start_epoch == context.epoch_id: + _logger.info('DistillationStrategy::on_epoch_begin.') + self._create_distillation_graph(context) + _logger.info('DistillationStrategy set optimize_graph.') + + def _create_distillation_graph(self, context): + """ + step 1: Merge student graph and teacher graph into distillation graph. + step 2: Add loss into distillation graph by distillers. + step 3: Append backward ops and optimize ops into distillation graph for training. + """ + # step 1 + teacher = context.teacher_graphs[0] + for var in teacher.program.list_vars(): + var.stop_gradient = True + graph = context.train_graph.clone() + graph.merge(teacher) + graph.out_nodes['student_loss'] = graph.out_nodes['loss'] + + # step 2 + for distiller in self.distillers: + graph = distiller.distiller_loss(graph) + + # step 3 + startup_program = Program() + with program_guard(graph.program, startup_program): + context.distiller_optimizer._name = 'distillation_optimizer' + context.distiller_optimizer.minimize( + graph.var(graph.out_nodes['loss'])._var) + exe = Executor(context.place) + exe.run(startup_program, scope=context.scope) + + # backup graph for fine-tune after distillation + context.put('distillation_backup_optimize_graph', + context.optimize_graph) + context.optimize_graph = graph + + def on_epoch_end(self, context): + if context.epoch_id == (self.end_epoch - 1): + _logger.info('DistillationStrategy::on_epoch_end.') + # restore optimize_graph for fine-tune or other strategy in next stage. + context.optimize_graph = context.get( + 'distillation_backup_optimize_graph') + _logger.info( + 'DistillationStrategy set context.optimize_graph to None.') diff --git a/python/paddle/fluid/contrib/slim/distillation/distiller.py b/python/paddle/fluid/contrib/slim/distillation/distiller.py new file mode 100644 index 0000000000000000000000000000000000000000..13bb35a8be73ed29e907308d08a33cdc13dee069 --- /dev/null +++ b/python/paddle/fluid/contrib/slim/distillation/distiller.py @@ -0,0 +1,188 @@ +# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from .... import layers +from .... import optimizer +from .... import Executor +from .... import Program +from .... import program_guard +from .... import regularizer + +__all__ = ['FSPDistiller', 'L2Distiller'] + + +class L2Distiller(object): + """ + Combine two layers from student net and teacher net by l2-loss. + And add the loss into the total loss using for distillation training. + """ + + def __init__(self, + student_feature_map, + teacher_feature_map, + distillation_loss_weight=1): + """ + Args: + student_feature_map(str): The name of feature map from student network. + teacher_feature_map(str): The name of feature map from teacher network. + It's shape should be the same with student network. + distillation_loss_weight(float): The weight of the l2-loss. + """ + self.student_feature_map = student_feature_map + self.teacher_feature_map = teacher_feature_map + self.distillation_loss_weight = distillation_loss_weight + + def distiller_loss(self, graph): + """ + Modify graph inplace to add l2-loss. + Args: + graph(GraphWrapper): The graph to be modified. + Returns: + GraphWrapper: The modified graph. + """ + distiller_pass = L2DistillerPass(self.student_feature_map, + self.teacher_feature_map, + self.distillation_loss_weight) + dis_graph = distiller_pass.apply(graph) + return dis_graph + + +class L2DistillerPass(object): + """ + The pass used to add l2-loss. + """ + + def __init__(self, + student_feature_map, + teacher_feature_map, + distillation_loss_weight=1): + """ + Args: + student_feature_map(str): The name of feature map from student network. + teacher_feature_map(str): The name of feature map from teacher network. + It's shape should be the same with student network. + distillation_loss_weight(float): The weight of the l2-loss. + """ + self.student_feature_map = student_feature_map + self.teacher_feature_map = teacher_feature_map + self.distillation_loss_weight = distillation_loss_weight + + def apply(self, graph): + ret_graph = graph + with program_guard(ret_graph.program): + + student_feature_map = ret_graph.var(self.student_feature_map)._var + teacher_feature_map = ret_graph.var(self.teacher_feature_map)._var + l2loss = layers.reduce_mean( + layers.square(student_feature_map - teacher_feature_map)) + + distillation_loss = l2loss * self.distillation_loss_weight + student_loss = ret_graph.var(ret_graph.out_nodes['loss'])._var + loss = distillation_loss + student_loss + + ret_graph.out_nodes[ + 'l2loss_' + self.student_feature_map + "_" + + self.teacher_feature_map] = distillation_loss.name + ret_graph.out_nodes['loss'] = loss.name + return ret_graph + + +class FSPDistiller(object): + """ + Combine layers from student net and teacher net by fsp-loss. + """ + + def __init__(self, student_pairs, teacher_pairs, + distillation_loss_weight=1): + """ + Args: + student_pairs(list): Each tuple, with two variable names, in student_pairs indicates + a section in student network. The variables in a tuple should + have the same feature map size. + teacher_pairs(list): Each tuple, with two variable names, in teacher_pairs indicates + a section in teacher network. The variables in a tuple should + have the same feature map size. Varibale named teacher_pairs[i][j] + should has the save channel number with that of variable named + student_pairs[i][j]. + + distillation_loss_weight(float): The weight of the fsp-loss. default: 1. + """ + self.student_pairs = student_pairs + self.teacher_pairs = teacher_pairs + self.distillation_loss_weight = distillation_loss_weight + + def distiller_loss(self, graph): + """ + Modify graph inplace to add fsp-loss. + Args: + graph(GraphWrapper): The graph to be modified. + Returns: + GraphWrapper: The modified graph. + """ + distiller_pass = FSPDistillerPass(self.student_pairs, + self.teacher_pairs, + self.distillation_loss_weight) + dis_graph = distiller_pass.apply(graph) + return dis_graph + + +class FSPDistillerPass(object): + ''' + Combine layers from student net and teacher net by fsp-loss. + ''' + + def __init__(self, s_pairs, t_pairs, distillation_loss_weight=1): + """ + Args: + s_pairs(list): Each tuple, with two variable names, in student_pairs indicates + a section in student network. The variables in a tuple should + have the same feature map size. + t_pairs(list): Each tuple, with two variable names, in teacher_pairs indicates + a section in teacher network. The variables in a tuple should + have the same feature map size. Varibale named teacher_pairs[i][j] + should has the save channel number with that of variable named + student_pairs[i][j]. + + distillation_loss_weight(float): The weight of the fsp-loss. default: 1. + """ + self.s_pairs = s_pairs + self.t_pairs = t_pairs + self.distillation_loss_weight = distillation_loss_weight + + def apply(self, graph): + ret_graph = graph + with program_guard(ret_graph.program): + losses = [] + for s_pair, t_pair in zip(self.s_pairs, self.t_pairs): + s_pair_start = ret_graph.var(s_pair[0])._var + s_pair_end = ret_graph.var(s_pair[1])._var + s_fsp_matrix = self._fsp_matrix(s_pair_start, s_pair_end) + t_pair_start = ret_graph.var(t_pair[0])._var + t_pair_end = ret_graph.var(t_pair[1])._var + t_fsp_matrix = self._fsp_matrix(t_pair_start, t_pair_end) + l2_loss = layers.reduce_mean( + layers.square(s_fsp_matrix - t_fsp_matrix)) + losses.append(l2_loss) + distillation_loss = layers.sum( + losses) * self.distillation_loss_weight + student_loss = ret_graph.var(ret_graph.out_nodes['loss'])._var + loss = distillation_loss + student_loss + + ret_graph.out_nodes[ + 'fsp_distillation_loss'] = distillation_loss.name + ret_graph.out_nodes['loss'] = loss.name + return ret_graph + + def _fsp_matrix(self, fea_map_0, fea_map_1): + return layers.fsp_matrix(fea_map_0, fea_map_1) diff --git a/python/paddle/fluid/contrib/slim/graph/graph_wrapper.py b/python/paddle/fluid/contrib/slim/graph/graph_wrapper.py index 8694be782708a6d47b3e1450305975d34fd3bd7f..c208553fd811c7b18f9168b8fcae4da6e5856070 100644 --- a/python/paddle/fluid/contrib/slim/graph/graph_wrapper.py +++ b/python/paddle/fluid/contrib/slim/graph/graph_wrapper.py @@ -300,7 +300,9 @@ class GraphWrapper(object): graph(GraphWrapper): The graph to be merged by current graph. """ for var in graph.program.list_vars(): - self.program.global_block()._clone_variable(var) + new_var = self.program.global_block()._clone_variable( + var, force_persistable=False) + new_var.stop_gradient = var.stop_gradient # TODO: parameters should be cloned for op in graph.ops(): op = op._op @@ -309,12 +311,12 @@ class GraphWrapper(object): attrs = {} for input_name in op.input_names: inputs[input_name] = [ - self.var(in_var_name) - for in_var_name in op.inputs(input_name) + self.var(in_var_name)._var + for in_var_name in op.input(input_name) ] for output_name in op.output_names: outputs[output_name] = [ - self.var(out_var_name) + self.var(out_var_name)._var for out_var_name in op.output(output_name) ] for attr_name in op.attr_names: diff --git a/python/paddle/fluid/contrib/slim/quantization/__init__.py b/python/paddle/fluid/contrib/slim/quantization/__init__.py index 6c26475f48855674d97abf5778a631646734fcf8..1c51aa15373779b06273296a27d913c070079f41 100644 --- a/python/paddle/fluid/contrib/slim/quantization/__init__.py +++ b/python/paddle/fluid/contrib/slim/quantization/__init__.py @@ -16,5 +16,7 @@ from __future__ import print_function from . import quantization_pass from .quantization_pass import * +from . import quantization_strategy +from .quantization_strategy import * -__all__ = quantization_pass.__all__ +__all__ = quantization_pass.__all__ + quantization_strategy.__all__ diff --git a/python/paddle/fluid/contrib/slim/quantization/quantization_pass.py b/python/paddle/fluid/contrib/slim/quantization/quantization_pass.py index 5dcef506711b78c2aef30d16719f8766359ae8f3..ab3bd8bd182c7e933c58e2ba2f3548f2d001cbdb 100644 --- a/python/paddle/fluid/contrib/slim/quantization/quantization_pass.py +++ b/python/paddle/fluid/contrib/slim/quantization/quantization_pass.py @@ -14,15 +14,10 @@ import collections import numpy as np -import six from ..... import compat as cpt from .... import core -from .... import Executor from ....framework import IrGraph from ....framework import IrNode -from ....framework import Program -from ....initializer import Constant -from ....initializer import NumpyArrayInitializer from .... import unique_name __all__ = [ @@ -107,7 +102,6 @@ class QuantizationTransformPass(object): self._window_size = window_size self._moving_rate = moving_rate - self._need_initialized = collections.OrderedDict() self._quantizable_ops = ['conv2d', 'depthwise_conv2d', 'mul'] self._conv_ops = ['conv2d', 'depthwise_conv2d'] self._quantizable_grad_ops = [ @@ -127,7 +121,8 @@ class QuantizationTransformPass(object): """ assert isinstance(graph, IrGraph), 'graph must be the instance of IrGraph.' - self._need_initialized.clear() + #sequential_execution = core.get_pass('sequential_execution_pass') + #sequential_execution.apply(graph.graph) self._is_test = graph.is_test() # marked the variable which has been dequantized. dequantized_vars = collections.OrderedDict() @@ -135,6 +130,8 @@ class QuantizationTransformPass(object): def _transform_forward(graph, op): for var_node in op.inputs: + if var_node.name() not in op.input_arg_names(): + continue if var_node.name() in dequantized_vars: dequant_var_node = dequantized_vars[var_node.name()] else: @@ -168,6 +165,8 @@ class QuantizationTransformPass(object): def _transform_backward(graph, op): no_dequanted_input_vars = True for var_node in op.inputs: + if var_node.name() not in op.input_arg_names(): + continue if var_node.name() in dequantized_vars: dequant_var_node = dequantized_vars[var_node.name()] graph.update_input_link(var_node, dequant_var_node, op) @@ -188,25 +187,7 @@ class QuantizationTransformPass(object): for op in ops: if op.name() in self._quantizable_grad_ops: _transform_backward(graph, op) - - if len(self._need_initialized) > 0: - assert self._scope is not None, \ - 'The scope cannot be set None when activation_quantize_type equals to range_abs_max.' - assert self._place is not None, \ - 'The place cannot be set None when activation_quantize_type equals to range_abs_max.' - init_program = Program() - for var_desc, initializer in six.iteritems(self._need_initialized): - var = init_program.global_block().create_var( - name=var_desc.name(), - shape=var_desc.shape(), - dtype=var_desc.dtype(), - type=var_desc.type(), - lod_level=var_desc.lod_level(), - persistable=var_desc.persistable()) - initializer(var, init_program.global_block()) - exe = Executor(self._place) - exe.run(program=init_program, scope=self._scope) - + graph.resolve_hazard() return graph def _create_global_step(self, graph): @@ -222,8 +203,9 @@ class QuantizationTransformPass(object): var_type=core.VarDesc.VarType.LOD_TENSOR, shape=[1], var_dtype=core.VarDesc.VarType.INT64) - self._need_initialized[global_step_in.var()] = \ - Constant(value=0, force_cpu=True) + self._init_var_node( + global_step_in, np.zeros( + [1], dtype='int64')) global_step_out = graph.create_var_node_from_desc( global_step_in.var()) # The attribute of `op_role` is needed by ParallelExecutor. @@ -300,7 +282,9 @@ class QuantizationTransformPass(object): var_type=core.VarDesc.VarType.LOD_TENSOR, shape=[1], var_dtype=var_node.dtype()) - self._need_initialized[scale_in_node.var()] = Constant(value=0.001) + data_type = 'float64' if var_node.dtype( + ) == core.VarDesc.VarType.FP64 else 'float32' + self._init_var_node(scale_in_node, np.array([0.001], dtype=data_type)) scale_out_node = graph.create_var_node_from_desc(scale_in_node.var()) inputs = {'X': var_node, 'InScale': scale_in_node} @@ -313,7 +297,11 @@ class QuantizationTransformPass(object): var_type=core.VarDesc.VarType.LOD_TENSOR, shape=[self._window_size], var_dtype=var_node.dtype()) - self._need_initialized[scales_node.var()] = Constant(value=0) + data_type = 'float64' if var_node.dtype( + ) == core.VarDesc.VarType.FP64 else 'float32' + self._init_var_node( + scales_node, np.zeros( + [self._window_size], dtype=data_type)) inputs['Iter'] = self._global_step outputs['OutScales'] = scales_node attrs = { @@ -353,7 +341,9 @@ class QuantizationTransformPass(object): var_type=core.VarDesc.VarType.LOD_TENSOR, shape=[1], var_dtype=var_node.dtype()) - self._need_initialized[scale_in_node.var()] = Constant(value=0.001) + data_type = 'float64' if var_node.dtype( + ) == core.VarDesc.VarType.FP64 else 'float32' + self._init_var_node(scale_in_node, np.array([0.001], dtype=data_type)) scale_out_node = graph.create_var_node_from_desc(scale_in_node.var()) ins = {'X': var_node, 'InScale': scale_in_node} @@ -364,13 +354,15 @@ class QuantizationTransformPass(object): var_type=core.VarDesc.VarType.LOD_TENSOR, var_dtype=var_node.dtype(), shape=[1]) - self._need_initialized[state_in_node.var()] = Constant(value=1) + data_type = 'float64' if var_node.dtype( + ) == core.VarDesc.VarType.FP64 else 'float32' + self._init_var_node(scale_in_node, np.ones([1], dtype=data_type)) accum_in_node = graph.create_persistable_node( name=unique_name.generate('accum'), var_type=core.VarDesc.VarType.LOD_TENSOR, var_dtype=var_node.dtype(), shape=[1]) - self._need_initialized[accum_in_node.var()] = Constant(value=1) + self._init_var_node(accum_in_node, np.ones([1], dtype=data_type)) state_out_node = graph.create_var_node_from_desc(state_in_node.var( )) accum_out_node = graph.create_var_node_from_desc(accum_in_node.var( @@ -490,6 +482,16 @@ class QuantizationTransformPass(object): graph.link_to(dequant_op_node, dequant_var_node) return dequant_var_node + def _init_var_node(self, var_node, value): + assert isinstance( + value, np.ndarray), 'The type of value should be numpy array.' + assert self._scope is not None, \ + 'The scope cannot be set None when activation_quantize_type equals to range_abs_max.' + assert self._place is not None, \ + 'The place cannot be set None when activation_quantize_type equals to range_abs_max.' + tensor = self._scope.var(var_node.name()).get_tensor() + tensor.set(value, self._place) + def _quantized_var_name(self, var_name): """ Return quantized variable name for the input `var_name`. @@ -592,7 +594,8 @@ class QuantizationFreezePass(object): self._weight_bits) self._restore_var(input_arg_name, quantized_param_v) else: - scale_v = graph.var_node(op_node.output('OutScale')[0]) + scale_v = self._to_node(op_node.outputs, + op_node.output('OutScale')[0]) self._var_scale_map[input_arg_name] = scale_v ops = graph.all_op_nodes() @@ -613,32 +616,35 @@ class QuantizationFreezePass(object): for op_node in ops: # insert dequant_op after fc/conv, need to rename inputs of the followed ops for var_node in op_node.inputs: - name = var_node.name() - if name in self._op_output_rename_map: - old_in = graph.var_node(name) - new_in = self._op_output_rename_map[name] + if var_node.node in self._op_output_rename_map: + old_in = var_node + new_in = self._op_output_rename_map[var_node.node] graph.update_input_link(old_in, new_in, op_node) # remove the unused var node in the graph self._remove_unused_var_nodes(graph) + graph.resolve_hazard() return graph def _remove_fake_quant_and_dequant_op(self, graph, op_node): - k = op_node.output('Out')[0] - v = op_node.input('X')[0] - if v not in self._op_input_rename_map: - self._op_input_rename_map[k] = v + k = self._to_node(op_node.outputs, op_node.output('Out')[0]) + v = self._to_node(op_node.inputs, op_node.input('X')[0]) + if v.node not in self._op_input_rename_map: + self._op_input_rename_map[k.node] = v else: - self._op_input_rename_map[k] = self._op_input_rename_map[v] + self._op_input_rename_map[k.node] = self._op_input_rename_map[ + v.node] graph.safe_remove_nodes(op_node) def _insert_post_channel_dequant_op(self, graph, op_node): persistable_vars = [p.name() for p in graph.all_persistable_nodes()] for var_node in op_node.inputs: name = var_node.name() - if name in self._op_input_rename_map: - old_in = graph.var_node(name) - new_in = graph.var_node(self._op_input_rename_map[name]) + if name not in op_node.input_arg_names(): + continue + if var_node.node in self._op_input_rename_map: + old_in = var_node + new_in = self._op_input_rename_map[var_node.node] new_in.clear_outputs() graph.update_input_link(old_in, new_in, op_node) original_var_name = self._original_var_name(name) @@ -653,28 +659,20 @@ class QuantizationFreezePass(object): assert isinstance(scale_v, IrNode) scale_var_node = self._var_scale_map[original_var_name] - if len(op_node.outputs) != 1: + if len(op_node.output_arg_names()) != 1: raise ValueError("Only support one output, but op %s has" " more than one output." % (op_node.name())) - output_var_node = op_node.outputs[0] + output_var_node = self._to_node(op_node.outputs, + op_node.output_arg_names()[0]) weight_scale_node = graph.create_persistable_node( name=unique_name.generate('channel_scale'), var_type=core.VarDesc.VarType.LOD_TENSOR, shape=[channel_scale.shape[0]], var_dtype=output_var_node.dtype()) - init_program = Program() - weight_scale_var = init_program.global_block().create_var( - name=weight_scale_node.name(), - shape=weight_scale_node.shape(), - dtype=weight_scale_node.dtype(), - type=weight_scale_node.type(), - lod_level=weight_scale_node.var().lod_level(), - persistable=weight_scale_node.persistable()) - initializer = NumpyArrayInitializer(value=channel_scale) - initializer(weight_scale_var, init_program.global_block()) - exe = Executor(self._place) - exe.run(program=init_program, scope=self._scope) + data_type = 'float64' if output_var_node.dtype( + ) == core.VarDesc.VarType.FP64 else 'float32' + self._init_var_node(weight_scale_node, channel_scale.astype(data_type)) dequant_var_node = graph.create_var_node( name=self._dequantized_var_name(output_var_node.name()), var_type=output_var_node.type(), @@ -695,16 +693,18 @@ class QuantizationFreezePass(object): graph.link_to(scale_var_node, dequant_op_node) graph.link_to(weight_scale_node, dequant_op_node) graph.link_to(dequant_op_node, dequant_var_node) - self._op_output_rename_map[output_var_node.name()] = dequant_var_node + self._op_output_rename_map[output_var_node.node] = dequant_var_node return dequant_var_node def _insert_post_dequant_op(self, graph, op_node): persistable_vars = [p.name() for p in graph.all_persistable_nodes()] for var_node in op_node.inputs: name = var_node.name() - if name in self._op_input_rename_map: - old_in = graph.var_node(name) - new_in = graph.var_node(self._op_input_rename_map[name]) + if name not in op_node.input_arg_names(): + continue + if var_node.node in self._op_input_rename_map: + old_in = var_node + new_in = self._op_input_rename_map[var_node.node] new_in.clear_outputs() graph.update_input_link(old_in, new_in, op_node) original_var_name = self._original_var_name(name) @@ -720,11 +720,12 @@ class QuantizationFreezePass(object): assert isinstance(scale_v, IrNode) scale_var_node = self._var_scale_map[original_var_name] - if len(op_node.outputs) != 1: + if len(op_node.output_arg_names()) != 1: raise ValueError("Only support one output, but op %s has" " more than one output." % (op_node.name())) - output_var_node = op_node.outputs[0] + output_var_node = self._to_node(op_node.outputs, + op_node.output_arg_names()[0]) dequant_var_node = graph.create_var_node( name=self._dequantized_var_name(output_var_node.name()), var_type=output_var_node.type(), @@ -742,9 +743,27 @@ class QuantizationFreezePass(object): graph.link_to(output_var_node, dequant_op_node) graph.link_to(scale_var_node, dequant_op_node) graph.link_to(dequant_op_node, dequant_var_node) - self._op_output_rename_map[output_var_node.name()] = dequant_var_node + self._op_output_rename_map[output_var_node.node] = dequant_var_node return dequant_var_node + def _init_var_node(self, var_node, value): + assert isinstance( + value, np.ndarray), 'The type of value should be numpy array.' + assert self._scope is not None, \ + 'The scope cannot be set None when activation_quantize_type equals to range_abs_max.' + assert self._place is not None, \ + 'The place cannot be set None when activation_quantize_type equals to range_abs_max.' + tensor = self._scope.var(var_node.name()).get_tensor() + tensor.set(value, self._place) + + def _to_node(self, nodes, node_name): + target_node = None + for n in nodes: + if n.name() == node_name: + target_node = n + assert target_node is not None, "Cannot find the target node in the giving set." + return target_node + def _load_var(self, name): return np.array(self._scope.find_var(name).get_tensor()) @@ -848,6 +867,7 @@ class ConvertToInt8Pass(object): # remove the unused var node in the graph self._remove_unused_var_nodes(graph) + graph.resolve_hazard() return graph def _convert_to_int8(self, graph, var_node): @@ -930,5 +950,5 @@ class TransformForMobilePass(object): for output_node in op_node.outputs: graph.link_to(dequant_node, output_node) graph.safe_remove_nodes(op_node) - + graph.resolve_hazard() return graph diff --git a/python/paddle/fluid/contrib/slim/quantization/quantization_strategy.py b/python/paddle/fluid/contrib/slim/quantization/quantization_strategy.py new file mode 100644 index 0000000000000000000000000000000000000000..6812b4c633d5b55d84fff935b696297f30b18c6b --- /dev/null +++ b/python/paddle/fluid/contrib/slim/quantization/quantization_strategy.py @@ -0,0 +1,209 @@ +# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import logging +import sys +import numpy as np +from .... import Executor +from .... import io +from .... import core +from ....compiler import CompiledProgram +from ....compiler import BuildStrategy +from ....framework import IrGraph +from ..core.strategy import Strategy +from .quantization_pass import * + +__all__ = ['QuantizationStrategy'] + +logging.basicConfig(format='%(asctime)s-%(levelname)s: %(message)s') +_logger = logging.getLogger(__name__) +_logger.setLevel(logging.INFO) + + +class QuantizationStrategy(Strategy): + """ + The strategy for Quantization. + """ + + def __init__(self, + start_epoch=0, + end_epoch=0, + float_model_save_path=None, + mobile_model_save_path=None, + int8_model_save_path=None, + activation_bits=8, + weight_bits=8, + activation_quantize_type='abs_max', + save_in_nodes=None, + save_out_nodes=None): + """ + Args: + start_epoch(int): The 'on_epoch_begin' function will be called in start_epoch. default: 0 + end_epoch(int): The 'on_epoch_end' function will be called in end_epoch. default: 0 + float_model_save_path(str): The path to save model with float weights. + None means it doesn't save float model. defalut: None. + mobile_model_save_path(str): The path to save model for paddle-mobile execution. + None means it doesn't save mobile model. defalut: None. + int8_model_save_path(str): The path to save model with int8_t weight. + None means it doesn't save int8 model. defalut: None. + activation_bits(int): quantization bit number for activation. default: 8. + weight_bits(int): quantization bit number for weights. The bias is not quantized. + default: 8. + activation_quantize_type(str): quantization type for activation, + now support 'abs_max', 'range_abs_max' and 'moving_average_abs_max'. + If use 'abs_max' mode, the quantization scale will be calculated + dynamically each step in both training and testing period. If use + 'range_abs_max', a static quantization scale will be calculated + during training and used in inference. + save_in_nodes(list): A list of variable names used to prune graph + for saving inference model. + save_out_nodes(list): A list of variable names used to prune graph + for saving inference model. + + """ + super(QuantizationStrategy, self).__init__(start_epoch, end_epoch) + self.start_epoch = start_epoch + self.end_epoch = end_epoch + self.float_model_save_path = float_model_save_path + self.mobile_model_save_path = mobile_model_save_path + self.int8_model_save_path = int8_model_save_path + self.activation_bits = activation_bits + self.weight_bits = weight_bits + self.activation_quantize_type = activation_quantize_type + self.save_out_nodes = save_out_nodes + self.save_in_nodes = save_in_nodes + + def on_epoch_begin(self, context): + """ + Insert fake_quantize_op and fake_dequantize_op before trainging and testing. + """ + super(QuantizationStrategy, self).on_compression_begin(context) + if self.start_epoch == context.epoch_id: + _logger.info('QuantizationStrategy::on_epoch_begin') + train_ir_graph = IrGraph( + core.Graph(context.optimize_graph.program.desc), for_test=False) + test_ir_graph = IrGraph( + core.Graph(context.eval_graph.program.desc), for_test=True) + transform_pass = QuantizationTransformPass( + scope=context.scope, + place=context.place, + weight_bits=self.weight_bits, + activation_bits=self.activation_bits, + activation_quantize_type=self.activation_quantize_type) + transform_pass.apply(train_ir_graph) + transform_pass.apply(test_ir_graph) + + build_strategy = BuildStrategy() + build_strategy.enable_inplace = False + build_strategy.memory_optimize = False + # for quantization training + context.optimize_graph.compiled_graph = CompiledProgram( + train_ir_graph.graph).with_data_parallel( + loss_name=context.optimize_graph.out_nodes['loss'], + build_strategy=build_strategy) + # for evaluation. And program compiled from ir graph must be with data parallel. + context.eval_graph.compiled_graph = CompiledProgram( + test_ir_graph.graph).with_data_parallel( + build_strategy=build_strategy) + # for saving inference model after training + context.put('quantization_test_ir_graph_backup', test_ir_graph) + _logger.info('Finish QuantizationStrategy::on_epoch_begin') + + def on_epoch_end(self, context): + """ + Free and save inference model. + """ + super(QuantizationStrategy, self).on_compression_end(context) + + if context.epoch_id == self.end_epoch: + _logger.info('QuantizationStrategy::on_epoch_end') + test_ir_graph = context.get('quantization_test_ir_graph_backup') + # freeze the graph after training + freeze_pass = QuantizationFreezePass( + scope=context.scope, + place=context.place, + weight_bits=self.weight_bits, + activation_bits=self.activation_bits) + freeze_pass.apply(test_ir_graph) + + # for other strategies + context.eval_graph.program = test_ir_graph.to_program() + + if self.save_out_nodes == None: + out_vars = [ + context.eval_graph.var(var_name)._var + for var_name in context.eval_graph.out_nodes.values() + ] + else: + out_vars = [ + context.eval_graph.var(var_name)._var + for var_name in self.save_out_nodes + ] + + if self.save_in_nodes == None: + in_vars = list(context.eval_graph.out_nodes.values()) + else: + in_vars = self.save_in_nodes + + # save float model + if self.float_model_save_path: + executor = Executor(context.place) + io.save_inference_model( + self.float_model_save_path, + in_vars, + out_vars, + executor, + main_program=test_ir_graph.to_program(), + model_filename='model', + params_filename='weights', + export_for_deployment=True) + + # save int8 model + if self.int8_model_save_path: + convert_int8_pass = ConvertToInt8Pass( + scope=context.scope, place=context.place) + convert_int8_pass.apply(test_ir_graph) + + executor = Executor(context.place) + io.save_inference_model( + self.int8_model_save_path, + in_vars, + out_vars, + executor, + main_program=test_ir_graph.to_program(), + model_filename='model', + params_filename='weights', + export_for_deployment=True) + + # save mobile model + if self.mobile_model_save_path: + if not self.int8_model_save_path: + # convert the weights as int8_t type + convert_int8_pass = ConvertToInt8Pass( + scope=context.scope, place=context.place) + convert_int8_pass.apply(test_ir_graph) + # make some changes on the graph for the mobile inference + mobile_pass = TransformForMobilePass() + mobile_pass.apply(test_ir_graph) + executor = Executor(context.place) + io.save_inference_model( + self.mobile_model_save_path, + in_vars, + out_vars, + executor, + main_program=test_ir_graph.to_program(), + model_filename='model', + params_filename='weights', + export_for_deployment=True) + _logger.info('Finish QuantizationStrategy::on_epoch_end') diff --git a/python/paddle/fluid/contrib/slim/tests/distillation/compress.yaml b/python/paddle/fluid/contrib/slim/tests/distillation/compress.yaml new file mode 100644 index 0000000000000000000000000000000000000000..ef89dfb7801e6df8a2cf842a5fcc745d70254977 --- /dev/null +++ b/python/paddle/fluid/contrib/slim/tests/distillation/compress.yaml @@ -0,0 +1,46 @@ +#start_epoch(int): The epoch when to merge student graph and teacher graph for +# distillation training. default: 0 +# +#end_epoch(int): The epoch when to finish distillation training. default: 0 +# +#student_feature_map(str): The name of feature map from student network. +# +#teacher_feature_map(str): The name of feature map from teacher network. +# It's shape should be the same with student network. +# +#student_pairs(list): Each tuple, with two variable names, in student_pairs indicates +# a section in student network. The variables in a tuple should +# have the same feature map size. +# +#teacher_pairs(list): Each tuple, with two variable names, in teacher_pairs indicates +# a section in teacher network. The variables in a tuple should +# have the same feature map size. Varibale named teacher_pairs[i][j] +# should has the save channel number with that of variable named +# student_pairs[i][j]. +# +#distillation_loss_weight(float): The weight of the loss. +version: 1.0 +distillers: + fsp_distiller: + class: 'FSPDistiller' +# teacher_pairs: [['teacher_depthwise_conv2d_1.tmp_0', 'teacher_conv2d_3.tmp_0']] +# student_pairs: [['student_depthwise_conv2d_1.tmp_0', 'student_conv2d_3.tmp_0']] + teacher_pairs: [['teacher_conv2_1_dw.tmp_0', 'teacher_conv1.tmp_0']] + student_pairs: [['student_conv2_1_dw.tmp_0', 'student_conv1.tmp_0']] + distillation_loss_weight: 1 + l2_distiller: + class: 'L2Distiller' + teacher_feature_map: 'teacher.tmp_2' + student_feature_map: 'student.tmp_2' + distillation_loss_weight: 1 +strategies: + distillation_strategy: + class: 'DistillationStrategy' + distillers: ['fsp_distiller', 'l2_distiller'] + start_epoch: 0 + end_epoch: 1 +compressor: + epoch: 1 + checkpoint_path: './distillation_checkpoints/' + strategies: + - distillation_strategy diff --git a/python/paddle/fluid/contrib/slim/tests/filter_pruning/compress.yaml b/python/paddle/fluid/contrib/slim/tests/filter_pruning/compress.yaml index 232276feac5023c45d594015cf7084b000cd5b4a..5f747a049e95a5920236336c69a80a9492e6190d 100644 --- a/python/paddle/fluid/contrib/slim/tests/filter_pruning/compress.yaml +++ b/python/paddle/fluid/contrib/slim/tests/filter_pruning/compress.yaml @@ -29,6 +29,6 @@ strategies: metric_name: 'acc_top1' compressor: epoch: 2 - checkpoint_path: './checkpoints/' + checkpoint_path: './checkpoints_pruning/' strategies: - sensitive_pruning_strategy diff --git a/python/paddle/fluid/contrib/slim/tests/filter_pruning/mobilenet.py b/python/paddle/fluid/contrib/slim/tests/mobilenet.py similarity index 86% rename from python/paddle/fluid/contrib/slim/tests/filter_pruning/mobilenet.py rename to python/paddle/fluid/contrib/slim/tests/mobilenet.py index 0148325a642a2bcbebd3d7794056ff2778a3992d..f5dbef17e8d4a7c474881d88b6619061a3424177 100644 --- a/python/paddle/fluid/contrib/slim/tests/filter_pruning/mobilenet.py +++ b/python/paddle/fluid/contrib/slim/tests/mobilenet.py @@ -35,8 +35,9 @@ train_parameters = { class MobileNet(): - def __init__(self): + def __init__(self, name=""): self.params = train_parameters + self.name = name def net(self, input, class_dim=1000, scale=1.0): # conv1: 112x112 @@ -47,7 +48,7 @@ class MobileNet(): num_filters=int(32 * scale), stride=2, padding=1, - name="conv1") + name=self.name + "_conv1") # 56x56 input = self.depthwise_separable( @@ -57,7 +58,7 @@ class MobileNet(): num_groups=32, stride=1, scale=scale, - name="conv2_1") + name=self.name + "_conv2_1") input = self.depthwise_separable( input, @@ -66,7 +67,7 @@ class MobileNet(): num_groups=64, stride=2, scale=scale, - name="conv2_2") + name=self.name + "_conv2_2") # 28x28 input = self.depthwise_separable( @@ -76,7 +77,7 @@ class MobileNet(): num_groups=128, stride=1, scale=scale, - name="conv3_1") + name=self.name + "_conv3_1") input = self.depthwise_separable( input, @@ -85,7 +86,7 @@ class MobileNet(): num_groups=128, stride=2, scale=scale, - name="conv3_2") + name=self.name + "_conv3_2") # 14x14 input = self.depthwise_separable( @@ -95,7 +96,7 @@ class MobileNet(): num_groups=256, stride=1, scale=scale, - name="conv4_1") + name=self.name + "_conv4_1") input = self.depthwise_separable( input, @@ -104,7 +105,7 @@ class MobileNet(): num_groups=256, stride=2, scale=scale, - name="conv4_2") + name=self.name + "_conv4_2") # 14x14 for i in range(5): @@ -115,7 +116,7 @@ class MobileNet(): num_groups=512, stride=1, scale=scale, - name="conv5" + "_" + str(i + 1)) + name=self.name + "_conv5" + "_" + str(i + 1)) # 7x7 input = self.depthwise_separable( input, @@ -124,7 +125,7 @@ class MobileNet(): num_groups=512, stride=2, scale=scale, - name="conv5_6") + name=self.name + "_conv5_6") input = self.depthwise_separable( input, @@ -133,7 +134,7 @@ class MobileNet(): num_groups=1024, stride=1, scale=scale, - name="conv6") + name=self.name + "_conv6") input = fluid.layers.pool2d( input=input, @@ -142,12 +143,14 @@ class MobileNet(): pool_type='avg', global_pooling=True) - output = fluid.layers.fc(input=input, - size=class_dim, - act='softmax', - param_attr=ParamAttr( - initializer=MSRA(), name="fc7_weights"), - bias_attr=ParamAttr(name="fc7_offset")) + output = fluid.layers.fc( + input=input, + size=class_dim, + act='softmax', + param_attr=ParamAttr( + initializer=MSRA(), name=self.name + "_fc7_weights"), + bias_attr=ParamAttr(name=self.name + "_fc7_offset"), + name=self.name) return output def conv_bn_layer(self, @@ -172,11 +175,13 @@ class MobileNet(): use_cudnn=use_cudnn, param_attr=ParamAttr( initializer=MSRA(), name=name + "_weights"), + name=name, bias_attr=False) bn_name = name + "_bn" return fluid.layers.batch_norm( input=conv, act=act, + name=name, param_attr=ParamAttr(name=bn_name + "_scale"), bias_attr=ParamAttr(name=bn_name + "_offset"), moving_mean_name=bn_name + '_mean', diff --git a/python/paddle/fluid/contrib/slim/tests/quantization/compress.yaml b/python/paddle/fluid/contrib/slim/tests/quantization/compress.yaml new file mode 100644 index 0000000000000000000000000000000000000000..f29eb53f88d22d87b61f82279b676af5ec1ef497 --- /dev/null +++ b/python/paddle/fluid/contrib/slim/tests/quantization/compress.yaml @@ -0,0 +1,48 @@ +#start_epoch(int): The epoch to insert quantization operators. default: 0 +# +#end_epoch(int): The epoch to save inferecne model. default: 0 +# +#float_model_save_path(str): The path to save model with float weights. +# None means it doesn't save float model. defalut: None. +# +#mobile_model_save_path(str): The path to save model for paddle-mobile execution. +# None means it doesn't save mobile model. defalut: None. +# +#int8_model_save_path(str): The path to save model with int8_t weight. +# None means it doesn't save int8 model. defalut: None. +# +#activation_bits(int): quantization bit number for activation. default: 8. +# +#weight_bits(int): quantization bit number for weights. The bias is not quantized. +# default: 8. +# +#activation_quantize_type(str): quantization type for activation, +# now support 'abs_max', 'range_abs_max' and 'moving_average_abs_max'. +# If use 'abs_max' mode, the quantization scale will be calculated +# dynamically each step in both training and testing period. If use +# 'range_abs_max', a static quantization scale will be calculated +# during training and used in inference. +# +#save_in_nodes(list): A list of variable names used to prune graph +# for saving inference model. +# +#save_out_nodes(list): A list of variable names used to prune graph +# for saving inference model. +version: 1.0 +strategies: + quantization_strategy: + class: 'QuantizationStrategy' + start_epoch: 0 + end_epoch: 0 + float_model_save_path: './output/float' + weight_bits: 8 + activation_bits: 8 + weight_quantize_type: 'abs_max' + activation_quantize_type: 'abs_max' + save_in_nodes: ['image'] + save_out_nodes: ['quan.tmp_2'] +compressor: + epoch: 1 + checkpoint_path: './checkpoints_quan/' + strategies: + - quantization_strategy diff --git a/python/paddle/fluid/contrib/slim/tests/test_distillation_strategy.py b/python/paddle/fluid/contrib/slim/tests/test_distillation_strategy.py new file mode 100644 index 0000000000000000000000000000000000000000..9b967c0ac7d2bfdab23d4557ef0b7d28f4118ff7 --- /dev/null +++ b/python/paddle/fluid/contrib/slim/tests/test_distillation_strategy.py @@ -0,0 +1,94 @@ +# copyright (c) 2019 paddlepaddle authors. all rights reserved. +# +# licensed under the apache license, version 2.0 (the "license"); +# you may not use this file except in compliance with the license. +# you may obtain a copy of the license at +# +# http://www.apache.org/licenses/license-2.0 +# +# unless required by applicable law or agreed to in writing, software +# distributed under the license is distributed on an "as is" basis, +# without warranties or conditions of any kind, either express or implied. +# see the license for the specific language governing permissions and +# limitations under the license. + +import paddle +import unittest +import paddle.fluid as fluid +from mobilenet import MobileNet +from paddle.fluid.contrib.slim.core import Compressor +from paddle.fluid.contrib.slim.graph import GraphWrapper + + +class TestDistillationStrategy(unittest.TestCase): + """ + Test API of distillation strategy. + """ + + def test_compression(self): + if not fluid.core.is_compiled_with_cuda(): + return + class_dim = 10 + image_shape = [1, 28, 28] + image = fluid.layers.data( + name='image', shape=image_shape, dtype='float32') + image.stop_gradient = False + label = fluid.layers.data(name='label', shape=[1], dtype='int64') + out = MobileNet(name="student").net(input=image, class_dim=class_dim) + acc_top1 = fluid.layers.accuracy(input=out, label=label, k=1) + acc_top5 = fluid.layers.accuracy(input=out, label=label, k=5) + val_program = fluid.default_main_program().clone(for_test=False) + + cost = fluid.layers.cross_entropy(input=out, label=label) + avg_cost = fluid.layers.mean(x=cost) + optimizer = fluid.optimizer.Momentum( + momentum=0.9, + learning_rate=0.01, + regularization=fluid.regularizer.L2Decay(4e-5)) + + place = fluid.CUDAPlace(0) + exe = fluid.Executor(place) + exe.run(fluid.default_startup_program()) + + val_reader = paddle.batch(paddle.dataset.mnist.test(), batch_size=128) + + val_feed_list = [('img', image.name), ('label', label.name)] + val_fetch_list = [('acc_top1', acc_top1.name), ('acc_top5', + acc_top5.name)] + + train_reader = paddle.batch( + paddle.dataset.mnist.train(), batch_size=128) + train_feed_list = [('img', image.name), ('label', label.name)] + train_fetch_list = [('loss', avg_cost.name)] + + # define teacher program + teacher_program = fluid.Program() + startup_program = fluid.Program() + with fluid.program_guard(teacher_program, startup_program): + img = teacher_program.global_block()._clone_variable( + image, force_persistable=False) + predict = MobileNet(name="teacher").net(input=img, + class_dim=class_dim) + + exe.run(startup_program) + + com_pass = Compressor( + place, + fluid.global_scope(), + fluid.default_main_program(), + train_reader=train_reader, + train_feed_list=train_feed_list, + train_fetch_list=train_fetch_list, + eval_program=val_program, + eval_reader=val_reader, + eval_feed_list=val_feed_list, + eval_fetch_list=val_fetch_list, + teacher_programs=[teacher_program.clone(for_test=True)], + train_optimizer=optimizer, + distiller_optimizer=optimizer) + com_pass.config('./distillation/compress.yaml') + eval_graph = com_pass.run() + + +if __name__ == '__main__': + unittest.main() diff --git a/python/paddle/fluid/contrib/slim/tests/test_filter_pruning.py b/python/paddle/fluid/contrib/slim/tests/test_filter_pruning.py index d73ee27779a0d17a0f60df645a6d2946d665c01e..e1763039b3a962a43f2fe3a22c05cb32cba596ed 100644 --- a/python/paddle/fluid/contrib/slim/tests/test_filter_pruning.py +++ b/python/paddle/fluid/contrib/slim/tests/test_filter_pruning.py @@ -15,7 +15,7 @@ import paddle import unittest import paddle.fluid as fluid -from filter_pruning.mobilenet import MobileNet +from mobilenet import MobileNet from paddle.fluid.contrib.slim.core import Compressor from paddle.fluid.contrib.slim.graph import GraphWrapper diff --git a/python/paddle/fluid/contrib/slim/tests/test_graph_wrapper.py b/python/paddle/fluid/contrib/slim/tests/test_graph_wrapper.py index ad82aa941183d72353dae19527b21286d6473a63..0ab8052d7ab16743bb6589dbb44203e70fa907d0 100644 --- a/python/paddle/fluid/contrib/slim/tests/test_graph_wrapper.py +++ b/python/paddle/fluid/contrib/slim/tests/test_graph_wrapper.py @@ -86,7 +86,11 @@ class TestGraphWrapper(unittest.TestCase): def test_all_vars(self): self.build_program() - self.assertEquals(len(self.train_graph.vars()), 90) + # self.assertEquals(len(self.train_graph.vars()), 90) + # activation inplace has been disabled in python side + # which may produce more variable in program_desc + # update 90 => 94 + self.assertEquals(len(self.train_graph.vars()), 94) def test_numel_params(self): self.build_program() diff --git a/python/paddle/fluid/contrib/slim/tests/test_quantization_strategy.py b/python/paddle/fluid/contrib/slim/tests/test_quantization_strategy.py new file mode 100644 index 0000000000000000000000000000000000000000..92afd892afed86e69266c9ab9c97d90daebb86d5 --- /dev/null +++ b/python/paddle/fluid/contrib/slim/tests/test_quantization_strategy.py @@ -0,0 +1,82 @@ +# copyright (c) 2019 paddlepaddle authors. all rights reserved. +# +# licensed under the apache license, version 2.0 (the "license"); +# you may not use this file except in compliance with the license. +# you may obtain a copy of the license at +# +# http://www.apache.org/licenses/license-2.0 +# +# unless required by applicable law or agreed to in writing, software +# distributed under the license is distributed on an "as is" basis, +# without warranties or conditions of any kind, either express or implied. +# see the license for the specific language governing permissions and +# limitations under the license. + +import paddle +import unittest +import paddle.fluid as fluid +from mobilenet import MobileNet +from paddle.fluid.contrib.slim.core import Compressor +from paddle.fluid.contrib.slim.graph import GraphWrapper + + +class TestQuantizationStrategy(unittest.TestCase): + """ + Test API of quantization strategy. + """ + + def test_compression(self): + if not fluid.core.is_compiled_with_cuda(): + return + class_dim = 10 + image_shape = [1, 28, 28] + image = fluid.layers.data( + name='image', shape=image_shape, dtype='float32') + image.stop_gradient = False + label = fluid.layers.data(name='label', shape=[1], dtype='int64') + out = MobileNet(name='quan').net(input=image, class_dim=class_dim) + acc_top1 = fluid.layers.accuracy(input=out, label=label, k=1) + acc_top5 = fluid.layers.accuracy(input=out, label=label, k=5) + val_program = fluid.default_main_program().clone(for_test=False) + + cost = fluid.layers.cross_entropy(input=out, label=label) + avg_cost = fluid.layers.mean(x=cost) + + optimizer = fluid.optimizer.Momentum( + momentum=0.9, + learning_rate=0.01, + regularization=fluid.regularizer.L2Decay(4e-5)) + + place = fluid.CUDAPlace(0) + exe = fluid.Executor(place) + exe.run(fluid.default_startup_program()) + + val_reader = paddle.batch(paddle.dataset.mnist.test(), batch_size=128) + + val_feed_list = [('img', image.name), ('label', label.name)] + val_fetch_list = [('acc_top1', acc_top1.name), ('acc_top5', + acc_top5.name)] + + train_reader = paddle.batch( + paddle.dataset.mnist.train(), batch_size=128) + train_feed_list = [('img', image.name), ('label', label.name)] + train_fetch_list = [('loss', avg_cost.name)] + + com_pass = Compressor( + place, + fluid.global_scope(), + fluid.default_main_program(), + train_reader=train_reader, + train_feed_list=train_feed_list, + train_fetch_list=train_fetch_list, + eval_program=val_program, + eval_reader=val_reader, + eval_feed_list=val_feed_list, + eval_fetch_list=val_fetch_list, + train_optimizer=optimizer) + com_pass.config('./quantization/compress.yaml') + eval_graph = com_pass.run() + + +if __name__ == '__main__': + unittest.main() diff --git a/python/paddle/fluid/contrib/tests/test_calibration.py b/python/paddle/fluid/contrib/tests/test_calibration.py index 1a046a79415f9edbfde3f7e01d7ab78177a8641b..00885eb5d6057b4a7738705007a9334da6aea9d0 100644 --- a/python/paddle/fluid/contrib/tests/test_calibration.py +++ b/python/paddle/fluid/contrib/tests/test_calibration.py @@ -136,7 +136,7 @@ class TestCalibrationForResnet50(unittest.TestCase): "full_data", False) else: data_urls.append( - 'http://paddle-inference-dist.cdn.bcebos.com/int8/calibration_test_data.tar.gz' + 'http://paddle-inference-dist.bj.bcebos.com/int8/calibration_test_data.tar.gz' ) data_md5s.append('1b6c1c434172cca1bf9ba1e4d7a3157d') self.data_cache_folder = self.download_data(data_urls, data_md5s, @@ -189,7 +189,7 @@ class TestCalibrationForResnet50(unittest.TestCase): def download_model(self): # resnet50 fp32 data data_urls = [ - 'http://paddle-inference-dist.cdn.bcebos.com/int8/resnet50_int8_model.tar.gz' + 'http://paddle-inference-dist.bj.bcebos.com/int8/resnet50_int8_model.tar.gz' ] data_md5s = ['4a5194524823d9b76da6e738e1367881'] self.model_cache_folder = self.download_data(data_urls, data_md5s, @@ -307,7 +307,7 @@ class TestCalibrationForMobilenetv1(TestCalibrationForResnet50): def download_model(self): # mobilenetv1 fp32 data data_urls = [ - 'http://paddle-inference-dist.cdn.bcebos.com/int8/mobilenetv1_int8_model.tar.gz' + 'http://paddle-inference-dist.bj.bcebos.com/int8/mobilenetv1_int8_model.tar.gz' ] data_md5s = ['13892b0716d26443a8cdea15b3c6438b'] self.model_cache_folder = self.download_data(data_urls, data_md5s, diff --git a/python/paddle/fluid/data_feeder.py b/python/paddle/fluid/data_feeder.py index 3dac41ce43d61c02f3e11087aef98e2fc454556b..00c4e5691a23a9864ed3e8964f4cafaf9588c665 100644 --- a/python/paddle/fluid/data_feeder.py +++ b/python/paddle/fluid/data_feeder.py @@ -26,6 +26,24 @@ from .framework import Variable, default_main_program __all__ = ['DataFeeder'] +def convert_dtype(dtype): + if dtype == core.VarDesc.VarType.FP32: + return 'float32' + elif dtype == core.VarDesc.VarType.INT64: + return 'int64' + elif dtype == core.VarDesc.VarType.FP64: + return 'float64' + elif dtype == core.VarDesc.VarType.FP16: + return 'float16' + elif dtype == core.VarDesc.VarType.INT32: + return 'int32' + elif dtype == core.VarDesc.VarType.UINT8: + return 'uint8' + else: + raise ValueError("dtype must be any of [int32, float32, int64, " + "float64, uint8]") + + class DataToLoDTensorConverter(object): def __init__(self, place, lod_level, shape, dtype): self.place = place @@ -38,27 +56,12 @@ class DataToLoDTensorConverter(object): if negtive_count > 1: self.shape = None break - if dtype == core.VarDesc.VarType.FP32: - self.dtype = 'float32' - elif dtype == core.VarDesc.VarType.INT64: - self.dtype = 'int64' - elif dtype == core.VarDesc.VarType.FP64: - self.dtype = 'float64' - elif dtype == core.VarDesc.VarType.FP16: - self.dtype = 'float16' - elif dtype == core.VarDesc.VarType.INT32: - self.dtype = 'int32' - elif dtype == core.VarDesc.VarType.UINT8: - self.dtype = 'uint8' - else: - raise ValueError("dtype must be any of [int32, float32, int64, " - "float64, uint8]") + self.dtype = convert_dtype(dtype) + self._reset() + def _reset(self): self.data = [] - self.lod = [] - - for i in six.moves.range(lod_level): - self.lod.append([]) + self.lod = [[] for _ in six.moves.range(self.lod_level)] def feed(self, data): self._feed_impl_(data, self.lod, self.lod_level) @@ -88,15 +91,52 @@ class DataToLoDTensorConverter(object): raise ValueError( "Reshape error. What is defined in data layer is {}, but receive {}" .format(self.shape, arr.shape)) - #else: - # self._check_shape(arr.shape) t = core.LoDTensor() t.set(arr, self.place) if self.lod_level > 0: t.set_recursive_sequence_lengths(self.lod) + self._reset() return t +class BatchedTensorProvider(object): + def __init__(self, feed_list, place, batch_size, generator, drop_last): + self.place = place + self.batch_size = batch_size + self.generator = generator + self.converters = [] + self.drop_last = drop_last + + for var in feed_list: + assert var.lod_level == 0, "lod_level must be 0" + self.converters.append( + DataToLoDTensorConverter( + place=self.place, + lod_level=0, + shape=var.shape, + dtype=var.dtype)) + + def _done(self): + return [c.done() for c in self.converters] + + def __call__(self): + idx = 0 + for each_sample in self.generator(): + for each_slot, each_converter in six.moves.zip(each_sample, + self.converters): + each_converter.data.append(each_slot) + + idx += 1 + if idx == self.batch_size: + idx = 0 + yield self._done() + + if not self.drop_last and idx > 0: + yield self._done() + else: + [c._reset() for c in self.converters] + + class DataFeeder(object): """ DataFeeder converts the data that returned by a reader into a data diff --git a/python/paddle/fluid/executor.py b/python/paddle/fluid/executor.py index 03aa9917f3201e690a7072442cf11ac2284b03c5..018e38cbb3f2676ac05f1a27e9e92b6e0f16efb0 100644 --- a/python/paddle/fluid/executor.py +++ b/python/paddle/fluid/executor.py @@ -564,6 +564,10 @@ class Executor(object): if feed is None: feed = {} + elif isinstance(feed, (list, tuple)): + assert len(feed) == 1, "Not compiled with data parallel" + feed = feed[0] + if not isinstance(feed, dict): raise TypeError( "feed requires dict as its Parameter. But you passed in %s" % diff --git a/python/paddle/fluid/framework.py b/python/paddle/fluid/framework.py index e4169c247f40f1944f98ddd185e55b404bdbf9e3..85e1916a3a5f437b540fa8a1496d3b0d5fa2e3de 100644 --- a/python/paddle/fluid/framework.py +++ b/python/paddle/fluid/framework.py @@ -26,6 +26,7 @@ import six import numpy as np import subprocess +import multiprocessing from .. import compat as cpt from .proto import framework_pb2 @@ -63,6 +64,9 @@ __all__ = [ 'default_main_program', 'program_guard', 'name_scope', + 'cuda_places', + 'cpu_places', + 'cuda_pinned_places', ] EMPTY_VAR_NAME = core.kEmptyVarName() @@ -87,6 +91,87 @@ def _current_expected_place(): return _imperative_current_expected_place_ +def _cpu_num(): + return int(os.environ.get('CPU_NUM', multiprocessing.cpu_count())) + + +def cuda_places(device_ids=None): + ''' + Create a list of :code:`fluid.CUDAPlace` objects. + + If :code:`device_ids` is None, environment variable of + :code:`FLAGS_selected_gpus` would be checked first. If + :code:`FLAGS_selected_gpus=0,1,2`, the returned list would + be [fluid.CUDAPlace(0), fluid.CUDAPlace(1), fluid.CUDAPlace(2)]. + If :code:`FLAGS_selected_gpus` is not set, all visible + gpu places would be returned. + + If :code:`device_ids` is not None, it should be the device + ids of gpus. For example, if :code:`device_ids=[0,1,2]`, + the returned list would be + [fluid.CUDAPlace(0), fluid.CUDAPlace(1), fluid.CUDAPlace(2)]. + + Args: + device_ids (None|list(int)|tuple(int)): gpu device id list. + + Returns: + out (list(fluid.CUDAPlace)): gpu place list. + ''' + assert core.is_compiled_with_cuda(), \ + "Not compiled with CUDA" + if device_ids is None: + gpus_env = os.getenv("FLAGS_selected_gpus") + if gpus_env: + device_ids = [int(s) for s in gpus_env.split(",")] + else: + device_ids = six.moves.range(core.get_cuda_device_count()) + elif not isinstance(device_ids, (list, tuple)): + device_ids = [device_ids] + return [core.CUDAPlace(dev_id) for dev_id in device_ids] + + +def cpu_places(device_count=None): + ''' + Create a list of :code:`fluid.CPUPlace` objects. + + If :code:`device_count` is None, the device count would + be determined by environment variable :code:`CPU_NUM`. + If :code:`CPU_NUM` is not set, the device count would + be determined by :code:`multiprocessing.cpu_count()`. + + Args: + device_count (None|int): device number. + + Returns: + out (list(fluid.CPUPlace)): cpu place list. + ''' + if device_count is None: + device_count = _cpu_num() + return [core.CPUPlace()] * device_count + + +def cuda_pinned_places(device_count=None): + ''' + Create a list of :code:`fluid.CUDAPinnedPlace` objects. + + If :code:`device_count` is None, the device count would + be determined by environment variable :code:`CPU_NUM`. + If :code:`CPU_NUM` is not set, the device count would + be determined by :code:`multiprocessing.cpu_count()`. + + Args: + device_count (None|int): device number. + + Returns: + out (list(fluid.CUDAPinnedPlace)): cuda pinned place list. + ''' + assert core.is_compiled_with_cuda(), \ + "Not compiled with CUDA" + if device_count is None: + device_count = _cpu_num() + return [core.cuda_pinned_places()] * device_count + + class NameScope(object): def __init__(self, name="", parent=None): self._children = dict() @@ -318,8 +403,8 @@ class Variable(object): self._ivar = core.VarBase( name, dtype if dtype else core.VarDesc.VarType.FP32, list(shape) if shape else [], - _current_expected_place(), True - if persistable else False, stop_gradient) + _current_expected_place(), stop_gradient, True + if persistable else False) if persistable: _imperative_tracer().trace_var(name, self) else: @@ -542,6 +627,183 @@ class Variable(object): """ self.error_clip = error_clip + def _slice_indices(self, slice, length): + """ + Reference implementation for the slice.indices method. + """ + # Compute step and length as integers. + step = 1 if slice.step is None else slice.step + + # Raise ValueError for negative length or zero step. + if length < 0: + raise ValueError("length should not be negative") + if step == 0: + raise ValueError("slice step cannot be zero") + + # Find lower and upper bounds for start and stop. + lower = -1 if step < 0 else 0 + upper = length - 1 if step < 0 else length + + # Compute start. + if slice.start is None: + start = upper if step < 0 else lower + else: + start = slice.start + start = max(start + length, lower) if start < 0 else min(start, + upper) + + # Compute stop. + if slice.stop is None: + stop = lower if step < 0 else upper + else: + stop = slice.stop + stop = max(stop + length, lower) if stop < 0 else min(stop, upper) + + return start, stop, step + + def _detectEllipsis(self, item): + has_ellipsis = False + start = 0 + end = len(self.shape) + for index, o in enumerate(item): + if o is Ellipsis: + if has_ellipsis: + raise ValueError("Index can have one ellipsis only.") + has_ellipsis = True + start = index + else: + if has_ellipsis: + end = index + return has_ellipsis, start, end + + def _reconstructSliceinfo(self, item): + has_ellipsis, start, end = self._detectEllipsis(item) + if has_ellipsis: + newitem = [] + for i in range(start): + newitem.append(item[i]) + for i in range(start, end): + newitem.append(slice(None, None, None)) + for i in range(end, len(item)): + newitem.append(item[i]) + return newitem + else: + return None + + def _detectContinuesSlice(self, item): + starts = [] + ends = [] + for index, o in enumerate(item): + if isinstance(o, int): + start = int(o) + if (index > 0 and index >= self.shape[index]) \ + or (index < 0 and (index + self.shape[index]) < 0): + raise IndexError("invalid index") + start = max(start + self.shape[index], 0) if start < 0 else min( + start, self.shape[index]) + starts.append(start) + ends.append(start + 1) + elif isinstance(o, slice): + start, stop, step = self._slice_indices(o, self.shape[index]) + if step == 1 or step == -1: + starts.append(start) + ends.append(stop) + else: + return False, None + else: + raise IndexError("Valid index accept int or slice or ellipsis") + return True, [starts, ends] + + def _cloneVar(self, copy=False): + if not copy: + return self.block.create_var( + name=unique_name.generate(".".join(self.name)), + dtype=self.dtype, + persistable=self.persistable, + stop_gradient=self._stop_gradient, ) + else: + return self + + def _sliceVar(self, axes, starts, ends): + new_var = self._cloneVar() + self.block.append_op( + type="slice", + inputs={'Input': [self]}, + outputs={'Out': [new_var]}, + attrs={'axes': axes, + 'starts': starts, + 'ends': ends}) + return new_var + + def _concatVar(self, inputs, axis): + new_var = self._cloneVar() + self.block.append_op( + type="concat", + inputs={'X': inputs}, + outputs={'Out': [new_var]}, + attrs={'axis': axis, }) + return new_var + + def _sliceAndConcatVar(self, item, axis): + if isinstance(item, slice): + if self.shape[axis] < 0: + return self._cloneVar(True) + start, stop, step = self._slice_indices(item, self.shape[axis]) + if step == 1: + return self._sliceVar([axis], [start], [stop]) + else: + vars = [] + if step > 0: + while start < stop: + vars.append( + self._sliceVar([axis], [start], [start + 1])) + start += step + else: + while start > stop: + vars.append( + self._sliceVar([axis], [start], [start + 1])) + start += step + return self._concatVar(vars, axis) + elif isinstance(item, int): + if self.shape[axis] < 0: + return self._cloneVar(True) + index = int(item) + if (index > 0 and index >= self.shape[axis])\ + or (index < 0 and (index + self.shape[axis]) < 0): + raise IndexError("invalid index") + return self._sliceVar([axis], [index], [index + 1]) + else: + raise IndexError("Valid index accept int or slice or tuple") + + def __getitem__(self, item): + """ + Slice the variable. + + Args: + item(int/slice/tuple) : the index. + + Returns: + Sliced variable + """ + new_var = None + if isinstance(item, tuple): + if len(item) > len(self.shape): + raise IndexError("Too many indexes") + newitem = self._reconstructSliceinfo(item) or item + check, info = self._detectContinuesSlice(newitem) + if check: + starts = info[0] + ends = info[1] + axes = [i for i in range(len(starts))] + return self._sliceVar(axes, starts, ends) + else: + new_var = self + for index, o in enumerate(newitem): + new_var = new_var._sliceAndConcatVar(o, index) + else: + new_var = self._sliceAndConcatVar(item, 0) + return new_var + def get_all_op_protos(): """ @@ -659,7 +921,7 @@ class Operator(object): if _in_imperative_mode(): if type is None: raise ValueError( - "`type` to initilized an Operator can not be None.") + "`type` to initialized an Operator can not be None.") self.iop = core.OpBase(type) # TODO(minqiyang): remove these lines after we take apart all @@ -821,7 +1083,10 @@ class Operator(object): @property def type(self): - return self.desc.type() + if _in_imperative_mode(): + return self.iop.type + else: + return self.desc.type() def input(self, name): """ @@ -1559,12 +1824,15 @@ class Block(object): name=v.name) self.vars[new_p.name] = new_p - def _clone_variable(self, var): + def _clone_variable(self, var, force_persistable=True): """ Clone a variable into current block. Args: var: the variable to be cloned. + force_persistable(bool): True means setting the result variable to being persistable. + False means setting the persistable the same with that of input var. + default: True. Returns: Variable: the new variable cloned from 'var' in current block. @@ -1584,7 +1852,7 @@ class Block(object): shape=var.shape, dtype=var.dtype, type=var.type, - persistable=True, + persistable=True if force_persistable else var.persistable, is_data=var.is_data) else: ret_var = self.create_var( @@ -1593,7 +1861,7 @@ class Block(object): dtype=var.dtype, type=var.type, lod_level=var.lod_level, - persistable=True, + persistable=True if force_persistable else var.persistable, is_data=var.is_data) return ret_var @@ -1964,6 +2232,28 @@ class IrOpNode(IrNode): else: desc._set_attr(name, val) + def input_arg_names(self): + """ + Return input arguments' names of this op node. + + Returns: + list(str): input arguments' names of this op node. + """ + assert self.node.op() is not None, \ + "The node operator description cannot be None." + return self.node.op().input_arg_names() + + def output_arg_names(self): + """ + Return output arguments' names of this op node. + + Returns: + list(str): output arguments' names of this op node. + """ + assert self.node.op() is not None, \ + "The node operator description cannot be None." + return self.node.op().output_arg_names() + @property def inputs(self): """ @@ -2054,31 +2344,38 @@ class IrGraph(object): """ return {IrOpNode(node) for node in self.graph.nodes() if node.is_op()} - def var_node(self, name): + def _find_var_node(self, key): """ - Get a variable node by name from the graph. + Get a variable node by the `key` from this graph. The key + can be a node name or a node id. + + WARNS: + There are some nodes may have the same name. So, be + cautious about using this method when you find the + target var node by its name. Args: - name(str): the name of the variable node. + key(str|int): The str type denotes that the target variable node's name. + And the int type denotes that the target variable node's id. Raises: - ValueError: The If input's type is not str, or this graph - doesn't have a variable with the giving name. + ValueError: If this graph doesn't have a variable with the giving name or id. Returns: - IrVarNode: the variable node with the giving name. + IrVarNode: the variable node with the giving name or id. """ - if not isinstance(name, six.string_types): - raise TypeError( - "var require string as parameter, but get %s instead." % - (type(name))) target_var_node = None var_nodes = self.all_var_nodes() - for var_node in var_nodes: - if var_node.name() == name: - target_var_node = var_node + if isinstance(key, six.string_types): + for var_node in var_nodes: + if var_node.name() == key: + target_var_node = var_node + elif isinstance(key, int): + for var_node in var_nodes: + if var_node.id() == key: + target_var_node = var_node if target_var_node is None: - raise ValueError("var_node %s not in this graph" % name) + raise ValueError("var_node %s not in this graph" % key) return target_var_node def create_persistable_node(self, name, var_type, shape, var_dtype): @@ -2224,6 +2521,34 @@ class IrGraph(object): original_nodes = {n.node for n in remove_nodes} core.graph_safe_remove_nodes(self.graph, original_nodes) + def resolve_hazard(self): + def _to_node(nodes, node_name): + target_node = None + for n in nodes: + if n.name() == node_name: + target_node = n + assert target_node is not None, "Cannot find the target node in the giving set." + return target_node + + ordered_nodes = core.topology_sort(self.graph) + var_nodes = dict() + for node in ordered_nodes: + if node.is_op() and node.op() is not None: + for each_var_name in node.op().input_arg_names(): + if each_var_name not in var_nodes: + var_nodes[each_var_name] = [ + _to_node(node.inputs, each_var_name) + ] + for each_var_name in node.op().output_arg_names(): + if each_var_name not in var_nodes: + var_nodes[each_var_name] = [ + _to_node(node.outputs, each_var_name) + ] + else: + var_nodes[each_var_name].append( + _to_node(node.outputs, each_var_name)) + self.graph.resolve_hazard(var_nodes) + def has_circle(self): """ Check if the graph has a circle. diff --git a/python/paddle/fluid/imperative/base.py b/python/paddle/fluid/imperative/base.py index 174f138bfa2d3cfaa433c3235c2b0f9a5650e756..097cd2be35b01aced30486b874f202381c4d9962 100644 --- a/python/paddle/fluid/imperative/base.py +++ b/python/paddle/fluid/imperative/base.py @@ -44,7 +44,7 @@ def guard(place=None): yield -def to_variable(value, block=None): +def to_variable(value, block=None, name=None): if isinstance(value, np.ndarray): assert enabled(), "to_variable could only be called in imperative mode" @@ -53,9 +53,10 @@ def to_variable(value, block=None): py_var = framework.Variable( block, type=core.VarDesc.VarType.LOD_TENSOR, - name=None, + name=name, shape=value.shape, - dtype=value.dtype) + dtype=value.dtype, + stop_gradient=True) var = py_var._ivar.value() tensor = var.get_tensor() tensor.set(value, framework._current_expected_place()) diff --git a/python/paddle/fluid/imperative/layer_object_helper.py b/python/paddle/fluid/imperative/layer_object_helper.py index 6afffe3636dd79d124a5b0e9d9eccb02630f5b8c..3d4426e8cdfe79a6fa2d6452e7cb3ab0a458c0bc 100644 --- a/python/paddle/fluid/imperative/layer_object_helper.py +++ b/python/paddle/fluid/imperative/layer_object_helper.py @@ -105,6 +105,7 @@ class LayerObjectHelper(LayerHelperBase): Returns dtype of the input """ + inputs_in = inputs_in if (inputs_in is not None) else [] inputs = self._multiple_input(inputs_in) dtype = None for each in inputs: @@ -191,13 +192,7 @@ class LayerObjectHelper(LayerHelperBase): act['use_mkldnn'] = use_mkl_dnn act_type = act.pop('type') - tmp = input_var - # NOTE(dzhwinter): some activation support inplace compution. - # NOTE(minqiyang): currently, we don't support inplace in imperative mode - if not _in_imperative_mode() and core.IsInplace(act_type): - tmp = input_var - else: - tmp = self.create_variable_for_type_inference(dtype=input_var.dtype) + tmp = self.create_variable_for_type_inference(dtype=input_var.dtype) self.append_op( type=act_type, inputs={"X": [input_var]}, diff --git a/python/paddle/fluid/imperative/layers.py b/python/paddle/fluid/imperative/layers.py index a8a1aac8b0c74bcfd57b674d01600672788b016a..e64667f7f467d0d0a3c07d14ce22c3f231e82eb6 100644 --- a/python/paddle/fluid/imperative/layers.py +++ b/python/paddle/fluid/imperative/layers.py @@ -17,10 +17,12 @@ import contextlib import sys import numpy as np import collections +import six from .. import unique_name from paddle.fluid import core from .layer_object_helper import LayerObjectHelper from paddle.fluid import framework +from ..param_attr import ParamAttr __all__ = ['Layer', 'PyLayer'] @@ -72,6 +74,10 @@ class Layer(core.Layer): Returns created parameter Variable. """ + if isinstance(attr, ParamAttr) and (attr.name is not None): + attr.name = ".".join([self._full_name, attr.name]) + elif isinstance(attr, six.string_types): + attr = ".".join([self._full_name, attr]) return self._helper.create_parameter(attr, shape, dtype, is_bias, default_initializer) @@ -164,6 +170,7 @@ class Layer(core.Layer): the sublayer passed in. """ assert isinstance(sublayer, core.Layer) + self._sub_layers[name] = sublayer return sublayer diff --git a/python/paddle/fluid/imperative/nn.py b/python/paddle/fluid/imperative/nn.py index 604ff753491925be8194522b3efdb77a7e9c3dfa..9856276b20b7affb548847d359463451bb238518 100644 --- a/python/paddle/fluid/imperative/nn.py +++ b/python/paddle/fluid/imperative/nn.py @@ -15,15 +15,20 @@ from __future__ import print_function from six.moves import reduce +import numpy as np from .. import core from ..layers import utils from . import layers from ..framework import Variable, OpProtoHolder +from ..layers import layer_function_generator from ..param_attr import ParamAttr -from ..initializer import Normal, Constant +from ..initializer import Normal, Constant, NumpyArrayInitializer -__all__ = ['Conv2D', 'Pool2D', 'FC', 'BatchNorm', 'Embedding', 'GRUUnit'] +__all__ = [ + 'Conv2D', 'Pool2D', 'FC', 'BatchNorm', 'Embedding', 'GRUUnit', 'LayerNorm', + 'NCE', 'PRelu', 'BilinearTensorProduct', 'Conv2DTranspose', 'SequenceConv' +] class Conv2D(layers.Layer): @@ -438,7 +443,6 @@ class Embedding(layers.Layer): self._size = size self._is_sparse = is_sparse self._is_distributed = is_distributed - self._padding_idx = -1 if padding_idx is None else padding_idx if padding_idx >= 0 else ( size[0] + padding_idx) @@ -471,6 +475,131 @@ class Embedding(layers.Layer): return out +class LayerNorm(layers.Layer): + def __init__(self, + name_scope, + scale=True, + shift=True, + begin_norm_axis=1, + epsilon=1e-05, + param_attr=None, + bias_attr=None, + act=None): + """ + ${comment} + + The formula is as follows: + + .. math:: + + \\mu & = \\frac{1}{H}\\sum_{i=1}^{H} a_i + + \\sigma & = \\sqrt{\\frac{1}{H}\sum_{i=1}^{H}(a_i - \\mu)^2} + + h & = f(\\frac{g}{\\sigma}(a - \\mu) + b) + + * :math:`a`: the vector representation of the summed inputs to the neurons + in that layer. + + * :math:`H`: the number of hidden units in a layers + + * :math:`g`: the trainable scale parameter. + + * :math:`b`: the trainable bias parameter. + + Args: + input(Variable): The input tensor variable. + scale(bool): Whether to learn the adaptive gain :math:`g` after + normalization. Default True. + shift(bool): Whether to learn the adaptive bias :math:`b` after + normalization. Default True. + begin_norm_axis(int): The normalization will be performed along + dimensions from :attr:`begin_norm_axis` to :attr:`rank(input)`. + Default 1. + epsilon(float): The small value added to the variance to prevent + division by zero. Default 1e-05. + param_attr(ParamAttr|None): The parameter attribute for the learnable + gain :math:`g`. If :attr:`scale` is False, :attr:`param_attr` is + omitted. If :attr:`scale` is True and :attr:`param_attr` is None, + a default :code:`ParamAttr` would be added as scale. The + :attr:`param_attr` is initialized as 1 if it is added. Default None. + bias_attr(ParamAttr|None): The parameter attribute for the learnable + bias :math:`b`. If :attr:`shift` is False, :attr:`bias_attr` is + omitted. If :attr:`shift` is True and :attr:`param_attr` is None, + a default :code:`ParamAttr` would be added as bias. The + :attr:`bias_attr` is initialized as 0 if it is added. Default None. + act(str): Activation to be applied to the output of layer normalizaiton. + Default None. + Returns: + ${y_comment} + + Examples: + + >>> data = fluid.layers.data(name='data', shape=[3, 32, 32], + >>> dtype='float32') + >>> x = fluid.layers.layer_norm(input=data, begin_norm_axis=1) + """ + + super(LayerNorm, self).__init__(name_scope) + self._scale = scale + self._shift = shift + self._begin_norm_axis = begin_norm_axis + self._epsilon = epsilon + self._param_attr = param_attr + self._bias_attr = bias_attr + self._act = act + + def _build_once(self, input): + self._dtype = self._helper.input_dtype(input) + input_shape = input.shape + param_shape = [ + reduce(lambda x, y: x * y, input_shape[self._begin_norm_axis:]) + ] + if self._scale: + self._scale_w = self.create_parameter( + attr=self._param_attr, + shape=param_shape, + dtype=self._dtype, + default_initializer=Constant(1.0)) + if self._shift: + assert self._bias_attr is not False + self._bias_w = self.create_parameter( + attr=self._bias_attr, + shape=param_shape, + dtype=self._dtype, + is_bias=True) + + def forward(self, input): + inputs = dict() + inputs['X'] = input + if self._scale: + inputs['Scale'] = self._scale_w + if self._shift: + inputs['Bias'] = self._bias_w + # create output + mean_out = self._helper.create_variable_for_type_inference( + dtype=self._dtype, stop_gradient=True) + variance_out = self._helper.create_variable_for_type_inference( + dtype=self._dtype, stop_gradient=True) + layer_norm_out = self._helper.create_variable_for_type_inference( + self._dtype) + + self._helper.append_op( + type="layer_norm", + inputs=inputs, + outputs={ + "Y": layer_norm_out, + "Mean": mean_out, + "Variance": variance_out, + }, + attrs={ + "epsilon": self._epsilon, + "begin_norm_axis": self._begin_norm_axis + }) + + return self._helper.append_activation(layer_norm_out) + + class GRUUnit(layers.Layer): """ **GRU unit layer** @@ -603,3 +732,668 @@ class GRUUnit(layers.Layer): }) return updated_hidden, reset_hidden_pre, gate + + +class NCE(layers.Layer): + """ + ${comment} + + Args: + input (Variable): input variable. + label (Variable): label. + num_total_classes (int):${num_total_classes_comment} + sample_weight (Variable|None): A Variable of shape [batch_size, 1] + storing a weight for each sample. The default weight for each + sample is 1.0. + param_attr (ParamAttr|None): The parameter attribute for learnable parameters/weights + of nce. If it is set to None or one attribute of ParamAttr, nce + will create ParamAttr as param_attr. If the Initializer of the param_attr + is not set, the parameter is initialized with Xavier. Default: None. + bias_attr (ParamAttr|bool|None): The parameter attribute for the bias of nce. + If it is set to False, no bias will be added to the output units. + If it is set to None or one attribute of ParamAttr, nce + will create ParamAttr as bias_attr. If the Initializer of the bias_attr + is not set, the bias is initialized zero. Default: None. + num_neg_samples (int): ${num_neg_samples_comment} + name (str|None): A name for this layer(optional). If set None, the layer + will be named automatically. Default: None. + sampler (str): The sampler used to sample class from negtive classes. + It can be 'uniform', 'log_uniform' or 'custom_dist'. + default: 'uniform'. + custom_dist (float[]): A float[] with size=num_total_classes. + It is used when sampler is set to 'custom_dist'. + custom_dist[i] is the probsbility of i-th class to be sampled. + default: None. + seed (int): The seed used in sampler. default: 0. + is_sparse(bool): The flag indicating whether to use sparse update, the weight@GRAD and bias@GRAD will be changed to SelectedRows. + + Returns: + Variable: The output nce loss. + + Examples: + .. code-block:: python + + window_size = 5 + words = [] + for i in xrange(window_size): + words.append(layers.data( + name='word_{0}'.format(i), shape=[1], dtype='int64')) + + dict_size = 10000 + label_word = int(window_size / 2) + 1 + + embs = [] + for i in xrange(window_size): + if i == label_word: + continue + + emb = layers.embedding(input=words[i], size=[dict_size, 32], + param_attr='emb.w', is_sparse=True) + embs.append(emb) + + embs = layers.concat(input=embs, axis=1) + loss = layers.nce(input=embs, label=words[label_word], + num_total_classes=dict_size, param_attr='nce.w', + bias_attr='nce.b') + + #or use custom distribution + dist = fluid.layers.assign(input=np.array([0.05,0.5,0.1,0.3,0.05]).astype("float32")) + loss = layers.nce(input=embs, label=words[label_word], + num_total_classes=5, param_attr='nce.w', + bias_attr='nce.b', + num_neg_samples=3, + sampler="custom_dist", + custom_dist=dist) + + """ + + def __init__(self, + name_scope, + num_total_classes, + param_attr=None, + bias_attr=None, + num_neg_samples=None, + sampler="uniform", + custom_dist=None, + seed=0, + is_sparse=False): + super(NCE, self).__init__(name_scope) + self._param_attr = param_attr + self._bias_attr = bias_attr + self._num_total_classes = num_total_classes + + self._inputs = dict() + + if sampler == "uniform": + sampler = 0 + elif sampler == "log_uniform": + sampler = 1 + elif sampler == "custom_dist": + assert custom_dist is not None + # assert isinstance(custom_dist, Variable) + + custom_dist_len = len(custom_dist) + alias_probs_ = [0] * custom_dist_len + alias_ = [0] * custom_dist_len + bigs = [] + littles = [] + for i in range(custom_dist_len): + normal_prob = custom_dist[i] * custom_dist_len + if normal_prob - 1.0 > 0: + bigs.append((i, normal_prob)) + elif 1.0 - normal_prob > 0: + littles.append((i, normal_prob)) + else: + alias_probs_[i] = normal_prob + alias_[i] = -1 + + while len(bigs) and len(littles): + big = bigs.pop(0) + little = littles.pop(0) + + big_idx = big[0] + big_prob = big[1] + + alias_probs_[little[0]] = little[1] + alias_[little[0]] = big_idx + big_left = big[1] + little[1] - 1 + if big_left - 1.0 > 0: + bigs.append((big_idx, big_left)) + elif 1.0 - big_left > 0: + littles.append((big_idx, big_left)) + else: + alias_probs_[big_idx] = big_left + alias_[big_idx] = -1 + + if len(bigs): + big = bigs.pop(0) + alias_probs_[big[0]] = 1.0 + alias_[big[0]] = -1 + if len(littles): + little = littles.pop(0) + alias_probs_[little[0]] = 1.0 + alias_[little[0]] = -1 + + def _init_by_numpy_array(numpy_array): + ret = self.create_parameter( + attr=ParamAttr(), + shape=numpy_array.shape, + dtype=numpy_array.dtype, + default_initializer=NumpyArrayInitializer(numpy_array)) + ret.stop_gradient = True + return ret + + self._inputs['CustomDistProbs'] = _init_by_numpy_array( + np.array(custom_dist).astype('float32')) + self._inputs['CustomDistAlias'] = _init_by_numpy_array( + np.array(alias_).astype('int32')) + self._inputs['CustomDistAliasProbs'] = _init_by_numpy_array( + np.array(alias_probs_).astype('float32')) + sampler = 2 + else: + raise Exception("Unsupported sampler type.") + + if num_neg_samples is None: + num_neg_samples = 10 + else: + num_neg_samples = int(num_neg_samples) + self._num_neg_samples = num_neg_samples + remote_prefetch = is_sparse + print( + "With sparse mode, if your models has only small parameter prefetch may cause speed down" + ) + self._attrs = { + 'num_total_classes': int(num_total_classes), + 'num_neg_samples': num_neg_samples, + 'seed': seed, + 'sampler': sampler, + 'is_sparse': is_sparse, + 'remote_prefetch': remote_prefetch + } + + def _build_once(self, input, label, sample_weight=None): + assert isinstance(input, Variable) + assert isinstance(label, Variable) + + dim = input.shape[1] + num_true_class = label.shape[1] + self._w = self.create_parameter( + attr=self._param_attr, + shape=[self._num_total_classes, dim], + is_bias=False, + dtype=input.dtype) + if self._bias_attr: + self._b = self.create_parameter( + attr=self._bias_attr, + shape=[self._num_total_classes, 1], + is_bias=True, + dtype=input.dtype) + self._inputs['Bias'] = self._b + self._inputs['Weight'] = self._w + + def forward(self, input, label, sample_weight=None): + assert isinstance(input, Variable) + assert isinstance(label, Variable) + + self._inputs['Input'] = input + self._inputs['Label'] = label + self._inputs['SampleWeight'] = sample_weight if sample_weight is not None else [] + + cost = self._helper.create_variable_for_type_inference( + dtype=input.dtype) + sample_logits = self._helper.create_variable_for_type_inference( + dtype=input.dtype) + sample_labels = self._helper.create_variable_for_type_inference( + dtype=label.dtype) + + self._helper.append_op( + type='nce', + inputs=self._inputs, + outputs={ + 'Cost': cost, + 'SampleLogits': sample_logits, + 'SampleLabels': sample_labels + }, + attrs=self._attrs) + return cost / (self._num_neg_samples + 1) + + +class PRelu(layers.Layer): + """ + Equation: + + .. math:: + y = \max(0, x) + \\alpha * \min(0, x) + + Args: + x (Variable): The input tensor. + param_attr(ParamAttr|None): The parameter attribute for the learnable + weight (alpha). + mode (string): The mode for weight sharing. It supports all, channel + and element. all: all elements share same weight + channel:elements in a channel share same weight + element:each element has a weight + name(str|None): A name for this layer(optional). If set None, the layer + will be named automatically. + + Returns: + Variable: The output tensor with the same shape as input. + + Examples: + + .. code-block:: python + + x = fluid.layers.data(name="x", shape=[10,10], dtype="float32") + mode = 'channel' + output = fluid.layers.prelu(x,mode) + """ + + def __init__(self, name_scope, mode, param_attr=None): + + super(PRelu, self).__init__(name_scope) + self._mode = mode + self._param_attr = param_attr + if self._mode not in ['all', 'channel', 'element']: + raise ValueError('mode should be one of all, channel, element.') + self._alpha_shape = [1] + + def _build_once(self, input): + if self._mode == 'channel': + self._alpha_shape = [1, input.shape[1], 1, 1] + elif self._mode == 'element': + self._alpha_shape = input.shape + self._dtype = self._helper.input_dtype(input) + self._alpha = self.create_parameter( + attr=self._param_attr, + shape=self._alpha_shape, + dtype='float32', + is_bias=False, + default_initializer=Constant(1.0)) + + def forward(self, input): + + out = self._helper.create_variable_for_type_inference(self._dtype) + self._helper.append_op( + type="prelu", + inputs={"X": input, + 'Alpha': self._alpha}, + attrs={"mode": self._mode}, + outputs={"Out": out}) + return out + + +class BilinearTensorProduct(layers.Layer): + """ + **Add Bilinear Tensor Product Layer** + + This layer performs bilinear tensor product on two inputs. + For example: + + .. math:: + out_{i} = x * W_{i} * {y^\mathrm{T}}, i=0,1,...,size-1 + + In this formula: + - :math:`x`: the first input contains M elements, shape is [batch_size, M]. + - :math:`y`: the second input contains N elements, shape is [batch_size, N]. + - :math:`W_{i}`: the i-th learned weight, shape is [M, N] + - :math:`out_{i}`: the i-th element of out, shape is [batch_size, size]. + - :math:`y^\mathrm{T}`: the transpose of :math:`y_{2}`. + + Args: + x (Variable): 2-D input tensor with shape [batch_size, M] + y (Variable): 2-D input tensor with shape [batch_size, N] + size (int): The dimension of this layer. + act (str, default None): Activation to be applied to the output of this layer. + name (str, default None): The name of this layer. + param_attr (ParamAttr, default None): The parameter attribute for the learnable w. + parameters/weights of this layer. + bias_attr (ParamAttr, default None): The parameter attribute for the bias + of this layer. If it is set to False, no bias will be added to the output units. + If it is set to None, the bias is initialized zero. Default: None. + + Returns: + Variable: A 2-D Tensor of shape [batch_size, size]. + + Examples: + .. code-block:: python + + tensor = bilinear_tensor_product(x=layer1, y=layer2, size=1000) + """ + + def __init__(self, + name_scope, + size, + name=None, + act=None, + param_attr=None, + bias_attr=None): + super(BilinearTensorProduct, self).__init__(name_scope) + self._param_attr = param_attr + self._bias_attr = bias_attr + self._act = act + self._size = size + self._name = name + self._inputs = dict() + + def _build_once(self, x, y): + self._dtype = self._helper.input_dtype(x) + + param_shape = [self._size, x.shape[1], y.shape[1]] + + self._w = self.create_parameter( + attr=self._param_attr, + shape=param_shape, + dtype=self._dtype, + is_bias=False) + + if self._bias_attr: + bias_size = [1, self._size] + bias = self.create_parameter( + attr=self._bias_attr, + shape=bias_size, + dtype=self._dtype, + is_bias=True) + self._inputs["Bias"] = bias + + def forward(self, x, y): + self._inputs = {"X": x, "Y": y, "Weight": self._w} + if self._name is not None: + out = self._helper.create_variable( + name=".".join([self.full_name(), self._name]), + dtype=self._dtype, + persistable=False) + else: + out = self._helper.create_variable( + dtype=self._dtype, persistable=False) + self._helper.append_op( + type="bilinear_tensor_product", + inputs=self._inputs, + outputs={"Out": out}) + + # add activation + return self._helper.append_activation(out) + + +class Conv2DTranspose(layers.Layer): + """ + **Convlution2D transpose layer** + + The convolution2D transpose layer calculates the output based on the input, + filter, and dilations, strides, paddings. Input(Input) and output(Output) + are in NCHW format. Where N is batch size, C is the number of channels, + H is the height of the feature, and W is the width of the feature. + Parameters(dilations, strides, paddings) are two elements. These two elements + represent height and width, respectively. The details of convolution transpose + layer, please refer to the following explanation and references + `therein `_. + If bias attribution and activation type are provided, bias is added to + the output of the convolution, and the corresponding activation function + is applied to the final result. + + For each input :math:`X`, the equation is: + + .. math:: + + Out = \sigma (W \\ast X + b) + + Where: + + * :math:`X`: Input value, a tensor with NCHW format. + * :math:`W`: Filter value, a tensor with MCHW format. + * :math:`\\ast`: Convolution operation. + * :math:`b`: Bias value, a 2-D tensor with shape [M, 1]. + * :math:`\\sigma`: Activation function. + * :math:`Out`: Output value, the shape of :math:`Out` and :math:`X` may be different. + + Example: + + - Input: + + Input shape: :math:`(N, C_{in}, H_{in}, W_{in})` + + Filter shape: :math:`(C_{in}, C_{out}, H_f, W_f)` + + - Output: + + Output shape: :math:`(N, C_{out}, H_{out}, W_{out})` + + Where + + .. math:: + + H^\prime_{out} &= (H_{in} - 1) * strides[0] - 2 * paddings[0] + dilations[0] * (H_f - 1) + 1 \\\\ + W^\prime_{out} &= (W_{in} - 1) * strides[1] - 2 * paddings[1] + dilations[1] * (W_f - 1) + 1 \\\\ + H_{out} &\in [ H^\prime_{out}, H^\prime_{out} + strides[0] ) \\\\ + W_{out} &\in [ W^\prime_{out}, W^\prime_{out} + strides[1] ) + + Args: + input(Variable): The input image with [N, C, H, W] format. + num_filters(int): The number of the filter. It is as same as the output + image channel. + output_size(int|tuple|None): The output image size. If output size is a + tuple, it must contain two integers, (image_H, image_W). None if use + filter_size, padding, and stride to calculate output_size. + if output_size and filter_size are specified at the same time, They + should follow the formula above. + filter_size(int|tuple|None): The filter size. If filter_size is a tuple, + it must contain two integers, (filter_size_H, filter_size_W). + Otherwise, the filter will be a square. None if use output size to + calculate filter_size. + padding(int|tuple): The padding size. If padding is a tuple, it must + contain two integers, (padding_H, padding_W). Otherwise, the + padding_H = padding_W = padding. Default: padding = 0. + stride(int|tuple): The stride size. If stride is a tuple, it must + contain two integers, (stride_H, stride_W). Otherwise, the + stride_H = stride_W = stride. Default: stride = 1. + dilation(int|tuple): The dilation size. If dilation is a tuple, it must + contain two integers, (dilation_H, dilation_W). Otherwise, the + dilation_H = dilation_W = dilation. Default: dilation = 1. + groups(int): The groups number of the Conv2d transpose layer. Inspired by + grouped convolution in Alex Krizhevsky's Deep CNN paper, in which + when group=2, the first half of the filters is only connected to the + first half of the input channels, while the second half of the + filters is only connected to the second half of the input channels. + Default: groups = 1. + param_attr (ParamAttr|None): The parameter attribute for learnable parameters/weights + of conv2d_transpose. If it is set to None or one attribute of ParamAttr, conv2d_transpose + will create ParamAttr as param_attr. If the Initializer of the param_attr + is not set, the parameter is initialized with Xavier. Default: None. + bias_attr (ParamAttr|bool|None): The parameter attribute for the bias of conv2d_transpose. + If it is set to False, no bias will be added to the output units. + If it is set to None or one attribute of ParamAttr, conv2d_transpose + will create ParamAttr as bias_attr. If the Initializer of the bias_attr + is not set, the bias is initialized zero. Default: None. + use_cudnn(bool): Use cudnn kernel or not, it is valid only when the cudnn + library is installed. Default: True. + act (str): Activation type, if it is set to None, activation is not appended. + Default: None. + name(str|None): A name for this layer(optional). If set None, the layer + will be named automatically. Default: True. + + Returns: + Variable: The tensor variable storing the convolution transpose result. + + Raises: + ValueError: If the shapes of input, filter_size, stride, padding and + groups mismatch. + + Examples: + .. code-block:: python + + data = fluid.layers.data(name='data', shape=[3, 32, 32], dtype='float32') + conv2d_transpose = fluid.layers.conv2d_transpose(input=data, num_filters=2, filter_size=3) + """ + + def __init__(self, + name_scope, + num_filters, + output_size=None, + filter_size=None, + padding=0, + stride=1, + dilation=1, + groups=None, + param_attr=None, + bias_attr=None, + use_cudnn=True, + act=None): + super(Conv2DTranspose, self).__init__(name_scope) + assert param_attr is not False, "param_attr should not be False in conv2d_transpose." + self._param_attr = param_attr + self._bias_attr = bias_attr + self._groups = groups + self._num_filters = num_filters + self._use_cudnn = use_cudnn + self._padding = padding + self._stride = stride + self._dilation = dilation + self._filter_size = filter_size + self._output_size = output_size + self._op_type = 'conv2d_transpose' + + def _build_once(self, input): + input_channel = input.shape[1] + if (input_channel == self._groups and + self._num_filters == input_channel and not self._use_cudnn): + self._op_type = 'depthwise_conv2d_transpose' + + if not isinstance(input, Variable): + raise TypeError("Input of conv2d_transpose must be Variable") + + self._padding = utils.convert_to_list(self._padding, 2, 'padding') + self._stride = utils.convert_to_list(self._stride, 2, 'stride') + self._dilation = utils.convert_to_list(self._dilation, 2, 'dilation') + + if not isinstance(self._use_cudnn, bool): + raise ValueError("use_cudnn should be True or False") + + if self._filter_size is None: + if self._output_size is None: + raise ValueError( + "output_size must be set when filter_size is None") + if isinstance(self._output_size, int): + self._output_size = [self._output_size, self._output_size] + + h_in = input.shape[2] + w_in = input.shape[3] + + filter_size_h = (self._output_size[0] - + (h_in - 1) * self._stride[0] + 2 * self._padding[0] + - 1) // self._dilation[0] + 1 + filter_size_w = (self._output_size[1] - + (w_in - 1) * self._stride[1] + 2 * self._padding[1] + - 1) // self._dilation[1] + 1 + self._filter_size = [filter_size_h, filter_size_w] + else: + self._filter_size = utils.convert_to_list( + self._output_size, 2, 'conv2d_transpose.filter_size') + + if self._output_size is None: + self._output_size = [] + elif isinstance(self._output_size, list) or isinstance( + self._output_size, int): + self._output_size = utils.convert_to_list(self._output_size, 2, + 'output_size') + else: + raise ValueError("output_size should be list or int") + self._padding = utils.convert_to_list(self._padding, 2, 'padding') + self._groups = 1 if self._groups is None else self._groups + filter_shape = [input_channel, self._num_filters // self._groups + ] + self._filter_size + + self._img_filter = self.create_parameter( + dtype=input.dtype, shape=filter_shape, attr=self._param_attr) + + def forward(self, input): + pre_bias = self._helper.create_variable_for_type_inference( + dtype=input.dtype) + self._helper.append_op( + type=self._op_type, + inputs={'Input': [input], + 'Filter': [self._img_filter]}, + outputs={'Output': pre_bias}, + attrs={ + 'output_size': self._output_size, + 'strides': self._stride, + 'paddings': self._padding, + 'dilations': self._dilation, + 'groups': self._groups, + 'use_cudnn': self._use_cudnn + }) + + pre_act = self._helper.append_bias_op(pre_bias, dim_start=1, dim_end=2) + out = self._helper.append_activation(pre_act) + return out + + +class SequenceConv(layers.Layer): + """ + This function creates the op for sequence_conv, using the inputs and + other convolutional configurations for the filters and stride as given + in the input parameters to the function. + + Args: + input (Variable): ${x_comment} + num_filters (int): number of filters. + filter_size (int): the filter size (H and W). + filter_stride (int): stride of the filter. + padding (bool): if True, add paddings. + bias_attr (ParamAttr|bool|None): The parameter attribute for the bias of sequence_conv. + If it is set to False, no bias will be added to the output units. + If it is set to None or one attribute of ParamAttr, sequence_conv + will create ParamAttr as bias_attr. If the Initializer of the bias_attr + is not set, the bias is initialized zero. Default: None. + param_attr (ParamAttr|None): The parameter attribute for learnable parameters/weights + of sequence_conv. If it is set to None or one attribute of ParamAttr, sequence_conv + will create ParamAttr as param_attr. If the Initializer of the param_attr + is not set, the parameter is initialized with Xavier. Default: None. + act (str): Activation type, if it is set to None, activation is not appended. + Default: None. + name (str|None): A name for this layer(optional). If set None, the layer + will be named automatically. Default: None. + + Returns: + Variable: output of sequence_conv + """ + + def __init__(self, + name_scope, + num_filters, + filter_size=3, + filter_stride=1, + padding=None, + bias_attr=None, + param_attr=None, + act=None): + super(SequenceConv, self).__init__(name_scope) + self._num_filters = num_filters + self._filter_size = filter_size + self._filter_stride = filter_stride + self._padding = padding + self._bias_attr = bias_attr + self._param_attr = param_attr + + def _build_once(self, input): + + self._dtype = self._helper.input_dtype(input) + print(self._filter_size) + filter_shape = [self._filter_size * input.shape[1], self._num_filters] + self._filter_param = self.create_parameter( + attr=self.param_attr, shape=filter_shape, dtype=self._dtype) + + def forward(self, input): + pre_bias = self._helper.create_variable_for_type_inference(self._dtype) + self._helper.append_op( + type='sequence_conv', + inputs={ + 'X': [input], + 'Filter': [self._filter_param], + }, + outputs={"Out": pre_bias}, + attrs={ + 'contextStride': self._filter_stride, + 'contextStart': -int(self._filter_size // 2), + 'contextLength': self._filter_size + }) + pre_act = self._helper.append_bias_op(pre_bias) + return self._helper.append_activation(pre_act) diff --git a/python/paddle/fluid/imperative/tracer.py b/python/paddle/fluid/imperative/tracer.py index bd77de7424c4547ea71a3f757de37f47b990d616..28c8586813410f7349da7943a966eaa9cc3816d2 100644 --- a/python/paddle/fluid/imperative/tracer.py +++ b/python/paddle/fluid/imperative/tracer.py @@ -62,7 +62,7 @@ class Tracer(core.Tracer): if len(backward_refs) > 0: op.iop.register_backward_hooks(release_op) - # TODO(minqiyang): remove all inputs and outputs after seperate + # TODO(minqiyang): remove all inputs and outputs after separate # var and grad op.backward_refs = defaultdict(list) for k, v in six.iteritems(op.inputs): diff --git a/python/paddle/fluid/initializer.py b/python/paddle/fluid/initializer.py index 482dfa6fac05bd914efa384bd0f5ec54cfab1dca..8358bb1aba98d8f5699cbda27e657ba6c470d333 100644 --- a/python/paddle/fluid/initializer.py +++ b/python/paddle/fluid/initializer.py @@ -212,7 +212,7 @@ class UniformInitializer(Initializer): if self._seed == 0: self._seed = block.program.random_seed - # to be compatible of fp16 initalizers + # to be compatible of fp16 initializers if var.dtype == VarDesc.VarType.FP16: out_dtype = VarDesc.VarType.FP32 out_var = block.create_var( @@ -756,7 +756,7 @@ class NumpyArrayInitializer(Initializer): values = [int(v) for v in self._value.flat] else: raise ValueError("Unsupported dtype %s", self._value.dtype) - if self._value.size > 1024 * 1024 * 5: + if self._value.size > 1024 * 1024 * 1024: raise ValueError("The size of input is too big. Please consider " "saving it to file and 'load_op' to load it") op = block._prepend_op( diff --git a/python/paddle/fluid/install_check.py b/python/paddle/fluid/install_check.py new file mode 100644 index 0000000000000000000000000000000000000000..3569a8bc357daf9408e8ae3eb53ad9d2942cfeaa --- /dev/null +++ b/python/paddle/fluid/install_check.py @@ -0,0 +1,69 @@ +# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from .framework import Program, program_guard, unique_name, default_startup_program +from .param_attr import ParamAttr +from .initializer import Constant +from . import layers +from . import backward +from .imperative import Layer, nn +from . import executor + +from . import core +import numpy as np + +__all__ = ['run_check'] + + +class SimpleLayer(Layer): + def __init__(self, name_scope): + super(SimpleLayer, self).__init__(name_scope) + self._fc1 = nn.FC(self.full_name(), + 3, + ParamAttr(initializer=Constant(value=0.1))) + + def forward(self, inputs): + x = self._fc1(inputs) + x = layers.reduce_sum(x) + return x + + +def run_check(): + ''' intall check to verify if install is success + + This func should not be called only if you need to verify installation + ''' + print("Running Verify Fluid Program ... ") + prog = Program() + startup_prog = Program() + scope = core.Scope() + with executor.scope_guard(scope): + with program_guard(prog, startup_prog): + with unique_name.guard(): + np_inp = np.array([[1.0, 2.0], [3.0, 4.0]], dtype=np.float32) + inp = layers.data( + name="inp", shape=[2, 2], append_batch_size=False) + simple_layer = SimpleLayer("simple_layer") + out = simple_layer(inp) + param_grads = backward.append_backward( + out, parameter_list=[simple_layer._fc1._w.name])[0] + exe = executor.Executor(core.CPUPlace( + ) if not core.is_compiled_with_cuda() else core.CUDAPlace(0)) + exe.run(default_startup_program()) + exe.run(feed={inp.name: np_inp}, + fetch_list=[out.name, param_grads[1].name]) + + print( + "Your Paddle Fluid is installed successfully! Let's start deep Learning with Paddle Fluid now" + ) diff --git a/python/paddle/fluid/io.py b/python/paddle/fluid/io.py index 326a84d82b5718dad898620a6d9e0490f7519448..4d5523627218601d00021c72a8777b4b6413880e 100644 --- a/python/paddle/fluid/io.py +++ b/python/paddle/fluid/io.py @@ -26,12 +26,14 @@ from paddle.fluid import layers from paddle.fluid.executor import Executor from paddle.fluid.evaluator import Evaluator from paddle.fluid.framework import Program, Parameter, default_main_program, default_startup_program, Variable, program_guard +from . import reader +from .reader import * from . import core __all__ = [ 'save_vars', 'save_params', 'save_persistables', 'load_vars', 'load_params', 'load_persistables', 'save_inference_model', 'load_inference_model' -] +] + reader.__all__ def is_parameter(var): diff --git a/python/paddle/fluid/layer_helper.py b/python/paddle/fluid/layer_helper.py index 6f60fad94dca5b02bca14cda33df14c459d1a075..a85ef3c13f845959200d26391f6c95923a11c6ed 100644 --- a/python/paddle/fluid/layer_helper.py +++ b/python/paddle/fluid/layer_helper.py @@ -151,13 +151,7 @@ class LayerHelper(LayerHelperBase): act['use_mkldnn'] = self.kwargs.get('use_mkldnn') act_type = act.pop('type') - tmp = input_var - # NOTE(dzhwinter): some activation support inplace compution. - # NOTE(minqiyang): currently, we don't support inplace in imperative mode - if not _in_imperative_mode() and core.IsInplace(act_type): - tmp = input_var - else: - tmp = self.create_variable_for_type_inference(dtype=input_var.dtype) + tmp = self.create_variable_for_type_inference(dtype=input_var.dtype) self.append_op( type=act_type, inputs={"X": [input_var]}, diff --git a/python/paddle/fluid/layer_helper_base.py b/python/paddle/fluid/layer_helper_base.py index 3504cb7935178f28369914ecbd93c24b82622b11..a68160d797bcaca8cff849c560960d6a8823de53 100644 --- a/python/paddle/fluid/layer_helper_base.py +++ b/python/paddle/fluid/layer_helper_base.py @@ -268,11 +268,9 @@ class LayerHelperBase(object): """ # Deepcopy the attr so that parameters can be shared in program attr = copy.deepcopy(attr) - if attr is None: - attr = ParamAttr._to_attr(attr) + attr = ParamAttr._to_attr(attr) if not attr: return None - assert isinstance(attr, ParamAttr) suffix = 'b' if is_bias else 'w' if attr.name is None: diff --git a/python/paddle/fluid/layers/io.py b/python/paddle/fluid/layers/io.py index a9b391fd53a98dc05ee2d909a38dcf82cd5880ea..94fd9f3ea5a41a542da0115a66a52a5cd7f26748 100644 --- a/python/paddle/fluid/layers/io.py +++ b/python/paddle/fluid/layers/io.py @@ -563,22 +563,26 @@ def _py_reader(capacity, def start_provide_thread(func): def __provider_thread__(): - for tensors in func(): - array = core.LoDTensorArray() - for item in tensors: - if not isinstance(item, core.LoDTensor): - tmp = core.LoDTensor() - tmp.set(item, core.CPUPlace()) - item = tmp - - array.append(item) - - if reader.exited: - break - feed_queue.push(array) - if reader.exited: - break - feed_queue.close() + try: + for tensors in func(): + array = core.LoDTensorArray() + for item in tensors: + if not isinstance(item, core.LoDTensor): + tmp = core.LoDTensor() + tmp.set(item, core.CPUPlace()) + item = tmp + + array.append(item) + + if reader.exited: + break + feed_queue.push(array) + if reader.exited: + break + feed_queue.close() + except Exception as ex: + feed_queue.close() + raise ex reader.thread = threading.Thread(target=__provider_thread__) reader.thread.daemon = True @@ -628,6 +632,9 @@ def _py_reader(capacity, reader.reset = __reset__ reader.decorate_tensor_provider = __set_tensor_provider__ reader.decorate_paddle_reader = __set_paddle_reader__ + + reader.decorate_batch_generator = __set_tensor_provider__ + reader.decorate_sample_list_generator = __set_paddle_reader__ reader.start = __start__ return reader @@ -692,6 +699,11 @@ def py_reader(capacity, >>> exe.run(fetch_list=[loss.name]) >>> except fluid.core.EOFException: >>> reader.reset() + >>> + >>> ... + >>> + >>> fluid.io.save_inference_model(dirname='./model', feeded_var_names=[img, label], + >>> target_vars=[loss], executor=fluid.Executor(fluid.CUDAPlace(0))) 2. When training and testing are both performed, two different :code:`py_reader` should be created with different names, e.g.: diff --git a/python/paddle/fluid/layers/nn.py b/python/paddle/fluid/layers/nn.py index 76c4c146d19e3d48c19d9f821bbc025a653dfd42..a8924a1e8a57b4be28d8883094ea1a32539bcd9c 100644 --- a/python/paddle/fluid/layers/nn.py +++ b/python/paddle/fluid/layers/nn.py @@ -191,6 +191,7 @@ __all__ = [ 'huber_loss', 'tree_conv', 'npair_loss', + 'fsp_matrix', ] kIgnoreIndex = -100 @@ -6307,7 +6308,8 @@ def one_hot(input, depth): type="one_hot", inputs={'X': input}, attrs={'depth': depth}, - outputs={'Out': one_hot_out}) + outputs={'Out': one_hot_out}, + stop_gradient=True) return one_hot_out @@ -10892,3 +10894,46 @@ def npair_loss(anchor, positive, labels, l2_reg=0.002): celoss = reduce_mean(cross_entropy) return l2loss + celoss + + +def fsp_matrix(x, y): + """ + + **FSP matrix op** + + This op is used to calculate the flow of solution procedure (FSP) matrix of two feature maps. + Given feature map x with shape [x_channel, h, w] and feature map y with shape + [y_channel, h, w], we can get the fsp matrix of x and y in two steps: + + 1. reshape x into matrix with shape [x_channel, h * w] and reshape and + transpose y into matrix with shape [h * w, y_channel]. + 2. multiply x and y to get fsp matrix with shape [x_channel, y_channel]. + + The output is a batch of fsp matrices. + + Args: + + x (Variable): A feature map with shape [batch_size, x_channel, height, width]. + y (Variable): A feature map with shape [batch_size, y_channel, height, width]. + The y_channel can be different with the x_channel of Input(X) + while the other dimensions must be the same with Input(X)'s. + + Returns: + + fsp matrix (Variable): The output of FSP op with shape [batch_size, x_channel, y_channel]. + The x_channel is the channel of x and the y_channel is the channel of y. + + Examples: + + .. code-block:: python + + feature_map_0 = fluid.layers.conv2d(x) + feature_map_1 = fluid.layers.conv2d(feature_map_0) + loss = fluid.layers.fsp_matrix(feature_map_0, feature_map_1) + + """ + helper = LayerHelper('fsp_matrix', **locals()) + out = helper.create_variable_for_type_inference(dtype=helper.input_dtype( + input_param_name='x')) + helper.append_op(type='fsp', inputs={'X': x, 'Y': y}, outputs={'Out': out}) + return out diff --git a/python/paddle/fluid/layers/tensor.py b/python/paddle/fluid/layers/tensor.py index a18e5b6a9c3fe69ee0bcadc150f07b72227df85e..ef90638c721810e618ce4760e83e1a63b86c2325 100644 --- a/python/paddle/fluid/layers/tensor.py +++ b/python/paddle/fluid/layers/tensor.py @@ -13,7 +13,7 @@ # limitations under the License. from __future__ import print_function - +from six.moves import reduce from ..layer_helper import LayerHelper from ..param_attr import ParamAttr from ..framework import convert_np_dtype_to_dtype_ diff --git a/python/paddle/fluid/optimizer.py b/python/paddle/fluid/optimizer.py index 8918886a804847bc31ba6a3eeb175194bed7c4ec..c0deb5eaccaefa52271b2c30e9f8b1d339624919 100644 --- a/python/paddle/fluid/optimizer.py +++ b/python/paddle/fluid/optimizer.py @@ -165,6 +165,8 @@ class Optimizer(object): name = self._name + "_" + name if (name in self._accumulators and param.name in self._accumulators[name]): + if framework._in_imperative_mode(): + return self._accumulators[name][param.name] raise Exception("Accumulator {} already exists for parameter {}". format(name, param.name)) if shape == None: @@ -397,13 +399,14 @@ class Optimizer(object): for param in parameters: if not param.trainable: continue - # create gradient variable - grad_var = Variable( - block=loss.block, - name=param._ivar._grad_name(), - stop_gradient=True, - ivar=param._ivar._grad_ivar()) - params_grads.append((param, grad_var)) + if param._ivar._grad_ivar() is not None: + # create gradient variable + grad_var = Variable( + block=loss.block, + name=param._ivar._grad_name(), + stop_gradient=True, + ivar=param._ivar._grad_ivar()) + params_grads.append((param, grad_var)) with program_guard(framework.default_main_program(), framework.default_startup_program()): optimize_ops = self._create_optimization_pass(params_grads) diff --git a/python/paddle/fluid/parallel_executor.py b/python/paddle/fluid/parallel_executor.py index 517418da1cf2f745ee5578e3c2b118394db7fae7..6702fc808b121d80fe555412e2cc7f673d6d8389 100644 --- a/python/paddle/fluid/parallel_executor.py +++ b/python/paddle/fluid/parallel_executor.py @@ -99,7 +99,8 @@ class ParallelExecutor(object): build_strategy.num_trainers = num_trainers build_strategy.trainer_id = trainer_id - self._places = compiler.get_available_places(use_cuda) + self._places = framework.cuda_places( + ) if use_cuda else framework.cpu_places() self._scope = scope if scope is not None else executor.global_scope() main_program = main_program if main_program is not None \ diff --git a/python/paddle/fluid/reader.py b/python/paddle/fluid/reader.py new file mode 100644 index 0000000000000000000000000000000000000000..74ee2828deb6ecd51ff36b878e97254a62ad1cb6 --- /dev/null +++ b/python/paddle/fluid/reader.py @@ -0,0 +1,373 @@ +# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from . import core +import six +import threading +from .framework import Program, Variable, program_guard, default_main_program, default_startup_program +from .executor import global_scope +from .data_feeder import DataFeeder, BatchedTensorProvider +from .layers.io import monkey_patch_reader_methods, _copy_reader_var_, double_buffer +from .unique_name import UniqueNameGenerator + +__all__ = ['PyReader'] + + +def _convert_places(places): + if not isinstance(places, (list, tuple)): + places = [places] + + ret = [] + for p in places: + if not isinstance(p, core.Place): + tmp = core.Place() + tmp.set_place(p) + p = tmp + + ret.append(p) + return ret + + +class PyReader(object): + """ + Create a reader object for data feeding in Python. + Data would be prefetched using Python thread and be pushed + into a queue asynchronously. Data in the queue would be extracted + automatically when `Executor.run(...)` is called. + + Args: + feed_list (list(Variable)|tuple(Variable)): feed variable list. + The variables should be created by :code:`fluid.layers.data()`. + capacity (int): capacity of the queue maintained in PyReader object. + use_double_buffer (bool): whether to use double_buffer_reader to + speed up data feeding. + iterable (bool): whether the created reader object is iterable. + + Returns: + reader (Reader): the created reader object. + + Examples: + 1. If iterable = False, the created PyReader object is almost the + same as :code:`fluid.layers.py_reader()`. Operators would be + inserted into the program. User should call :code:`start()` + before each epoch and catch :code:`fluid.core.EOFException` + thrown by :code:`Executor.run()` when epoch ends. Once the + exception is caught, user should call :code:`reset()` to reset + the reader manually. + + .. code-block:: python + + image = fluid.layers.data( + name='image', shape=[784], dtype='float32') + label = fluid.layers.data( + name='label', shape=[1], dtype='int64') + + reader = fluid.io.PyReader(feed_list=[image, label], + capacity=4, iterable=False) + reader.decorate_sample_list_generator(user_defined_reader) + ... # definition of network is omitted + executor.run(fluid.default_main_program()) + for _ in range(EPOCH_NUM): + reader.start() + while True: + try: + executor.run(feed=None, ...) + except fluid.core.EOFException: + reader.reset() + break + + 2. If iterable=True, the created PyReader object is decoupled with + the program. No operator would be inserted into the program. + In this case, the created reader is a Python generator, which + is iterable. User should feed the data yielded from PyReader + object into :code:`Executor.run(feed=...)`. + + .. code-block:: python + + image = fluid.layers.data( + name='image', shape=[784], dtype='float32') + label = fluid.layers.data( + name='label', shape=[1], dtype='int64') + + reader = fluid.io.PyReader(feed_list=[image, label], + capacity=4, iterable=True) + reader.decorate_sample_list_generator(user_defined_reader, + places=fluid.cuda_places()) + ... # definition of network is omitted + executor.run(fluid.default_main_program()) + for _ in range(EPOCH_NUM): + for data in reader(): + executor.run(feed=data, ...) + """ + + unique_name_generator = UniqueNameGenerator() + + def __init__(self, + feed_list, + capacity, + use_double_buffer=True, + iterable=False): + self._tensor_reader = None + self._thread = None + self._iterable = iterable + self._use_double_buffer = use_double_buffer + self._capacity = capacity + self._feed_list = feed_list + if not self._iterable: + self._init_non_iterable() + + def _init_iterable(self, places): + self._var_names = [v.name for v in self._feed_list] + self._places = _convert_places(places) + self._queue = core.init_lod_tensor_blocking_queue(core.Variable(), + self._capacity) + self._reader = core.create_py_reader( + self.queue, self._var_names, self._places, self._use_double_buffer) + + def _init_non_iterable(self): + lod_levels = [] + dtypes = [] + shape_concat = [] + ranks = [] + shapes = [] + + for feed_data in self._feed_list: + dtypes.append(feed_data.dtype) + shape_concat.extend(feed_data.shape) + ranks.append(len(feed_data.shape)) + shapes.append(feed_data.shape) + lod_levels.append(feed_data.lod_level) + + queue_name = PyReader.unique_name_generator('lod_tensor_blocking_queue') + reader_name = PyReader.unique_name_generator('create_py_reader') + double_buffer_name = PyReader.unique_name_generator('double_buffer') + + var = global_scope().var(queue_name) + self._queue = core.init_lod_tensor_blocking_queue(var, self._capacity) + + startup_blk = default_startup_program().current_block() + startup_var = startup_blk.create_var(name=reader_name) + + startup_blk.append_op( + type='create_py_reader', + inputs={'blocking_queue': [queue_name]}, + outputs={'Out': [startup_var]}, + attrs={ + 'shape_concat': shape_concat, + 'lod_levels': lod_levels, + 'ranks': ranks + }) + + startup_var.desc.set_dtypes(dtypes) + startup_var.persistable = True + + main_prog_var = _copy_reader_var_( + default_main_program().current_block(), startup_var) + + main_prog_var.stop_gradient = True + main_prog_var.persistable = True + + reader = monkey_patch_reader_methods(main_prog_var) + if self._use_double_buffer: + double_buffer_reader = double_buffer( + reader, name=double_buffer_name) + # we return a double buffer reader. However, the reset method comes from + # py_reader. + double_buffer_reader.reset = reader.reset + reader = double_buffer_reader + + self._reader = reader + + default_main_program().current_block().append_op( + type='read', + inputs={'Reader': [self._reader]}, + outputs={'Out': self._feed_list}) + + @property + def queue(self): + return self._queue + + @property + def iterable(self): + return self._iterable + + def __call__(self): + assert self.iterable, "PyReader is not iterable" + assert self._tensor_reader is not None, \ + "Data source of PyReader has not set yet" + + class Iterator(object): + def __init__(self, reader): + self._reader = reader._reader + self._reset = reader._reset + + def __iter__(self): + return self + + def __next__(self): + return self.next() + + def next(self): + ret = self._reader.read_next() + if ret: + return ret + else: + self._reset() + raise StopIteration + + self._start() + return Iterator(self) + + def _reset(self): + self._reader.reset() + self._thread.join() + + def start(self): + ''' + Start the data feeding thread. + Can only call when the reader object is not iterable. + ''' + assert not self._iterable, "start() cannot be called when PyReader is iterable" + self._start() + + def reset(self): + ''' + Reset the reader object when :code:`fluid.core.EOFException` raises. + Can only call when the reader object is not iterable. + ''' + assert not self._iterable, "reset() cannot be called when PyReader is iterable" + self._reset() + + def _start(self): + def __thread_main__(): + try: + for tensors in self._tensor_reader(): + array = core.LoDTensorArray() + for item in tensors: + if not isinstance(item, core.LoDTensor): + tmp = core.LoDTensor() + tmp.set(item, core.CPUPlace()) + item = tmp + + array.append(item) + + if not self._queue.push(array): + break + + self._queue.close() + except Exception as ex: + self._queue.close() + raise ex + + self._thread = threading.Thread(target=__thread_main__) + self._thread.daemon = True + self._thread.start() + + def decorate_sample_generator(self, + sample_generator, + batch_size, + drop_last=True, + places=None): + ''' + Set the data source of the PyReader object. + + The provided :code:`sample_generator` should be a Python generator, + which yields numpy.ndarray typed data of each sample. + + :code:`places` must be set when the PyReader object is iterable. + + If all inputs have no lods, this method is faster than + :code:`decorate_sample_list_generator(paddle.batch(sample_generator, ...))` . + + Args: + sample_generator (generator): Python generator that yields + numpy.ndarray-typed sample data. + batch_size (int): batch size. Must be larger than 0. + drop_last (bool): Whether to drop the last batch when sample number + is less than batch_size. + places (None|list(CUDAPlace)|list(CPUPlace)): place list. Must + be provided when PyReader is iterable. + ''' + assert batch_size > 0, "batch_size must be larger than 0" + has_lod = False + for f in self._feed_list: + if f.lod_level != 0: + has_lod = True + break + + if has_lod: + self.decorate_sample_list_generator( + paddle.batch( + sample_generator, + batch_size=batch_size, + drop_last=drop_last), + places=places) + else: + reader = BatchedTensorProvider( + feed_list=self._feed_list, + place=core.CPUPlace(), + batch_size=batch_size, + generator=sample_generator, + drop_last=drop_last) + self.decorate_batch_generator(reader, places=places) + + def decorate_sample_list_generator(self, reader, places=None): + ''' + Set the data source of the PyReader object. + + The provided :code:`reader` should be a Python generator, + which yields list(numpy.ndarray) typed batched data. + + :code:`places` must be set when the PyReader object is iterable. + + Args: + reader (generator): Python generator that yields + list(numpy.ndarray)-typed batched data. + places (None|list(CUDAPlace)|list(CPUPlace)): place list. Must + be provided when PyReader is iterable. + ''' + assert self._tensor_reader is None, \ + "Cannot reset the data source of PyReader" + with program_guard(Program(), Program()): + feeder = DataFeeder( + feed_list=self._feed_list, place=core.CPUPlace()) + paddle_reader = feeder.decorate_reader(reader, multi_devices=False) + + def __tensor_reader_impl__(): + for slots in paddle_reader(): + yield [slots[var.name] for var in self._feed_list] + + self.decorate_batch_generator(__tensor_reader_impl__, places) + + def decorate_batch_generator(self, reader, places=None): + ''' + Set the data source of the PyReader object. + + The provided :code:`reader` should be a Python generator, + which yields numpy.ndarray-typed or LoDTensor-typed batched data. + + :code:`places` must be set when the PyReader object is iterable. + + Args: + reader (generator): Python generator that yields LoDTensor-typed + batched data. + places (None|list(CUDAPlace)|list(CPUPlace)): place list. Must + be provided when PyReader is iterable. + ''' + assert self._tensor_reader is None, \ + "Cannot reset the data source of PyReader" + self._tensor_reader = reader + if self._iterable: + assert places is not None, "Places cannot be None when py_reader is iterable" + self._init_iterable(places) diff --git a/python/paddle/fluid/tests/unittests/dist_ctr_reader.py b/python/paddle/fluid/tests/unittests/dist_ctr_reader.py index 95e39d891f7e6a3dcb57540bd96fe70027443cda..48a4768782c1b4aa8ff6cfdbda9c8e8eb717d08f 100644 --- a/python/paddle/fluid/tests/unittests/dist_ctr_reader.py +++ b/python/paddle/fluid/tests/unittests/dist_ctr_reader.py @@ -20,7 +20,7 @@ logging.basicConfig() logger = logging.getLogger("paddle") logger.setLevel(logging.INFO) -DATA_URL = "http://paddle-ctr-data.cdn.bcebos.com/avazu_ctr_data.tgz" +DATA_URL = "http://paddle-ctr-data.bj.bcebos.com/avazu_ctr_data.tgz" DATA_MD5 = "c11df99fbd14e53cd4bfa6567344b26e" """ avazu_ctr_data/train.txt diff --git a/python/paddle/fluid/tests/unittests/test_decoupled_py_reader.py b/python/paddle/fluid/tests/unittests/test_decoupled_py_reader.py new file mode 100644 index 0000000000000000000000000000000000000000..377014510b55633f697ef7bf2f5f597281e5f5a5 --- /dev/null +++ b/python/paddle/fluid/tests/unittests/test_decoupled_py_reader.py @@ -0,0 +1,175 @@ +# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import paddle +import paddle.fluid as fluid +import numpy as np +import time +import six +import unittest + +EPOCH_NUM = 60 +BATCH_SIZE = 32 +CLASS_NUM = 10 + + +def random_reader(): + np.random.seed(1) + for i in range(BATCH_SIZE * 40): + image = np.random.random([784]) + label = np.random.random_integers(low=0, high=CLASS_NUM - 1) + yield image, label + + +def simple_fc_net(places, use_legacy_py_reader, use_double_buffer): + startup_prog = fluid.Program() + main_prog = fluid.Program() + startup_prog.random_seed = 1 + main_prog.random_seed = 1 + + with fluid.unique_name.guard(): + with fluid.program_guard(main_prog, startup_prog): + image = fluid.layers.data( + name='image', shape=[784], dtype='float32') + label = fluid.layers.data(name='label', shape=[1], dtype='int64') + py_reader = fluid.io.PyReader( + feed_list=[image, label], + capacity=4, + iterable=not use_legacy_py_reader, + use_double_buffer=use_double_buffer) + hidden = image + for hidden_size in [10, 20, 30]: + hidden = fluid.layers.fc( + hidden, + size=hidden_size, + act='tanh', + bias_attr=fluid.ParamAttr( + initializer=fluid.initializer.Constant(value=1.0))) + + predict_label = fluid.layers.fc(hidden, + size=CLASS_NUM, + act='softmax') + loss = fluid.layers.mean( + fluid.layers.cross_entropy( + input=predict_label, label=label)) + + optimizer = fluid.optimizer.Adam() + optimizer.minimize(loss) + return startup_prog, main_prog, py_reader, loss + + +class TestBase(unittest.TestCase): + def run_main(self, use_legacy_py_reader, with_data_parallel, places, + use_double_buffer): + scope = fluid.Scope() + with fluid.scope_guard(scope): + startup_prog, main_prog, py_reader, loss = simple_fc_net( + places, use_legacy_py_reader, use_double_buffer) + + reader = paddle.batch(random_reader, batch_size=BATCH_SIZE) + + ps = places if use_double_buffer else fluid.cpu_places(len(places)) + + py_reader.decorate_sample_list_generator( + reader, places=ps if py_reader.iterable else None) + + exe = fluid.Executor(place=places[0]) + exe.run(startup_prog) + + prog = fluid.CompiledProgram(main_prog) + if with_data_parallel: + prog = prog.with_data_parallel( + loss_name=loss.name, places=places) + + step = 0 + step_list = [] + loss_list = [] + start_t = time.time() + if not py_reader.iterable: + for _ in six.moves.range(EPOCH_NUM): + step = 0 + py_reader.start() + while True: + try: + L, = exe.run(program=prog, + fetch_list=[loss], + use_program_cache=True) + loss_list.append(np.mean(L)) + step += 1 + except fluid.core.EOFException: + py_reader.reset() + break + step_list.append(step) + else: + for _ in six.moves.range(EPOCH_NUM): + step = 0 + for d in py_reader(): + assert len(d) == len(places) + for i, item in enumerate(d): + image = item['image'] + label = item['label'] + assert image.shape() == [BATCH_SIZE, 784] + assert label.shape() == [BATCH_SIZE, 1] + assert image._place()._equals(ps[i]) + assert label._place()._equals(ps[i]) + L, = exe.run(program=prog, + feed=d, + fetch_list=[loss], + use_program_cache=True) + loss_list.append(np.mean(L)) + step += 1 + step_list.append(step) + end_t = time.time() + ret = { + "time": end_t - start_t, + "step": step_list, + "loss": np.array(loss_list) + } + return ret + + def prepare_places(self, with_data_parallel, with_cpu=True, with_gpu=True): + places = [] + if with_cpu: + places.append([fluid.CPUPlace()]) + if with_data_parallel: + places.append([fluid.CPUPlace()] * 2) + + if with_gpu and fluid.core.is_compiled_with_cuda(): + tmp = fluid.cuda_places() + assert len(tmp) > 0, "no gpu detected" + if with_data_parallel: + places.append(tmp) + places.append([tmp[0]]) + return places + + def test_main(self): + for with_data_parallel in [True, False]: + for p in self.prepare_places(with_data_parallel): + for use_double_buffer in [False, True]: + results = [] + for use_legacy_py_reader in [False, True]: + ret = self.run_main( + use_legacy_py_reader=use_legacy_py_reader, + with_data_parallel=with_data_parallel, + places=p, + use_double_buffer=use_double_buffer) + results.append(ret) + if not use_double_buffer: + diff = np.max( + np.abs(results[0]['loss'] - results[1]['loss'])) + self.assertLess(diff, 1e-3) + + +if __name__ == '__main__': + unittest.main() diff --git a/python/paddle/fluid/tests/unittests/test_dist_save_load.py b/python/paddle/fluid/tests/unittests/test_dist_save_load.py index e795bc410ee45a18cc0c7c914636f5b03309fad1..8c2d6d9b4dc0624daea7b6968d47bae9e925e034 100644 --- a/python/paddle/fluid/tests/unittests/test_dist_save_load.py +++ b/python/paddle/fluid/tests/unittests/test_dist_save_load.py @@ -68,9 +68,9 @@ class TestDistSaveLoadDense2x2(TestDistBase): train0_np = np.array(tr0_var) train1_np = np.array(tr1_var) - self.assertAlmostEqual(local_np.all(), train0_np.all(), delta=delta) - self.assertAlmostEqual(local_np.all(), train1_np.all(), delta=delta) - self.assertAlmostEqual(train0_np.all(), train1_np.all(), delta=delta) + np.testing.assert_almost_equal(local_np, train0_np, decimal=2) + np.testing.assert_almost_equal(local_np, train1_np, decimal=2) + np.testing.assert_almost_equal(train0_np, train1_np, decimal=2) def test_dist(self): need_envs = { @@ -134,10 +134,8 @@ class TestDistSaveLoadWithPServerStateDense2x2(TestDistBase): train0_2_np = np.array(tr0_var_2) train1_2_np = np.array(tr1_var_2) - self.assertAlmostEqual( - train0_1_np.all(), train0_2_np.all(), delta=delta) - self.assertAlmostEqual( - train1_1_np.all(), train1_2_np.all(), delta=delta) + np.testing.assert_almost_equal(train0_1_np, train0_2_np, decimal=2) + np.testing.assert_almost_equal(train1_1_np, train1_2_np, decimal=2) def test_dist(self): need_envs = { diff --git a/python/paddle/fluid/tests/unittests/test_dist_transformer.py b/python/paddle/fluid/tests/unittests/test_dist_transformer.py index 25dcccc28d710695d4c5e08c17816669d0fae5d8..3307caa8b2d62d5a31a7eeb36bb207b31d749b55 100644 --- a/python/paddle/fluid/tests/unittests/test_dist_transformer.py +++ b/python/paddle/fluid/tests/unittests/test_dist_transformer.py @@ -21,7 +21,7 @@ from test_dist_base import TestDistBase def download_files(): - url_prefix = 'http://paddle-unittest-data.cdn.bcebos.com/dist_transformer/' + url_prefix = 'http://paddle-unittest-data.bj.bcebos.com/dist_transformer/' vocab_url = url_prefix + 'vocab.bpe.32000' vocab_md5 = 'a86d345ca6e27f6591d0dccb1b9be853' paddle.dataset.common.download(vocab_url, 'test_dist_transformer', diff --git a/python/paddle/fluid/tests/unittests/test_eager_deletion_delete_vars.py b/python/paddle/fluid/tests/unittests/test_eager_deletion_delete_vars.py new file mode 100644 index 0000000000000000000000000000000000000000..adf07897d561cf49c70841c5a4114b51b4cf55f1 --- /dev/null +++ b/python/paddle/fluid/tests/unittests/test_eager_deletion_delete_vars.py @@ -0,0 +1,183 @@ +# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os +import numpy as np +os.environ['FLAGS_use_ngraph'] = '0' +os.environ['FLAGS_use_mkldnn'] = '0' +os.environ['CPU_NUM'] = '4' + +import paddle.fluid as fluid +import six +import unittest +import multiprocessing + +fluid.core._set_eager_deletion_mode(0.0, 1.0, True) + + +def simple_fc_net(): + image = fluid.layers.data(name='image', shape=[784], dtype='float32') + label = fluid.layers.data(name='label', shape=[1], dtype='int64') + hidden = image + for _ in range(4): + hidden = fluid.layers.fc( + hidden, + size=200, + act='tanh', + bias_attr=fluid.ParamAttr( + initializer=fluid.initializer.Constant(value=1.0))) + prediction = fluid.layers.fc(hidden, size=10, act='softmax') + loss = fluid.layers.cross_entropy(input=prediction, label=label) + loss = fluid.layers.mean(loss) + optimizer = fluid.optimizer.Adam(learning_rate=1e-3) + optimizer.minimize(loss) + return image, label, loss + + +def get_persistables_and_non_persistables(prog, fetch_list): + num_block = prog.num_blocks + persitables = set() + non_persistables = set() + for bid in six.moves.range(num_block): + block = prog.block(bid) + for _, var in block.vars.items(): + if var.persistable or var.name in fetch_list: + persitables.add(var.name) + else: + non_persistables.add(var.name) + + return persitables, non_persistables + + +class TestExecutor(unittest.TestCase): + def test_executor_main(self): + places = [fluid.CPUPlace()] + if fluid.core.is_compiled_with_cuda(): + places.append(fluid.CUDAPlace(0)) + + for p in places: + self.place = p + with fluid.program_guard(fluid.Program(), fluid.Program()): + with fluid.scope_guard(fluid.Scope()): + with fluid.unique_name.guard(): + self.executor_main() + + for p in places: + self.place = p + with fluid.program_guard(fluid.Program(), fluid.Program()): + with fluid.scope_guard(fluid.Scope()): + with fluid.unique_name.guard(): + self.pe_main() + + def prepare_feed(self, image, label, dev_cnt=1): + batch_size = 32 * dev_cnt + image_shape = (batch_size, ) + tuple(image.shape[1:]) + label_shape = (batch_size, ) + tuple(label.shape[1:]) + + image_np = np.random.random(size=image_shape).astype('float32') + label_np = np.random.random_integers( + low=0, high=9, size=label_shape).astype('int64') + + return image_np, label_np + + def assertScopeVar(self, scope, persitables, non_persistables): + outline_p_vars = [] + for name in persitables: + var = scope.find_var(name) + self.assertTrue(var is not None) + t = var.get_tensor() + if not t._is_initialized(): + outline_p_vars.append(name) + + outline_np_vars = [] + for name in non_persistables: + var = scope.find_var(name) + self.assertTrue(var is not None) + t = var.get_tensor() + if t._is_initialized(): + outline_np_vars.append(name) + + print('Non-alive persistable vars {} in {}'.format(outline_p_vars, + persitables)) + print('Alive non-persistable vars {} in {}'.format(outline_np_vars, + non_persistables)) + self.assertEqual(len(outline_p_vars), 0) + self.assertEqual(len(outline_np_vars), 0) + + def executor_main(self): + image, label, loss = simple_fc_net() + loss.persistable = False + persistables, non_persistables = get_persistables_and_non_persistables( + fluid.default_main_program(), [loss.name]) + print('Non-persistable var number {}'.format(len(non_persistables))) + print(non_persistables) + + exe = fluid.Executor(self.place) + exe.run(fluid.default_startup_program()) + + p = fluid.core.Place() + p.set_place(self.place) + exe = fluid.core.Executor(p) + + for _ in six.moves.range(10): + image_np, label_np = self.prepare_feed(image, label) + fluid.global_scope().var(image.name).get_tensor().set(image_np, + self.place) + fluid.global_scope().var(label.name).get_tensor().set(label_np, + self.place) + # exe.run would not create local scope + # so that we can detect whether gc clears temporary variables + exe.run(fluid.default_main_program().desc, + fluid.global_scope(), 0, False, True, [loss.name]) + self.assertScopeVar(fluid.global_scope(), persistables, + non_persistables) + + def pe_main(self): + image, label, loss = simple_fc_net() + loss.persistable = False + persitables, non_persistables = get_persistables_and_non_persistables( + fluid.default_main_program(), [loss.name]) + + exe = fluid.Executor(self.place) + exe.run(fluid.default_startup_program()) + + exec_strategy = fluid.ExecutionStrategy() + exec_strategy.num_iteration_per_drop_scope = 100 + + build_strategy = fluid.BuildStrategy() + build_strategy.memory_optimize = False + build_strategy.enable_inplace = False + + prog = fluid.CompiledProgram(fluid.default_main_program( + )).with_data_parallel( + loss_name=loss.name, exec_strategy=exec_strategy) + + dev_cnt = fluid.core.get_cuda_device_count() if isinstance(self.place, fluid.CUDAPlace) \ + else int(os.environ.get('CPU_NUM', multiprocessing.cpu_count())) + + for idx in six.moves.range(10): + image_np, label_np = self.prepare_feed(image, label, dev_cnt) + feed = {image.name: image_np, label.name: label_np} + + exe.run(program=prog, feed=feed, fetch_list=[loss]) + + local_scopes = prog._local_scopes + for scope in local_scopes: + kids = scope._kids() + self.assertTrue(len(kids) == 1) + self.assertScopeVar(kids[0], persistables, non_persistables) + + +if __name__ == '__main__': + unittest.main() diff --git a/python/paddle/fluid/tests/unittests/test_eager_deletion_dynamic_rnn_base.py b/python/paddle/fluid/tests/unittests/test_eager_deletion_dynamic_rnn_base.py index 910f53a91a7b5ca1413adf9505ed2c3ad3d56dad..d4c043d9c76f21482f17b9bb20c4fde5ce7cc6e7 100644 --- a/python/paddle/fluid/tests/unittests/test_eager_deletion_dynamic_rnn_base.py +++ b/python/paddle/fluid/tests/unittests/test_eager_deletion_dynamic_rnn_base.py @@ -13,7 +13,6 @@ # limitations under the License. import os -os.environ['FLAGS_eager_delete_tensor_gb'] = '0.0' os.environ['CPU_NUM'] = '2' import six diff --git a/python/paddle/fluid/tests/unittests/test_eager_deletion_gru_net.py b/python/paddle/fluid/tests/unittests/test_eager_deletion_gru_net.py index 5ed3d9fdf3bf765f1b9ef8ba1ef2a5795f1874c7..1023c18f410fb60592154bbdf421d58aa88c71ae 100644 --- a/python/paddle/fluid/tests/unittests/test_eager_deletion_gru_net.py +++ b/python/paddle/fluid/tests/unittests/test_eager_deletion_gru_net.py @@ -16,6 +16,8 @@ import unittest from test_eager_deletion_dynamic_rnn_base import TestBase import paddle.fluid as fluid +fluid.core._set_eager_deletion_mode(0.0, 1.0, True) + def gru_net(data, label, diff --git a/python/paddle/fluid/tests/unittests/test_eager_deletion_lstm_net.py b/python/paddle/fluid/tests/unittests/test_eager_deletion_lstm_net.py index 8462c06aa56e0469fd06c7dc4b2ed514f7eb51ba..6784edb9d7b2e9cd95f8646e9f8a210296dac94e 100644 --- a/python/paddle/fluid/tests/unittests/test_eager_deletion_lstm_net.py +++ b/python/paddle/fluid/tests/unittests/test_eager_deletion_lstm_net.py @@ -16,6 +16,8 @@ from test_eager_deletion_dynamic_rnn_base import TestBase import paddle.fluid as fluid import unittest +fluid.core._set_eager_deletion_mode(0.0, 1.0, True) + def lstm_net(data, label, diff --git a/python/paddle/fluid/tests/unittests/test_eager_deletion_mnist.py b/python/paddle/fluid/tests/unittests/test_eager_deletion_mnist.py index 56dfb095def62bc617948821038f0c15c1547683..ecdf9efa451743f8368079183fcb33f1769a6ab5 100644 --- a/python/paddle/fluid/tests/unittests/test_eager_deletion_mnist.py +++ b/python/paddle/fluid/tests/unittests/test_eager_deletion_mnist.py @@ -14,7 +14,9 @@ import os import unittest -os.environ['FLAGS_eager_delete_tensor_gb'] = "0.0" +import paddle.fluid as fluid + +fluid.core._set_eager_deletion_mode(0.0, 1.0, True) # FIXME(zjl): It seems that this unittest fails randomly # when comparing all reduce last loss and reduce last loss diff --git a/python/paddle/fluid/tests/unittests/test_eager_deletion_transformer.py b/python/paddle/fluid/tests/unittests/test_eager_deletion_transformer.py index 05cc41b96f1992718c21eb5d7d2605dd8d3b2218..44568ff66b61affdd5be809e23ba09597645d470 100644 --- a/python/paddle/fluid/tests/unittests/test_eager_deletion_transformer.py +++ b/python/paddle/fluid/tests/unittests/test_eager_deletion_transformer.py @@ -14,7 +14,9 @@ import os import unittest -os.environ['FLAGS_eager_delete_tensor_gb'] = "0.0" +import paddle.fluid as fluid + +fluid.core._set_eager_deletion_mode(0.0, 1.0, True) os.environ['RECORDIO_FILENAME'] = './eager_deletion_transformer.wmt16.recordio' diff --git a/python/paddle/fluid/tests/unittests/test_eager_deletion_while_op.py b/python/paddle/fluid/tests/unittests/test_eager_deletion_while_op.py index 898d04ebe1c9c2c3a336aeca07ab6ce79a890e0a..581f7eff896791da33e179bb8a10f7742aa2d05e 100644 --- a/python/paddle/fluid/tests/unittests/test_eager_deletion_while_op.py +++ b/python/paddle/fluid/tests/unittests/test_eager_deletion_while_op.py @@ -16,8 +16,6 @@ from __future__ import print_function import os os.environ['CPU_NUM'] = '2' -os.environ['FLAGS_eager_delete_tensor_gb'] = '0.0' -os.environ['FLAGS_fast_eager_deletion_mode'] = '1' import unittest import paddle.fluid as fluid @@ -29,6 +27,8 @@ import paddle.fluid.compiler as compiler import numpy import multiprocessing +fluid.core._set_eager_deletion_mode(0.0, 1.0, True) + class TestEagerDeletionWhileOpBase(unittest.TestCase): def test_main(self): diff --git a/python/paddle/fluid/tests/unittests/test_fsp_op.py b/python/paddle/fluid/tests/unittests/test_fsp_op.py new file mode 100644 index 0000000000000000000000000000000000000000..6ad7418447b4bac5e6a6034f94540091590fa189 --- /dev/null +++ b/python/paddle/fluid/tests/unittests/test_fsp_op.py @@ -0,0 +1,60 @@ +# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import unittest +import numpy as np +from op_test import OpTest + + +def fsp_matrix(a, b): + batch = a.shape[0] + a_channel = a.shape[1] + b_channel = b.shape[1] + h = a.shape[2] + w = a.shape[3] + a_t = a.transpose([0, 2, 3, 1]) + a_t = a_t.reshape([batch, h * w, a_channel]) + b_t = b.transpose([0, 2, 3, 1]).reshape([batch, h * w, b_channel]) + a_r = a_t.repeat( + b_channel, axis=1).reshape( + [batch, h * w, b_channel, a_channel]).transpose([0, 1, 3, 2]) + b_r = b_t.repeat( + a_channel, axis=1).reshape([batch, h * w, a_channel, b_channel]) + return np.mean(a_r * b_r, axis=1) + + +class TestFSPOp(OpTest): + def setUp(self): + self.op_type = "fsp" + self.initTestCase() + + feature_map_0 = np.random.uniform(0, 10, self.a_shape).astype('float32') + feature_map_1 = np.random.uniform(0, 10, self.b_shape).astype('float32') + + self.inputs = {'X': feature_map_0, 'Y': feature_map_1} + self.outputs = {'Out': fsp_matrix(feature_map_0, feature_map_1)} + + def initTestCase(self): + self.a_shape = (2, 16, 32, 31) + self.b_shape = (2, 28, 32, 31) + + def test_check_output(self): + self.check_output() + + def test_check_grad_normal(self): + self.check_grad(['X', 'Y'], 'Out', max_relative_error=0.05) + + +if __name__ == '__main__': + unittest.main() diff --git a/python/paddle/fluid/tests/unittests/test_imperative_deepcf.py b/python/paddle/fluid/tests/unittests/test_imperative_deepcf.py index af80ca6ce77a4ec187dd52863c2fe2ba278d5023..ac123ee8db26ac23bbf9454e399a592a28c91c32 100644 --- a/python/paddle/fluid/tests/unittests/test_imperative_deepcf.py +++ b/python/paddle/fluid/tests/unittests/test_imperative_deepcf.py @@ -15,6 +15,7 @@ import unittest import numpy as np import random +import os import sys import paddle @@ -23,16 +24,17 @@ import paddle.fluid.core as core from test_imperative_base import new_program_scope from paddle.fluid.imperative.base import to_variable -NUM_USERS = 100 -NUM_ITEMS = 1000 +# Can use Amusic dataset as the DeepCF describes. +DATA_PATH = os.environ.get('DATA_PATH', '') -BATCH_SIZE = 32 -NUM_BATCHES = 2 +BATCH_SIZE = int(os.environ.get('BATCH_SIZE', 128)) +NUM_BATCHES = int(os.environ.get('NUM_BATCHES', 5)) +NUM_EPOCHES = int(os.environ.get('NUM_EPOCHES', 1)) -class MLP(fluid.imperative.Layer): +class DMF(fluid.imperative.Layer): def __init__(self, name_scope): - super(MLP, self).__init__(name_scope) + super(DMF, self).__init__(name_scope) self._user_latent = fluid.imperative.FC(self.full_name(), 256) self._item_latent = fluid.imperative.FC(self.full_name(), 256) @@ -61,9 +63,9 @@ class MLP(fluid.imperative.Layer): return fluid.layers.elementwise_mul(users, items) -class DMF(fluid.imperative.Layer): +class MLP(fluid.imperative.Layer): def __init__(self, name_scope): - super(DMF, self).__init__(name_scope) + super(MLP, self).__init__(name_scope) self._user_latent = fluid.imperative.FC(self.full_name(), 256) self._item_latent = fluid.imperative.FC(self.full_name(), 256) self._match_layers = [] @@ -87,21 +89,30 @@ class DMF(fluid.imperative.Layer): class DeepCF(fluid.imperative.Layer): - def __init__(self, name_scope): + def __init__(self, name_scope, num_users, num_items, matrix): super(DeepCF, self).__init__(name_scope) - - self._user_emb = fluid.imperative.Embedding(self.full_name(), - [NUM_USERS, 256]) - self._item_emb = fluid.imperative.Embedding(self.full_name(), - [NUM_ITEMS, 256]) + self._num_users = num_users + self._num_items = num_items + self._rating_matrix = self.create_parameter( + fluid.ParamAttr(trainable=False), + matrix.shape, + matrix.dtype, + is_bias=False, + default_initializer=fluid.initializer.NumpyArrayInitializer(matrix)) + self._rating_matrix._stop_gradient = True self._mlp = MLP(self.full_name()) self._dmf = DMF(self.full_name()) self._match_fc = fluid.imperative.FC(self.full_name(), 1, act='sigmoid') def forward(self, users, items): - users_emb = self._user_emb(users) - items_emb = self._item_emb(items) + # users_emb = self._user_emb(users) + # items_emb = self._item_emb(items) + users_emb = fluid.layers.gather(self._rating_matrix, users) + items_emb = fluid.layers.gather( + fluid.layers.transpose(self._rating_matrix, [1, 0]), items) + users_emb.stop_gradient = True + items_emb.stop_gradient = True mlp_predictive = self._mlp(users_emb, items_emb) dmf_predictive = self._dmf(users_emb, items_emb) @@ -116,27 +127,79 @@ def get_data(): user_ids = [] item_ids = [] labels = [] + NUM_USERS = 100 + NUM_ITEMS = 1000 + matrix = np.zeros([NUM_USERS, NUM_ITEMS], dtype=np.float32) + for uid in range(NUM_USERS): for iid in range(NUM_ITEMS): - # 10% positive - label = float(random.randint(1, 10) == 1) + label = float(random.randint(1, 6) == 1) user_ids.append(uid) item_ids.append(iid) labels.append(label) - indices = np.arange(NUM_USERS * NUM_ITEMS) + matrix[uid, iid] = label + indices = np.arange(len(user_ids)) + np.random.shuffle(indices) + users_np = np.array(user_ids, dtype=np.int32)[indices] + items_np = np.array(item_ids, dtype=np.int32)[indices] + labels_np = np.array(labels, dtype=np.float32)[indices] + return np.expand_dims(users_np, -1), \ + np.expand_dims(items_np, -1), \ + np.expand_dims(labels_np, -1), NUM_USERS, NUM_ITEMS, matrix + + +def load_data(DATA_PATH): + sys.stderr.write('loading from %s\n' % DATA_PATH) + likes = dict() + num_users = -1 + num_items = -1 + with open(DATA_PATH, 'r') as f: + for l in f.readlines(): + uid, iid, rating = [int(v) for v in l.split('\t')] + num_users = max(num_users, uid + 1) + num_items = max(num_items, iid + 1) + if float(rating) > 0.0: + likes[(uid, iid)] = 1.0 + + user_ids = [] + item_ids = [] + labels = [] + matrix = np.zeros([num_users, num_items], dtype=np.float32) + for uid, iid in likes.keys(): + user_ids.append(uid) + item_ids.append(iid) + labels.append(1.0) + matrix[uid, iid] = 1.0 + + negative = 0 + while negative < 3: + nuid = random.randint(0, num_users - 1) + niid = random.randint(0, num_items - 1) + if (nuid, niid) not in likes: + negative += 1 + user_ids.append(nuid) + item_ids.append(niid) + labels.append(0.0) + + indices = np.arange(len(user_ids)) np.random.shuffle(indices) - users_np = np.array(user_ids, dtype=np.int64)[indices] - items_np = np.array(item_ids, dtype=np.int64)[indices] + users_np = np.array(user_ids, dtype=np.int32)[indices] + items_np = np.array(item_ids, dtype=np.int32)[indices] labels_np = np.array(labels, dtype=np.float32)[indices] return np.expand_dims(users_np, -1), \ np.expand_dims(items_np, -1), \ - np.expand_dims(labels_np, -1) + np.expand_dims(labels_np, -1), num_users, num_items, matrix class TestImperativeDeepCF(unittest.TestCase): - def test_gan_float32(self): + def test_deefcf(self): seed = 90 - users_np, items_np, labels_np = get_data() + if DATA_PATH: + (users_np, items_np, labels_np, num_users, num_items, + matrix) = load_data(DATA_PATH) + else: + (users_np, items_np, labels_np, num_users, num_items, + matrix) = get_data() startup = fluid.Program() startup.random_seed = seed @@ -145,11 +208,11 @@ class TestImperativeDeepCF(unittest.TestCase): scope = fluid.core.Scope() with new_program_scope(main=main, startup=startup, scope=scope): - users = fluid.layers.data('users', [1], dtype='int64') - items = fluid.layers.data('items', [1], dtype='int64') + users = fluid.layers.data('users', [1], dtype='int32') + items = fluid.layers.data('items', [1], dtype='int32') labels = fluid.layers.data('labels', [1], dtype='float32') - deepcf = DeepCF('deepcf') + deepcf = DeepCF('deepcf', num_users, num_items, matrix) prediction = deepcf(users, items) loss = fluid.layers.reduce_sum( fluid.layers.log_loss(prediction, labels)) @@ -159,35 +222,44 @@ class TestImperativeDeepCF(unittest.TestCase): exe = fluid.Executor(fluid.CPUPlace( ) if not core.is_compiled_with_cuda() else fluid.CUDAPlace(0)) exe.run(startup) - for slice in range(0, BATCH_SIZE * NUM_BATCHES, BATCH_SIZE): - static_loss = exe.run( - main, - feed={ - users.name: users_np[slice:slice + BATCH_SIZE], - items.name: items_np[slice:slice + BATCH_SIZE], - labels.name: labels_np[slice:slice + BATCH_SIZE] - }, - fetch_list=[loss])[0] - sys.stderr.write('static loss %s\n' % static_loss) + for e in range(NUM_EPOCHES): + sys.stderr.write('epoch %d\n' % e) + for slice in range(0, BATCH_SIZE * NUM_BATCHES, BATCH_SIZE): + if slice + BATCH_SIZE >= users_np.shape[0]: + break + static_loss = exe.run( + main, + feed={ + users.name: users_np[slice:slice + BATCH_SIZE], + items.name: items_np[slice:slice + BATCH_SIZE], + labels.name: labels_np[slice:slice + BATCH_SIZE] + }, + fetch_list=[loss])[0] + sys.stderr.write('static loss %s\n' % static_loss) with fluid.imperative.guard(): fluid.default_startup_program().random_seed = seed fluid.default_main_program().random_seed = seed - deepcf = DeepCF('deepcf') - for slice in range(0, BATCH_SIZE * NUM_BATCHES, BATCH_SIZE): - prediction = deepcf( - to_variable(users_np[slice:slice + BATCH_SIZE]), - to_variable(items_np[slice:slice + BATCH_SIZE])) - loss = fluid.layers.reduce_sum( - fluid.layers.log_loss(prediction, - to_variable(labels_np[slice:slice + - BATCH_SIZE]))) - loss._backward() - adam = fluid.optimizer.AdamOptimizer(0.01) - adam.minimize(loss) - deepcf.clear_gradients() - dy_loss = loss._numpy() + deepcf = DeepCF('deepcf', num_users, num_items, matrix) + adam = fluid.optimizer.AdamOptimizer(0.01) + for e in range(NUM_EPOCHES): + sys.stderr.write('epoch %d\n' % e) + for slice in range(0, BATCH_SIZE * NUM_BATCHES, BATCH_SIZE): + if slice + BATCH_SIZE >= users_np.shape[0]: + break + prediction = deepcf( + to_variable(users_np[slice:slice + BATCH_SIZE]), + to_variable(items_np[slice:slice + BATCH_SIZE])) + loss = fluid.layers.reduce_sum( + fluid.layers.log_loss(prediction, + to_variable(labels_np[ + slice:slice + BATCH_SIZE]))) + loss._backward() + adam.minimize(loss) + deepcf.clear_gradients() + dy_loss = loss._numpy() + sys.stderr.write('dynamic loss: %s %s\n' % (slice, dy_loss)) self.assertEqual(static_loss, dy_loss) diff --git a/python/paddle/fluid/tests/unittests/test_imperative_ptb_rnn.py b/python/paddle/fluid/tests/unittests/test_imperative_ptb_rnn.py index 3b602303ae9a183c7b66f5613321f58898fdfcc2..460ba65a48c863315cda4847aee1b4e2366bba96 100644 --- a/python/paddle/fluid/tests/unittests/test_imperative_ptb_rnn.py +++ b/python/paddle/fluid/tests/unittests/test_imperative_ptb_rnn.py @@ -59,7 +59,7 @@ class SimpleLSTMRNN(fluid.imperative.Layer): dtype="float32", default_initializer=fluid.initializer.UniformInitializer( low=-self._init_scale, high=self._init_scale)) - self.weight_1_arr.append(weight_1) + self.weight_1_arr.append(self.add_parameter('w_%d' % i, weight_1)) bias_1 = self.create_parameter( attr=fluid.ParamAttr( initializer=fluid.initializer.UniformInitializer( @@ -67,7 +67,7 @@ class SimpleLSTMRNN(fluid.imperative.Layer): shape=[self._hidden_size * 4], dtype="float32", default_initializer=fluid.initializer.Constant(0.0)) - self.bias_arr.append(bias_1) + self.bias_arr.append(self.add_parameter('b_%d' % i, bias_1)) def forward(self, input_embedding, init_hidden=None, init_cell=None): self.cell_array = [] @@ -242,7 +242,7 @@ class TestImperativePtbRnn(unittest.TestCase): dy_loss = None last_hidden = None last_cell = None - batch_num = 50 + batch_num = 200 for i in range(batch_num): x_data = np.arange(12).reshape(4, 3).astype('int64') @@ -264,8 +264,10 @@ class TestImperativePtbRnn(unittest.TestCase): dy_param_init[param.name] = param._numpy() dy_loss._backward() sgd.minimize(dy_loss) - for param in ptb_model.parameters(): - dy_param_updated[param.name] = param._numpy() + ptb_model.clear_gradients() + if i == batch_num - 1: + for param in ptb_model.parameters(): + dy_param_updated[param.name] = param._numpy() with new_program_scope(): fluid.default_startup_program().random_seed = seed @@ -323,25 +325,28 @@ class TestImperativePtbRnn(unittest.TestCase): }, fetch_list=fetch_list) static_loss_value = out[0] - static_last_cell_value = out[1] - static_last_hidden_value = out[2] - for k in range(3, len(out)): - static_param_updated[static_param_name_list[k - 3]] = out[k] + static_last_hidden_value = out[1] + static_last_cell_value = out[2] + if i == batch_num - 1: + for k in range(3, len(out)): + static_param_updated[static_param_name_list[k - + 3]] = out[k] + + self.assertTrue(np.allclose(static_loss_value, dy_loss._numpy())) + self.assertTrue(np.allclose(static_last_cell_value, last_cell._numpy())) + self.assertTrue( + np.allclose(static_last_hidden_value, last_hidden._numpy())) + for key, value in six.iteritems(static_param_init): + # print("static_init name: {}, value {}".format(key, value)) + # print("dy_init name: {}, value {}".format(key, dy_param_init[key])) + self.assertTrue(np.allclose(value, dy_param_init[key], atol=1e-5)) + for key, value in six.iteritems(static_param_updated): + # print("static name: {}, value {}".format(key, value)) + # print("dy name: {}, value {}".format(key, dy_param_updated[key])) self.assertTrue( - np.allclose(static_loss_value.all(), dy_loss._numpy().all())) - self.assertTrue( - np.allclose(static_last_cell_value.all(), - last_cell._numpy().all())) - self.assertTrue( - np.allclose(static_last_hidden_value.all(), - last_hidden._numpy().all())) - for key, value in six.iteritems(static_param_init): - self.assertTrue( - np.allclose(value.all(), dy_param_init[key].all())) - for key, value in six.iteritems(static_param_updated): - self.assertTrue( - np.allclose(value.all(), dy_param_updated[key].all())) + np.allclose( + value, dy_param_updated[key], atol=1e-5)) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/test_imperative_transformer.py b/python/paddle/fluid/tests/unittests/test_imperative_transformer.py new file mode 100644 index 0000000000000000000000000000000000000000..b06d3e8894072943b06456340f928cda260763c3 --- /dev/null +++ b/python/paddle/fluid/tests/unittests/test_imperative_transformer.py @@ -0,0 +1,1096 @@ +# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import print_function + +import unittest +import paddle.fluid as fluid +from paddle.fluid.imperative import Embedding, LayerNorm, FC, to_variable, Layer, guard +from test_imperative_base import new_program_scope +from paddle.fluid import core +import numpy as np +import six +np.set_printoptions(suppress=True) + + +# Copy from models +class TrainTaskConfig(object): + # support both CPU and GPU now. + use_gpu = True + # the epoch number to train. + pass_num = 30 + # the number of sequences contained in a mini-batch. + # deprecated, set batch_size in args. + batch_size = 32 + # the hyper parameters for Adam optimizer. + # This static learning_rate will be multiplied to the LearningRateScheduler + # derived learning rate the to get the final learning rate. + learning_rate = 2.0 + beta1 = 0.9 + beta2 = 0.997 + eps = 1e-9 + # the parameters for learning rate scheduling. + warmup_steps = 8000 + # the weight used to mix up the ground-truth distribution and the fixed + # uniform distribution in label smoothing when training. + # Set this as zero if label smoothing is not wanted. + label_smooth_eps = 0.1 + # the directory for saving trained models. + model_dir = "trained_models" + # the directory for saving checkpoints. + ckpt_dir = "trained_ckpts" + # the directory for loading checkpoint. + # If provided, continue training from the checkpoint. + ckpt_path = None + # the parameter to initialize the learning rate scheduler. + # It should be provided if use checkpoints, since the checkpoint doesn't + # include the training step counter currently. + start_step = 0 + # the frequency to save trained models. + save_freq = 10000 + + +class InferTaskConfig(object): + use_gpu = True + # the number of examples in one run for sequence generation. + batch_size = 10 + # the parameters for beam search. + beam_size = 5 + max_out_len = 256 + # the number of decoded sentences to output. + n_best = 1 + # the flags indicating whether to output the special tokens. + output_bos = False + output_eos = False + output_unk = True + # the directory for loading the trained model. + model_path = "trained_models/pass_1.infer.model" + + +class ModelHyperParams(object): + # These following five vocabularies related configurations will be set + # automatically according to the passed vocabulary path and special tokens. + # size of source word dictionary. + src_vocab_size = 10000 + # size of target word dictionay + trg_vocab_size = 10000 + # index for token + bos_idx = 0 + # index for token + eos_idx = 1 + # index for token + unk_idx = 2 + # max length of sequences deciding the size of position encoding table. + max_length = 4 + # the dimension for word embeddings, which is also the last dimension of + # the input and output of multi-head attention, position-wise feed-forward + # networks, encoder and decoder. + d_model = 512 + # size of the hidden layer in position-wise feed-forward networks. + d_inner_hid = 2048 + # the dimension that keys are projected to for dot-product attention. + d_key = 64 + # the dimension that values are projected to for dot-product attention. + d_value = 64 + # number of head used in multi-head attention. + n_head = 8 + # number of sub-layers to be stacked in the encoder and decoder. + n_layer = 6 + # dropout rates of different modules. + prepostprocess_dropout = 0.1 + attention_dropout = 0.1 + relu_dropout = 0.1 + # to process before each sub-layer + preprocess_cmd = "n" # layer normalization + # to process after each sub-layer + postprocess_cmd = "da" # dropout + residual connection + # random seed used in dropout for CE. + dropout_seed = 1 + # the flag indicating whether to share embedding and softmax weights. + # vocabularies in source and target should be same for weight sharing. + weight_sharing = True + + +def merge_cfg_from_list(cfg_list, g_cfgs): + """ + Set the above global configurations using the cfg_list. + """ + assert len(cfg_list) % 2 == 0 + for key, value in zip(cfg_list[0::2], cfg_list[1::2]): + for g_cfg in g_cfgs: + if hasattr(g_cfg, key): + try: + value = eval(value) + except Exception: # for file path + pass + setattr(g_cfg, key, value) + break + + +def position_encoding_init(n_position, d_pos_vec): + """ + Generate the initial values for the sinusoid position encoding table. + """ + channels = d_pos_vec + position = np.arange(n_position) + num_timescales = channels // 2 + log_timescale_increment = (np.log(float(1e4) / float(1)) / + (num_timescales - 1)) + inv_timescales = np.exp(np.arange( + num_timescales)) * -log_timescale_increment + scaled_time = np.expand_dims(position, 1) * np.expand_dims(inv_timescales, + 0) + signal = np.concatenate([np.sin(scaled_time), np.cos(scaled_time)], axis=1) + signal = np.pad(signal, [[0, 0], [0, np.mod(channels, 2)]], 'constant') + position_enc = signal + return position_enc.astype("float32") + + +def create_data(is_static=False): + if is_static: + return [ + src_word_np, src_pos_np, src_slf_attn_bias_np, trg_word_np, + trg_pos_np, trg_slf_attn_bias_np, trg_src_attn_bias_np, lbl_word_np, + lbl_weight_np + ] + else: + enc_inputs = [ + to_variable(src_word_np), to_variable(src_pos_np), + to_variable(src_slf_attn_bias_np) + ] + dec_inputs = [ + to_variable(trg_word_np), to_variable(trg_pos_np), + to_variable(trg_slf_attn_bias_np), to_variable(trg_src_attn_bias_np) + ] + label = to_variable(lbl_word_np) + weight = to_variable(lbl_weight_np) + return enc_inputs, dec_inputs, label, weight + + +def create_feed_dict_list(data, init=False): + if init: + data_input_names = encoder_data_input_fields + \ + decoder_data_input_fields[:-1] + label_data_input_fields + pos_enc_param_names + else: + data_input_names = encoder_data_input_fields + \ + decoder_data_input_fields[:-1] + label_data_input_fields + feed_dict_list = dict() + for i in range(len(data_input_names)): + feed_dict_list[data_input_names[i]] = data[i] + return feed_dict_list + + +def make_all_inputs(input_fields): + """ + Define the input data layers for the transformer model. + """ + inputs = [] + for input_field in input_fields: + input_var = fluid.layers.data( + name=input_field, + shape=input_descs[input_field][0], + dtype=input_descs[input_field][1], + lod_level=input_descs[input_field][2] + if len(input_descs[input_field]) == 3 else 0, + append_batch_size=False) + inputs.append(input_var) + return inputs + + +# The placeholder for batch_size in compile time. Must be -1 currently to be +# consistent with some ops' infer-shape output in compile time, such as the +# sequence_expand op used in beamsearch decoder. +batch_size = 32 +# The placeholder for squence length in compile time. +seq_len = ModelHyperParams.max_length +# Here list the data shapes and data types of all inputs. +# The shapes here act as placeholder and are set to pass the infer-shape in +# compile time. +input_descs = { + # The actual data shape of src_word is: + # [batch_size, max_src_len_in_batch, 1] + "src_word": [(batch_size, seq_len, 1), "int64", 2], + # The actual data shape of src_pos is: + # [batch_size, max_src_len_in_batch, 1] + "src_pos": [(batch_size, seq_len, 1), "int64"], + # This input is used to remove attention weights on paddings in the + # encoder. + # The actual data shape of src_slf_attn_bias is: + # [batch_size, n_head, max_src_len_in_batch, max_src_len_in_batch] + "src_slf_attn_bias": [(batch_size, ModelHyperParams.n_head, seq_len, + seq_len), "float32"], + # The actual data shape of trg_word is: + # [batch_size, max_trg_len_in_batch, 1] + "trg_word": [(batch_size, seq_len, 1), "int64", + 2], # lod_level is only used in fast decoder. + # The actual data shape of trg_pos is: + # [batch_size, max_trg_len_in_batch, 1] + "trg_pos": [(batch_size, seq_len, 1), "int64"], + # This input is used to remove attention weights on paddings and + # subsequent words in the decoder. + # The actual data shape of trg_slf_attn_bias is: + # [batch_size, n_head, max_trg_len_in_batch, max_trg_len_in_batch] + "trg_slf_attn_bias": [(batch_size, ModelHyperParams.n_head, seq_len, + seq_len), "float32"], + # This input is used to remove attention weights on paddings of the source + # input in the encoder-decoder attention. + # The actual data shape of trg_src_attn_bias is: + # [batch_size, n_head, max_trg_len_in_batch, max_src_len_in_batch] + "trg_src_attn_bias": [(batch_size, ModelHyperParams.n_head, seq_len, + seq_len), "float32"], + # This input is used in independent decoder program for inference. + # The actual data shape of enc_output is: + # [batch_size, max_src_len_in_batch, d_model] + "enc_output": [(batch_size, seq_len, ModelHyperParams.d_model), "float32"], + # The actual data shape of label_word is: + # [batch_size * max_trg_len_in_batch, 1] + "lbl_word": [(batch_size * seq_len, 1), "int64"], + # This input is used to mask out the loss of paddding tokens. + # The actual data shape of label_weight is: + # [batch_size * max_trg_len_in_batch, 1] + "lbl_weight": [(batch_size * seq_len, 1), "float32"], + # This input is used in beam-search decoder. + "init_score": [(batch_size, 1), "float32", 2], + # This input is used in beam-search decoder for the first gather + # (cell states updation) + "init_idx": [(batch_size, ), "int32"], +} + +# Names of word embedding table which might be reused for weight sharing. +word_emb_param_names = ( + "src_word_emb_table", + "trg_word_emb_table", ) +# Names of position encoding table which will be initialized externally. +pos_enc_param_names = ( + "src_pos_enc_table", + "trg_pos_enc_table", ) +# separated inputs for different usages. +encoder_data_input_fields = ( + "src_word", + "src_pos", + "src_slf_attn_bias", ) +decoder_data_input_fields = ( + "trg_word", + "trg_pos", + "trg_slf_attn_bias", + "trg_src_attn_bias", + "enc_output", ) +label_data_input_fields = ( + "lbl_word", + "lbl_weight", ) +# In fast decoder, trg_pos (only containing the current time step) is generated +# by ops and trg_slf_attn_bias is not needed. +fast_decoder_data_input_fields = ( + "trg_word", + "init_score", + "init_idx", + "trg_src_attn_bias", ) +# if we use py_reader +use_py_reader = False + +# if we run sync mode +sync = False + +# how many batches we use +batch_num = 2 + +np.random.seed = 1 +src_word_np = np.random.randint( + 1, + ModelHyperParams.src_vocab_size - 1, + size=(batch_size, seq_len, 1), + dtype='int64') +src_pos_np = np.random.randint( + 1, seq_len, size=(batch_size, seq_len, 1), dtype='int64') +src_slf_attn_bias_np = np.random.randn(batch_size, ModelHyperParams.n_head, + seq_len, seq_len).astype('float32') + +trg_word_np = np.random.randint( + 1, + ModelHyperParams.src_vocab_size - 1, + size=(batch_size, seq_len, 1), + dtype='int64') +trg_pos_np = np.random.randint( + 1, seq_len, size=(batch_size, seq_len, 1), dtype='int64') +trg_slf_attn_bias_np = np.random.randn(batch_size, ModelHyperParams.n_head, + seq_len, seq_len).astype('float32') +trg_src_attn_bias_np = np.random.randn(batch_size, ModelHyperParams.n_head, + seq_len, seq_len).astype('float32') + +lbl_word_np = np.random.randint( + 1, + ModelHyperParams.src_vocab_size - 1, + size=(batch_size * seq_len, 1), + dtype='int64') +lbl_weight_np = np.random.randn(batch_size * seq_len, 1).astype('float32') + +# np.random.seed = 1 +# src_word_np = np.arange(0, 10).reshape([batch_size, seq_len, 1]).astype('int64') +# src_pos_np = np.random.randint( +# 1, seq_len, size=(batch_size, seq_len, 1), dtype='int64') +# src_slf_attn_bias_np = np.random.randn(batch_size, ModelHyperParams.n_head, +# seq_len, seq_len).astype('float32') +# +# trg_word_np = np.arange(0, 10).reshape([batch_size, seq_len, 1]).astype('int64') +# trg_pos_np = np.random.randint( +# 1, seq_len, size=(batch_size, seq_len, 1), dtype='int64') +# trg_slf_attn_bias_np = np.random.randn(batch_size, ModelHyperParams.n_head, +# seq_len, seq_len).astype('float32') +# trg_src_attn_bias_np = np.random.randn(batch_size, ModelHyperParams.n_head, +# seq_len, seq_len).astype('float32') +# +# lbl_word_np = np.arange(0, 10).reshape([batch_size * seq_len, 1]).astype('int64') +# lbl_weight_np = np.random.randn(batch_size * seq_len, 1).astype('float32') +# +pos_inp1 = position_encoding_init(ModelHyperParams.max_length, + ModelHyperParams.d_model) +pos_inp2 = position_encoding_init(ModelHyperParams.max_length, + ModelHyperParams.d_model) + + +class PrePostProcessLayer(Layer): + def __init__(self, name_scope, process_cmd, shape_len=None): + super(PrePostProcessLayer, self).__init__(name_scope) + for cmd in process_cmd: + if cmd == "n": + self._layer_norm = LayerNorm( + name_scope=self.full_name(), + begin_norm_axis=shape_len - 1, + param_attr=fluid.ParamAttr( + initializer=fluid.initializer.Constant(1.)), + bias_attr=fluid.ParamAttr( + initializer=fluid.initializer.Constant(0.))) + + def forward(self, prev_out, out, process_cmd, dropout_rate=0.): + for cmd in process_cmd: + if cmd == "a": # add residual connection + out = out + prev_out if prev_out else out + elif cmd == "n": # add layer normalization + out = self._layer_norm(out) + elif cmd == "d": # add dropout + if dropout_rate: + out = fluid.layers.dropout( + out, + dropout_prob=dropout_rate, + seed=ModelHyperParams.dropout_seed, + is_test=False) + return out + + +class PositionwiseFeedForwardLayer(Layer): + def __init__(self, name_scope, d_inner_hid, d_hid, dropout_rate): + super(PositionwiseFeedForwardLayer, self).__init__(name_scope) + self._i2h = FC(name_scope=self.full_name(), + size=d_inner_hid, + num_flatten_dims=2, + act="relu") + self._h2o = FC(name_scope=self.full_name(), + size=d_hid, + num_flatten_dims=2) + self._dropout_rate = dropout_rate + + def forward(self, x): + hidden = self._i2h(x) + if self._dropout_rate: + hidden = fluid.layers.dropout( + hidden, + dropout_prob=self._dropout_rate, + seed=ModelHyperParams.dropout_seed, + is_test=False) + out = self._h2o(hidden) + return out + + +class MultiHeadAttentionLayer(Layer): + def __init__(self, + name_scope, + d_key, + d_value, + d_model, + n_head=1, + dropout_rate=0., + cache=None, + gather_idx=None, + static_kv=False): + super(MultiHeadAttentionLayer, self).__init__(name_scope) + self._n_head = n_head + self._d_key = d_key + self._d_value = d_value + self._d_model = d_model + self._dropout_rate = dropout_rate + self._q_fc = FC(name_scope=self.full_name(), + size=d_key * n_head, + bias_attr=False, + num_flatten_dims=2) + self._k_fc = FC(name_scope=self.full_name(), + size=d_key * n_head, + bias_attr=False, + num_flatten_dims=2) + self._v_fc = FC(name_scope=self.full_name(), + size=d_value * n_head, + bias_attr=False, + num_flatten_dims=2) + self._proj_fc = FC(name_scope=self.full_name(), + size=self._d_model, + bias_attr=False, + num_flatten_dims=2) + + def forward(self, queries, keys, values, attn_bias): + # compute q ,k ,v + keys = queries if keys is None else keys + values = keys if values is None else values + + q = self._q_fc(queries) + k = self._k_fc(keys) + v = self._v_fc(values) + + # split head + reshaped_q = fluid.layers.reshape( + x=q, shape=[0, 0, self._n_head, self._d_key], inplace=False) + transpose_q = fluid.layers.transpose(x=reshaped_q, perm=[0, 2, 1, 3]) + reshaped_k = fluid.layers.reshape( + x=k, shape=[0, 0, self._n_head, self._d_key], inplace=False) + transpose_k = fluid.layers.transpose(x=reshaped_k, perm=[0, 2, 1, 3]) + reshaped_v = fluid.layers.reshape( + x=v, shape=[0, 0, self._n_head, self._d_value], inplace=False) + transpose_v = fluid.layers.transpose(x=reshaped_v, perm=[0, 2, 1, 3]) + + #scale dot product attention + product = fluid.layers.matmul( + x=transpose_q, + y=transpose_k, + transpose_y=True, + alpha=self._d_model**-0.5) + if attn_bias: + product += attn_bias + weights = fluid.layers.softmax(product) + if self._dropout_rate: + weights_droped = fluid.layers.dropout( + weights, + dropout_prob=self._dropout_rate, + seed=ModelHyperParams.dropout_seed, + is_test=False) + out = fluid.layers.matmul(weights_droped, transpose_v) + else: + out = fluid.layers.matmul(weights, transpose_v) + + # combine heads + if len(out.shape) != 4: + raise ValueError("Input(x) should be a 4-D Tensor.") + trans_x = fluid.layers.transpose(out, perm=[0, 2, 1, 3]) + final_out = fluid.layers.reshape( + x=trans_x, + shape=[0, 0, trans_x.shape[2] * trans_x.shape[3]], + inplace=False) + + # fc to output + proj_out = self._proj_fc(final_out) + return proj_out + + +class EncoderSubLayer(Layer): + def __init__(self, + name_scope, + n_head, + d_key, + d_value, + d_model, + d_inner_hid, + prepostprocess_dropout, + attention_dropout, + relu_dropout, + preprocess_cmd="n", + postprocess_cmd="da"): + + super(EncoderSubLayer, self).__init__(name_scope) + self._preprocess_cmd = preprocess_cmd + self._postprocess_cmd = postprocess_cmd + self._prepostprocess_dropout = prepostprocess_dropout + + self._preprocess_layer = PrePostProcessLayer(self.full_name(), + self._preprocess_cmd, 3) + self._multihead_attention_layer = MultiHeadAttentionLayer( + self.full_name(), d_key, d_value, d_model, n_head, + attention_dropout) + self._postprocess_layer = PrePostProcessLayer( + self.full_name(), self._postprocess_cmd, None) + self._preprocess_layer2 = PrePostProcessLayer(self.full_name(), + self._preprocess_cmd, 3) + self._positionwise_feed_forward = PositionwiseFeedForwardLayer( + self.full_name(), d_inner_hid, d_model, relu_dropout) + self._postprocess_layer2 = PrePostProcessLayer( + self.full_name(), self._postprocess_cmd, None) + + def forward(self, enc_input, attn_bias): + pre_process_multihead = self._preprocess_layer( + None, enc_input, self._preprocess_cmd, self._prepostprocess_dropout) + attn_output = self._multihead_attention_layer(pre_process_multihead, + None, None, attn_bias) + attn_output = self._postprocess_layer(enc_input, attn_output, + self._postprocess_cmd, + self._prepostprocess_dropout) + pre_process2_output = self._preprocess_layer2( + None, attn_output, self._preprocess_cmd, + self._prepostprocess_dropout) + ffd_output = self._positionwise_feed_forward(pre_process2_output) + return self._postprocess_layer2(attn_output, ffd_output, + self._postprocess_cmd, + self._prepostprocess_dropout) + + +class EncoderLayer(Layer): + def __init__(self, + name_scope, + n_layer, + n_head, + d_key, + d_value, + d_model, + d_inner_hid, + prepostprocess_dropout, + attention_dropout, + relu_dropout, + preprocess_cmd="n", + postprocess_cmd="da"): + + super(EncoderLayer, self).__init__(name_scope) + self._preprocess_cmd = preprocess_cmd + self._encoder_sublayers = list() + self._prepostprocess_dropout = prepostprocess_dropout + self._n_layer = n_layer + self._preprocess_layer = PrePostProcessLayer(self.full_name(), + self._preprocess_cmd, 3) + for i in range(n_layer): + self._encoder_sublayers.append( + self.add_sublayer( + 'esl_%d' % i, + EncoderSubLayer( + self.full_name(), n_head, d_key, d_value, d_model, + d_inner_hid, prepostprocess_dropout, attention_dropout, + relu_dropout, preprocess_cmd, postprocess_cmd))) + + def forward(self, enc_input, attn_bias): + for i in range(self._n_layer): + enc_output = self._encoder_sublayers[i](enc_input, attn_bias) + enc_input = enc_output + + return self._preprocess_layer(None, enc_output, self._preprocess_cmd, + self._prepostprocess_dropout) + + +class PrepareEncoderDecoderLayer(Layer): + def __init__(self, + name_scope, + src_vocab_size, + src_emb_dim, + src_max_len, + dropout_rate, + word_emb_param_name=None, + pos_enc_param_name=None): + super(PrepareEncoderDecoderLayer, self).__init__(name_scope) + self._src_max_len = src_max_len + self._src_emb_dim = src_emb_dim + self._src_vocab_size = src_vocab_size + self._dropout_rate = dropout_rate + self._input_emb = Embedding( + name_scope=self.full_name(), + size=[src_vocab_size, src_emb_dim], + padding_idx=0, + param_attr=fluid.ParamAttr( + name=word_emb_param_name, + initializer=fluid.initializer.Normal(0., src_emb_dim**-0.5))) + + if pos_enc_param_name is pos_enc_param_names[0]: + pos_inp = pos_inp1 + else: + pos_inp = pos_inp2 + self._pos_emb = Embedding( + name_scope=self.full_name(), + size=[self._src_max_len, src_emb_dim], + param_attr=fluid.ParamAttr( + name=pos_enc_param_name, + initializer=fluid.initializer.NumpyArrayInitializer(pos_inp), + trainable=False)) + + # use in imperative_mode to fit different length batch + # self._pos_emb._w = to_variable( + # position_encoding_init(self._src_max_len, self._src_emb_dim)) + + def forward(self, src_word, src_pos): + src_word_emb = self._input_emb(src_word) + src_word_emb = fluid.layers.scale( + x=src_word_emb, scale=self._src_emb_dim**0.5) + # # TODO change this to fit dynamic length input + src_pos_emb = self._pos_emb(src_pos) + src_pos_emb.stop_gradient = True + enc_input = src_word_emb + src_pos_emb + return fluid.layers.dropout( + enc_input, + dropout_prob=self._dropout_rate, + seed=ModelHyperParams.dropout_seed, + is_test=False) if self._dropout_rate else enc_input + + +class WrapEncoderLayer(Layer): + def __init__(self, name_cope, src_vocab_size, max_length, n_layer, n_head, + d_key, d_value, d_model, d_inner_hid, prepostprocess_dropout, + attention_dropout, relu_dropout, preprocess_cmd, + postprocess_cmd, weight_sharing): + """ + The wrapper assembles together all needed layers for the encoder. + """ + super(WrapEncoderLayer, self).__init__(name_cope) + + self._prepare_encoder_layer = PrepareEncoderDecoderLayer( + self.full_name(), + src_vocab_size, + d_model, + max_length, + prepostprocess_dropout, + word_emb_param_name=word_emb_param_names[0], + pos_enc_param_name=pos_enc_param_names[0]) + self._encoder = EncoderLayer( + self.full_name(), n_layer, n_head, d_key, d_value, d_model, + d_inner_hid, prepostprocess_dropout, attention_dropout, + relu_dropout, preprocess_cmd, postprocess_cmd) + + def forward(self, enc_inputs): + src_word, src_pos, src_slf_attn_bias = enc_inputs + enc_input = self._prepare_encoder_layer(src_word, src_pos) + enc_output = self._encoder(enc_input, src_slf_attn_bias) + return enc_output + + +class DecoderSubLayer(Layer): + def __init__(self, + name_scope, + n_head, + d_key, + d_value, + d_model, + d_inner_hid, + prepostprocess_dropout, + attention_dropout, + relu_dropout, + preprocess_cmd, + postprocess_cmd, + cache=None, + gather_idx=None): + super(DecoderSubLayer, self).__init__(name_scope) + self._postprocess_cmd = postprocess_cmd + self._preprocess_cmd = preprocess_cmd + self._prepostprcess_dropout = prepostprocess_dropout + self._pre_process_layer = PrePostProcessLayer(self.full_name(), + preprocess_cmd, 3) + self._multihead_attention_layer = MultiHeadAttentionLayer( + self.full_name(), + d_key, + d_value, + d_model, + n_head, + attention_dropout, + cache=cache, + gather_idx=gather_idx) + self._post_process_layer = PrePostProcessLayer(self.full_name(), + postprocess_cmd, None) + self._pre_process_layer2 = PrePostProcessLayer(self.full_name(), + preprocess_cmd, 3) + self._multihead_attention_layer2 = MultiHeadAttentionLayer( + self.full_name(), + d_key, + d_value, + d_model, + n_head, + attention_dropout, + cache=cache, + gather_idx=gather_idx, + static_kv=True) + self._post_process_layer2 = PrePostProcessLayer(self.full_name(), + postprocess_cmd, None) + self._pre_process_layer3 = PrePostProcessLayer(self.full_name(), + preprocess_cmd, 3) + self._positionwise_feed_forward_layer = PositionwiseFeedForwardLayer( + self.full_name(), d_inner_hid, d_model, relu_dropout) + self._post_process_layer3 = PrePostProcessLayer(self.full_name(), + postprocess_cmd, None) + + def forward(self, dec_input, enc_output, slf_attn_bias, dec_enc_attn_bias): + pre_process_rlt = self._pre_process_layer( + None, dec_input, self._preprocess_cmd, self._prepostprcess_dropout) + slf_attn_output = self._multihead_attention_layer(pre_process_rlt, None, + None, slf_attn_bias) + slf_attn_output_pp = self._post_process_layer( + dec_input, slf_attn_output, self._postprocess_cmd, + self._prepostprcess_dropout) + pre_process_rlt2 = self._pre_process_layer2(None, slf_attn_output_pp, + self._preprocess_cmd, + self._prepostprcess_dropout) + enc_attn_output_pp = self._multihead_attention_layer2( + pre_process_rlt2, enc_output, enc_output, dec_enc_attn_bias) + enc_attn_output = self._post_process_layer2( + slf_attn_output, enc_attn_output_pp, self._postprocess_cmd, + self._prepostprcess_dropout) + pre_process_rlt3 = self._pre_process_layer3(None, enc_attn_output, + self._preprocess_cmd, + self._prepostprcess_dropout) + ffd_output = self._positionwise_feed_forward_layer(pre_process_rlt3) + dec_output = self._post_process_layer3(enc_attn_output, ffd_output, + self._postprocess_cmd, + self._prepostprcess_dropout) + return dec_output + + +class DecoderLayer(Layer): + def __init__(self, + name_scope, + n_layer, + n_head, + d_key, + d_value, + d_model, + d_inner_hid, + prepostprocess_dropout, + attention_dropout, + relu_dropout, + preprocess_cmd, + postprocess_cmd, + caches=None, + gather_idx=None): + super(DecoderLayer, self).__init__(name_scope) + self._pre_process_layer = PrePostProcessLayer(self.full_name(), + preprocess_cmd, 3) + self._decoder_sub_layers = list() + self._n_layer = n_layer + self._preprocess_cmd = preprocess_cmd + self._prepostprocess_dropout = prepostprocess_dropout + for i in range(n_layer): + self._decoder_sub_layers.append( + self.add_sublayer( + 'dsl_%d' % i, + DecoderSubLayer( + self.full_name(), + n_head, + d_key, + d_value, + d_model, + d_inner_hid, + prepostprocess_dropout, + attention_dropout, + relu_dropout, + preprocess_cmd, + postprocess_cmd, + cache=None if caches is None else caches[i], + gather_idx=gather_idx))) + + def forward(self, dec_input, enc_output, dec_slf_attn_bias, + dec_enc_attn_bias): + for i in range(self._n_layer): + tmp_dec_output = self._decoder_sub_layers[i]( + dec_input, enc_output, dec_slf_attn_bias, dec_enc_attn_bias) + dec_input = tmp_dec_output + + dec_output = self._pre_process_layer(None, tmp_dec_output, + self._preprocess_cmd, + self._prepostprocess_dropout) + return dec_output + + +class WrapDecoderLayer(Layer): + def __init__(self, + name_scope, + trg_vocab_size, + max_length, + n_layer, + n_head, + d_key, + d_value, + d_model, + d_inner_hid, + prepostprocess_dropout, + attention_dropout, + relu_dropout, + preprocess_cmd, + postprocess_cmd, + weight_sharing, + caches=None, + gather_idx=None): + """ + The wrapper assembles together all needed layers for the encoder. + """ + super(WrapDecoderLayer, self).__init__(name_scope) + + self._prepare_decoder_layer = PrepareEncoderDecoderLayer( + self.full_name(), + trg_vocab_size, + d_model, + max_length, + prepostprocess_dropout, + word_emb_param_name=word_emb_param_names[1], + pos_enc_param_name=pos_enc_param_names[1]) + self._decoder_layer = DecoderLayer( + self.full_name(), + n_layer, + n_head, + d_key, + d_value, + d_model, + d_inner_hid, + prepostprocess_dropout, + attention_dropout, + relu_dropout, + preprocess_cmd, + postprocess_cmd, + caches=caches, + gather_idx=gather_idx) + self._weight_sharing = weight_sharing + if not weight_sharing: + self._fc = FC(self.full_name(), + size=trg_vocab_size, + bias_attr=False) + + def forward(self, dec_inputs=None, enc_output=None): + trg_word, trg_pos, trg_slf_attn_bias, trg_src_attn_bias = dec_inputs + dec_input = self._prepare_decoder_layer(trg_word, trg_pos) + dec_output = self._decoder_layer(dec_input, enc_output, + trg_slf_attn_bias, trg_src_attn_bias) + + dec_output_reshape = fluid.layers.reshape( + dec_output, shape=[-1, dec_output.shape[-1]], inplace=False) + + if self._weight_sharing: + predict = fluid.layers.matmul( + x=dec_output_reshape, + y=self._prepare_decoder_layer._input_emb._w, + transpose_y=True) + else: + predict = self._fc(dec_output_reshape) + + if dec_inputs is None: + # Return probs for independent decoder program. + predict_out = fluid.layers.softmax(predict) + return predict_out + return predict + + +class TransFormer(Layer): + def __init__(self, + name_scope, + src_vocab_size, + trg_vocab_size, + max_length, + n_layer, + n_head, + d_key, + d_value, + d_model, + d_inner_hid, + prepostprocess_dropout, + attention_dropout, + relu_dropout, + preprocess_cmd, + postprocess_cmd, + weight_sharing, + label_smooth_eps, + use_py_reader=False, + is_test=False): + super(TransFormer, self).__init__(name_scope) + self._label_smooth_eps = label_smooth_eps + self._trg_vocab_size = trg_vocab_size + if weight_sharing: + assert src_vocab_size == trg_vocab_size, ( + "Vocabularies in source and target should be same for weight sharing." + ) + self._wrap_encoder_layer = WrapEncoderLayer( + self.full_name(), src_vocab_size, max_length, n_layer, n_head, + d_key, d_value, d_model, d_inner_hid, prepostprocess_dropout, + attention_dropout, relu_dropout, preprocess_cmd, postprocess_cmd, + weight_sharing) + self._wrap_decoder_layer = WrapDecoderLayer( + self.full_name(), trg_vocab_size, max_length, n_layer, n_head, + d_key, d_value, d_model, d_inner_hid, prepostprocess_dropout, + attention_dropout, relu_dropout, preprocess_cmd, postprocess_cmd, + weight_sharing) + + if weight_sharing: + self._wrap_decoder_layer._prepare_decoder_layer._input_emb._w = self._wrap_encoder_layer._prepare_encoder_layer._input_emb._w + + def forward(self, enc_inputs, dec_inputs, label, weights): + enc_output = self._wrap_encoder_layer(enc_inputs) + predict = self._wrap_decoder_layer(dec_inputs, enc_output) + if self._label_smooth_eps: + label_out = fluid.layers.label_smooth( + label=fluid.layers.one_hot( + input=label, depth=self._trg_vocab_size), + epsilon=self._label_smooth_eps) + + cost = fluid.layers.softmax_with_cross_entropy( + logits=predict, + label=label_out, + soft_label=True if self._label_smooth_eps else False) + weighted_cost = cost * weights + sum_cost = fluid.layers.reduce_sum(weighted_cost) + token_num = fluid.layers.reduce_sum(weights) + token_num.stop_gradient = True + avg_cost = sum_cost / token_num + return sum_cost, avg_cost, predict, token_num + + +class TestImperativeTransformer(unittest.TestCase): + def test_transformer_float32(self): + seed = 90 + with guard(): + fluid.default_startup_program().random_seed = seed + fluid.default_main_program().random_seed = seed + transformer = TransFormer( + 'transformer', + ModelHyperParams.src_vocab_size, + ModelHyperParams.trg_vocab_size, + ModelHyperParams.max_length + 1, + ModelHyperParams.n_layer, + ModelHyperParams.n_head, + ModelHyperParams.d_key, + ModelHyperParams.d_value, + ModelHyperParams.d_model, + ModelHyperParams.d_inner_hid, + ModelHyperParams.prepostprocess_dropout, + ModelHyperParams.attention_dropout, + ModelHyperParams.relu_dropout, + ModelHyperParams.preprocess_cmd, + ModelHyperParams.postprocess_cmd, + ModelHyperParams.weight_sharing, + TrainTaskConfig.label_smooth_eps, + use_py_reader=use_py_reader, + is_test=False) + if sync: + lr_decay = fluid.layers.learning_rate_scheduler.noam_decay( + ModelHyperParams.d_model, TrainTaskConfig.warmup_steps) + with fluid.default_main_program()._lr_schedule_guard(): + learning_rate = lr_decay * TrainTaskConfig.learning_rate + optimizer = fluid.optimizer.Adam( + learning_rate=learning_rate, + beta1=TrainTaskConfig.beta1, + beta2=TrainTaskConfig.beta2, + epsilon=TrainTaskConfig.eps) + else: + optimizer = fluid.optimizer.SGD(learning_rate=0.003) + dy_param_init = dict() + dy_param_updated = dict() + for i in range(batch_num): + enc_inputs, dec_inputs, label, weights = create_data() + dy_sum_cost, dy_avg_cost, dy_predict, dy_token_num = transformer( + enc_inputs, dec_inputs, label, weights) + if i == 0: + for param in transformer.parameters(): + dy_param_init[param.name] = param._numpy() + + dy_avg_cost._backward() + optimizer.minimize(dy_avg_cost) + transformer.clear_gradients() + if i == batch_num - 1: + for param in transformer.parameters(): + dy_param_updated[param.name] = param._numpy() + + with new_program_scope(): + fluid.default_startup_program().random_seed = seed + fluid.default_main_program().random_seed = seed + transformer = TransFormer( + 'transformer', + ModelHyperParams.src_vocab_size, + ModelHyperParams.trg_vocab_size, + ModelHyperParams.max_length + 1, + ModelHyperParams.n_layer, + ModelHyperParams.n_head, + ModelHyperParams.d_key, + ModelHyperParams.d_value, + ModelHyperParams.d_model, + ModelHyperParams.d_inner_hid, + ModelHyperParams.prepostprocess_dropout, + ModelHyperParams.attention_dropout, + ModelHyperParams.relu_dropout, + ModelHyperParams.preprocess_cmd, + ModelHyperParams.postprocess_cmd, + ModelHyperParams.weight_sharing, + TrainTaskConfig.label_smooth_eps, + use_py_reader=use_py_reader, + is_test=False) + exe = fluid.Executor(fluid.CPUPlace( + ) if not core.is_compiled_with_cuda() else fluid.CUDAPlace(0)) + optimizer = fluid.optimizer.SGD(learning_rate=0.003) + + data_input_names = encoder_data_input_fields + decoder_data_input_fields[: + -1] + label_data_input_fields + all_inputs = make_all_inputs(data_input_names) + enc_inputs_len = len(encoder_data_input_fields) + dec_inputs_len = len(decoder_data_input_fields[:-1]) + enc_inputs = all_inputs[0:enc_inputs_len] + dec_inputs = all_inputs[enc_inputs_len:enc_inputs_len + + dec_inputs_len] + label = all_inputs[-2] + weights = all_inputs[-1] + static_param_updated = dict() + static_param_init = dict() + static_param_name_list = list() + static_sum_cost, static_avg_cost, static_predict, static_token_num = transformer( + enc_inputs, dec_inputs, label, weights) + + optimizer.minimize(static_avg_cost) + for param in transformer.parameters(): + static_param_name_list.append(param.name) + out = exe.run(fluid.default_startup_program(), + fetch_list=static_param_name_list) + for i in range(len(static_param_name_list)): + static_param_init[static_param_name_list[i]] = out[i] + static_sum_cost_value = None + static_avg_cost_value = None + static_predict_value = None + static_token_num_value = None + for i in range(batch_num): + feed_dict = create_feed_dict_list(create_data(True)) + fetch_list = [ + static_sum_cost, static_avg_cost, static_predict, + static_token_num + ] + fetch_list.extend(static_param_name_list) + + out = exe.run(fluid.default_main_program(), + feed=feed_dict, + fetch_list=fetch_list) + static_sum_cost_value = out[0] + static_avg_cost_value = out[1] + static_predict_value = out[2] + static_token_num_value = out[3] + if i == batch_num - 1: + for k in range(4, len(out)): + static_param_updated[static_param_name_list[k - + 4]] = out[k] + + self.assertTrue( + np.allclose(static_avg_cost_value, dy_avg_cost._numpy())) + self.assertTrue( + np.allclose(static_sum_cost_value, dy_sum_cost._numpy())) + self.assertTrue( + np.allclose( + static_predict_value, dy_predict._numpy(), atol=1e-5)) + self.assertTrue( + np.allclose(static_token_num_value, dy_token_num._numpy())) + for key, value in six.iteritems(static_param_init): + self.assertTrue(np.allclose(value, dy_param_init[key])) + for key, value in six.iteritems(static_param_updated): + self.assertTrue( + np.allclose( + value, dy_param_updated[key], atol=1e-4)) + + +if __name__ == '__main__': + unittest.main() diff --git a/python/paddle/fluid/contrib/slim/tests/filter_pruning/__init__.py b/python/paddle/fluid/tests/unittests/test_install_check.py similarity index 75% rename from python/paddle/fluid/contrib/slim/tests/filter_pruning/__init__.py rename to python/paddle/fluid/tests/unittests/test_install_check.py index d0c32e26092f6ea25771279418582a24ea449ab2..5802e2ed0a3dfd7e1c45e91037a6c40b1b6bd2fc 100644 --- a/python/paddle/fluid/contrib/slim/tests/filter_pruning/__init__.py +++ b/python/paddle/fluid/tests/unittests/test_install_check.py @@ -11,3 +11,12 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. + +from __future__ import print_function +import unittest +import paddle.fluid as fluid + + +class TestInstallCheck(unittest.TestCase): + def test_install_check(self): + fluid.install_check.run_check() diff --git a/python/paddle/fluid/tests/unittests/test_layers.py b/python/paddle/fluid/tests/unittests/test_layers.py index 1672c3600f389d87e85f965f96122065137cf0ac..7fd9617cc7687a5a553ed22cfed560aef8058496 100644 --- a/python/paddle/fluid/tests/unittests/test_layers.py +++ b/python/paddle/fluid/tests/unittests/test_layers.py @@ -42,10 +42,14 @@ class LayerTest(unittest.TestCase): def tearDownClass(cls): pass - def _get_place(self): - if core.is_compiled_with_cuda(): - return core.CUDAPlace(0) - return core.CPUPlace() + def _get_place(self, force_to_use_cpu=False): + # this option for ops that only have cpu kernel + if force_to_use_cpu: + return core.CPUPlace() + else: + if core.is_compiled_with_cuda(): + return core.CUDAPlace(0) + return core.CPUPlace() @contextlib.contextmanager def static_graph(self): @@ -54,22 +58,52 @@ class LayerTest(unittest.TestCase): fluid.default_main_program().random_seed = self.seed yield - def get_static_graph_result(self, feed, fetch_list): + def get_static_graph_result(self, feed, fetch_list, with_lod=False): exe = fluid.Executor(self._get_place()) exe.run(fluid.default_startup_program()) return exe.run(fluid.default_main_program(), feed=feed, - fetch_list=fetch_list) + fetch_list=fetch_list, + return_numpy=(not with_lod)) @contextlib.contextmanager - def dynamic_graph(self): - with fluid.imperative.guard(self._get_place()): + def dynamic_graph(self, force_to_use_cpu=False): + with fluid.imperative.guard( + self._get_place(force_to_use_cpu=force_to_use_cpu)): fluid.default_startup_program().random_seed = self.seed fluid.default_main_program().random_seed = self.seed yield class TestLayer(LayerTest): + def test_layer_norm(self): + inp = np.ones([3, 32, 32], dtype='float32') + with self.static_graph(): + t = layers.data( + name='data', + shape=[3, 32, 32], + dtype='float32', + append_batch_size=False) + ret = layers.layer_norm(t) + static_ret = self.get_static_graph_result( + feed={'data': inp}, fetch_list=[ret])[0] + with self.static_graph(): + t = layers.data( + name='data', + shape=[3, 32, 32], + dtype='float32', + append_batch_size=False) + lm = nn.LayerNorm('layer_norm') + ret = lm(t) + static_ret2 = self.get_static_graph_result( + feed={'data': inp}, fetch_list=[ret])[0] + with self.dynamic_graph(): + lm = nn.LayerNorm('layer_norm') + dy_ret = lm(base.to_variable(inp)) + + self.assertTrue(np.allclose(static_ret, static_ret2)) + self.assertTrue(np.allclose(dy_ret._numpy(), static_ret2)) + def test_relu(self): with self.static_graph(): t = layers.data(name='t', shape=[3, 3], dtype='float32') @@ -228,6 +262,304 @@ class TestLayer(LayerTest): self.assertTrue(np.allclose(n, min_ret._numpy())) self.assertTrue(np.allclose(n2, max_ret._numpy())) + def test_sequence_conv(self): + inp_np = np.arange(12).reshape([3, 4]).astype('float32') + if core.is_compiled_with_cuda(): + place = core.CUDAPlace(0) + else: + place = core.CPUPlace() + with self.static_graph(): + seq = layers.data( + name='seq_in', + shape=[3, 4], + dtype='float32', + lod_level=1, + append_batch_size=False) + out = layers.sequence_conv(seq, 2) + static_rlt = self.get_static_graph_result( + feed={ + "seq_in": fluid.create_lod_tensor( + data=inp_np, + recursive_seq_lens=[[1, 1, 1]], + place=place) + }, + fetch_list=[out], + with_lod=True)[0] + + with self.static_graph(): + seq = layers.data( + name='seq_in', + shape=[3, 4], + dtype='float32', + lod_level=1, + append_batch_size=False) + seq_conv = nn.SequenceConv('seq_conv', num_filters=2) + out = seq_conv(seq) + static_rlt2 = self.get_static_graph_result( + feed={ + "seq_in": fluid.create_lod_tensor( + data=inp_np, + recursive_seq_lens=[[1, 1, 1]], + place=place) + }, + fetch_list=[out], + with_lod=True)[0] + self.assertTrue( + np.allclose(np.array(static_rlt), np.array(static_rlt2))) + + def test_conv2d_transpose(self): + inp_np = np.arange(0, 24).reshape([2, 3, 2, 2]).astype('float32') + with self.static_graph(): + img = layers.data(name='pixel', shape=[3, 2, 2], dtype='float32') + out = layers.conv2d_transpose( + input=img, num_filters=10, output_size=28) + static_rlt = self.get_static_graph_result( + feed={'pixel': inp_np}, fetch_list=[out])[0] + with self.static_graph(): + img = layers.data(name='pixel', shape=[3, 2, 2], dtype='float32') + conv2d_transpose = nn.Conv2DTranspose( + 'conv2d_transpose', num_filters=10, output_size=28) + out = conv2d_transpose(img) + static_rlt2 = self.get_static_graph_result( + feed={'pixel': inp_np}, fetch_list=[out])[0] + with self.dynamic_graph(): + conv2d_transpose = nn.Conv2DTranspose( + 'conv2d_transpose', num_filters=10, output_size=28) + dy_rlt = conv2d_transpose(base.to_variable(inp_np)) + self.assertTrue(np.allclose(static_rlt2, static_rlt)) + self.assertTrue(np.allclose(dy_rlt._numpy(), static_rlt)) + + def test_bilinear_tensor_product(self): + inp_np_x = np.array([[1, 2, 3]]).astype('float32') + inp_np_y = np.array([[4, 5, 6]]).astype('float32') + + with self.static_graph(): + data_x = layers.data( + name='x', + shape=[1, 3], + dtype="float32", + append_batch_size=False) + data_y = layers.data( + name='y', + shape=[1, 3], + dtype="float32", + append_batch_size=False) + out = layers.bilinear_tensor_product(data_x, data_y, 6) + + static_rlt = self.get_static_graph_result( + feed={'x': inp_np_x, + 'y': inp_np_y}, fetch_list=[out])[0] + with self.static_graph(): + data_x = layers.data( + name='x', + shape=[1, 3], + dtype="float32", + append_batch_size=False) + data_y = layers.data( + name='y', + shape=[1, 3], + dtype="float32", + append_batch_size=False) + btp = nn.BilinearTensorProduct('btp', 6) + out = btp(data_x, data_y) + static_rlt2 = self.get_static_graph_result( + feed={'x': inp_np_x, + 'y': inp_np_y}, fetch_list=[out])[0] + with self.dynamic_graph(): + btp = nn.BilinearTensorProduct('btp', 6) + dy_rlt = btp(base.to_variable(inp_np_x), base.to_variable(inp_np_y)) + + self.assertTrue(np.allclose(static_rlt2, static_rlt)) + self.assertTrue(np.allclose(dy_rlt._numpy(), static_rlt)) + + def test_prelu(self): + inp_np = np.ones([5, 200, 100, 100]).astype('float32') + + with self.static_graph(): + data_t = layers.data( + name="input", + shape=[5, 200, 100, 100], + dtype="float32", + append_batch_size=False) + mode = 'channel' + out = layers.prelu( + data_t, mode, param_attr=ParamAttr(initializer=Constant(1.0))) + static_rlt = self.get_static_graph_result( + feed={"input": inp_np}, fetch_list=[out])[0] + + with self.static_graph(): + data_t = layers.data( + name="input", + shape=[5, 200, 100, 100], + dtype="float32", + append_batch_size=False) + mode = 'channel' + prelu = nn.PRelu( + 'prelu', + mode=mode, + param_attr=ParamAttr(initializer=Constant(1.0))) + out = prelu(data_t) + static_rlt2 = self.get_static_graph_result( + feed={"input": inp_np}, fetch_list=[out])[0] + + with self.dynamic_graph(): + mode = 'channel' + prelu = nn.PRelu( + 'prelu', + mode=mode, + param_attr=ParamAttr(initializer=Constant(1.0))) + dy_rlt = prelu(base.to_variable(inp_np)) + + self.assertTrue(np.allclose(static_rlt2, static_rlt)) + self.assertTrue(np.allclose(dy_rlt._numpy(), static_rlt)) + + def test_embeding(self): + inp_word = np.array([[[1]]]).astype('int64') + dict_size = 20 + with self.static_graph(): + data_t = layers.data(name='word', shape=[1], dtype='int64') + emb = layers.embedding( + input=data_t, + size=[dict_size, 32], + param_attr='emb.w', + is_sparse=False) + static_rlt = self.get_static_graph_result( + feed={'word': inp_word}, fetch_list=[emb])[0] + with self.static_graph(): + data_t = layers.data(name='word', shape=[1], dtype='int64') + emb2 = nn.Embedding( + name_scope='embedding', + size=[dict_size, 32], + param_attr='emb.w', + is_sparse=False) + emb_rlt = emb2(data_t) + static_rlt2 = self.get_static_graph_result( + feed={'word': inp_word}, fetch_list=[emb_rlt])[0] + with self.dynamic_graph(): + emb2 = nn.Embedding( + name_scope='embedding', + size=[dict_size, 32], + param_attr='emb.w', + is_sparse=False) + static_rlt3 = emb2(base.to_variable(inp_word)) + + self.assertTrue(np.allclose(static_rlt2, static_rlt)) + self.assertTrue(np.allclose(static_rlt3._numpy(), static_rlt)) + + def test_nce(self): + window_size = 5 + dict_size = 20 + label_word = int(window_size // 2) + 1 + inp_word = np.array([[[1]], [[2]], [[3]], [[4]], [[5]]]).astype('int64') + nid_freq_arr = np.random.dirichlet(np.ones(20) * 1000).astype('float32') + seed = 1 + with self.static_graph(): + words = [] + for i in range(window_size): + words.append( + layers.data( + name='word_{0}'.format(i), shape=[1], dtype='int64')) + + embs = [] + for i in range(window_size): + if i == label_word: + continue + + emb = layers.embedding( + input=words[i], + size=[dict_size, 32], + param_attr='emb.w', + is_sparse=False) + embs.append(emb) + + embs = layers.concat(input=embs, axis=1) + nce_loss = layers.nce(input=embs, + label=words[label_word], + num_total_classes=dict_size, + num_neg_samples=2, + sampler="custom_dist", + custom_dist=nid_freq_arr.tolist(), + seed=seed, + param_attr='nce.w', + bias_attr='nce.b') + feed_dict = dict() + for i in range(window_size): + feed_dict['word_{0}'.format(i)] = inp_word[i] + static_rlt = self.get_static_graph_result( + feed=feed_dict, fetch_list=[nce_loss])[0] + with self.static_graph(): + words = [] + for i in range(window_size): + words.append( + layers.data( + name='word_{0}'.format(i), shape=[1], dtype='int64')) + + emb = nn.Embedding( + 'embedding', + size=[dict_size, 32], + param_attr='emb.w', + is_sparse=False) + + embs2 = [] + for i in range(window_size): + if i == label_word: + continue + + emb_rlt = emb(words[i]) + embs2.append(emb_rlt) + + embs2 = layers.concat(input=embs2, axis=1) + nce = nn.NCE('nce', + num_total_classes=dict_size, + num_neg_samples=2, + sampler="custom_dist", + custom_dist=nid_freq_arr.tolist(), + seed=seed, + param_attr='nce.w', + bias_attr='nce.b') + + nce_loss2 = nce(embs2, words[label_word]) + feed_dict = dict() + for i in range(len(words)): + feed_dict['word_{0}'.format(i)] = inp_word[i] + + static_rlt2 = self.get_static_graph_result( + feed=feed_dict, fetch_list=[nce_loss2])[0] + + with self.dynamic_graph(force_to_use_cpu=True): + words = [] + for i in range(window_size): + words.append(base.to_variable(inp_word[i])) + + emb = nn.Embedding( + 'embedding', + size=[dict_size, 32], + param_attr='emb.w', + is_sparse=False) + + embs3 = [] + for i in range(window_size): + if i == label_word: + continue + + emb_rlt = emb(words[i]) + embs3.append(emb_rlt) + + embs3 = layers.concat(input=embs3, axis=1) + nce = nn.NCE('nce', + num_total_classes=dict_size, + num_neg_samples=2, + sampler="custom_dist", + custom_dist=nid_freq_arr.tolist(), + seed=seed, + param_attr='nce.w', + bias_attr='nce.b') + + nce_loss3 = nce(embs3, words[label_word]) + + self.assertTrue(np.allclose(static_rlt2, static_rlt)) + self.assertTrue(np.allclose(nce_loss3._numpy(), static_rlt)) + class TestBook(unittest.TestCase): def test_fit_a_line(self): @@ -1269,6 +1601,15 @@ class TestBook(unittest.TestCase): self.assertIsNotNone(out) print(str(program)) + def test_fsp(self): + program = Program() + with program_guard(program): + x = layers.data(name="X", shape=[16, 4, 4], dtype="float32") + y = layers.data(name="Y", shape=[8, 4, 4], dtype="float32") + out = layers.fsp_matrix(x, y) + self.assertIsNotNone(out) + print(str(program)) + if __name__ == '__main__': unittest.main() diff --git a/python/paddle/fluid/tests/unittests/test_nce_remote_table_op.py b/python/paddle/fluid/tests/unittests/test_nce_remote_table_op.py index cc6f40de86e302605a416c48790c74cbb431b2e3..d24532b95fb18a383e7de7f60052885d08be4fc0 100644 --- a/python/paddle/fluid/tests/unittests/test_nce_remote_table_op.py +++ b/python/paddle/fluid/tests/unittests/test_nce_remote_table_op.py @@ -205,9 +205,9 @@ class TestListenAndServOp(unittest.TestCase): out = nce(x_array, param_array, bias_array, sample_weight, label_array, 5, 2) - self.assertAlmostEqual(o_cost.all(), out[0].all(), delta=1e-6) - self.assertAlmostEqual(o_logits.all(), out[1].all(), delta=1e-6) - self.assertAlmostEqual(o_labels.all(), out[2].all(), delta=1e-6) + np.testing.assert_almost_equal(o_cost, out[0], decimal=6) + np.testing.assert_almost_equal(o_logits, out[1], decimal=6) + np.testing.assert_almost_equal(o_labels, out[2], decimal=6) def test_nce_op_remote(self): os.environ['PADDLE_ENABLE_REMOTE_PREFETCH'] = "1" diff --git a/python/paddle/fluid/tests/unittests/test_partial_eager_deletion_transformer.py b/python/paddle/fluid/tests/unittests/test_partial_eager_deletion_transformer.py index 7607189454b2264523176b6853fd9debddf47eed..ef06e7d9fcf7597c721b19a1e13647471c83e7a6 100644 --- a/python/paddle/fluid/tests/unittests/test_partial_eager_deletion_transformer.py +++ b/python/paddle/fluid/tests/unittests/test_partial_eager_deletion_transformer.py @@ -14,11 +14,12 @@ import os import unittest -os.environ['FLAGS_eager_delete_tensor_gb'] = "0.0" -os.environ['FLAGS_memory_fraction_of_eager_deletion'] = "0.55" +import paddle.fluid as fluid os.environ['RECORDIO_FILENAME'] = './p_gc_transformer.wmt16.recordio' +fluid.core._set_eager_deletion_mode(0.0, 0.55, True) + from test_parallel_executor_transformer import TestTransformer if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/test_py_reader_sample_generator.py b/python/paddle/fluid/tests/unittests/test_py_reader_sample_generator.py new file mode 100644 index 0000000000000000000000000000000000000000..4efca5e2aafd9c370ccc37791a9900b18f2705f6 --- /dev/null +++ b/python/paddle/fluid/tests/unittests/test_py_reader_sample_generator.py @@ -0,0 +1,137 @@ +# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import paddle +import paddle.fluid as fluid +import math +import unittest +import numpy as np +import os + +os.environ['CPU_NUM'] = '1' + + +def random_reader(sample_num): + def __impl__(): + for _ in range(sample_num): + yield np.random.random( + size=[784]).astype('float32'), np.random.random_integers( + low=0, high=9, size=[1]).astype('int64') + + return paddle.reader.cache(__impl__) + + +class TestCaseBase(unittest.TestCase): + def setUp(self): + self.batch_size = 32 + self.epoch_num = 2 + self.sample_num = 165 + + def generate_all_data(self, reader): + ret = [] + for d in reader(): + slots = [[], []] + for item in d: + slots[0].append(item[0]) + slots[1].append(item[1]) + slots = [np.array(slot) for slot in slots] + ret.append(slots) + return ret + + def run_main(self, reader, use_sample_generator, iterable, drop_last): + image = fluid.layers.data(name='image', dtype='float32', shape=[784]) + label = fluid.layers.data(name='label', dtype='int64', shape=[1]) + py_reader = fluid.io.PyReader( + feed_list=[image, label], + capacity=16, + iterable=iterable, + use_double_buffer=False) + + batch_reader = paddle.batch(reader, self.batch_size, drop_last) + all_datas = self.generate_all_data(batch_reader) + + if not use_sample_generator: + py_reader.decorate_sample_list_generator( + batch_reader, places=fluid.cpu_places()) + else: + py_reader.decorate_sample_generator( + reader, self.batch_size, drop_last, places=fluid.cpu_places()) + + if drop_last: + batch_num = int(self.sample_num / self.batch_size) + else: + batch_num = math.ceil(float(self.sample_num) / self.batch_size) + + exe = fluid.Executor(fluid.CPUPlace()) + exe.run(fluid.default_startup_program()) + for _ in range(self.epoch_num): + if py_reader.iterable: + step = 0 + for data in py_reader(): + img, lbl = exe.run(feed=data, fetch_list=[image, label]) + self.assertArrayEqual(img, all_datas[step][0]) + self.assertArrayEqual(lbl, all_datas[step][1]) + step += 1 + self.assertEqual(step, len(all_datas)) + else: + step = 0 + try: + py_reader.start() + while True: + img, lbl = exe.run(fetch_list=[image, label]) + self.assertArrayEqual(img, all_datas[step][0]) + self.assertArrayEqual(lbl, all_datas[step][1]) + step += 1 + except fluid.core.EOFException: + py_reader.reset() + self.assertEqual(step, len(all_datas)) + break + + def assertArrayEqual(self, arr1, arr2): + self.assertEqual(arr1.shape, arr2.shape) + self.assertTrue((arr1 == arr2).all()) + + def test_main(self): + reader = random_reader(self.sample_num) + for use_sample_generator in [False, True]: + for iterable in [False, True]: + for drop_last in [False, True]: + with fluid.program_guard(fluid.Program(), fluid.Program()): + self.run_main(reader, use_sample_generator, iterable, + drop_last) + + +class TestCase1(TestCaseBase): + def setUp(self): + self.batch_size = 32 + self.epoch_num = 10 + self.sample_num = 160 + + +class TestCase2(TestCaseBase): + def setUp(self): + self.batch_size = 32 + self.epoch_num = 2 + self.sample_num = 200 + + +class TestCase3(TestCaseBase): + def setUp(self): + self.batch_size = 32 + self.epoch_num = 2 + self.sample_num = 159 + + +if __name__ == '__main__': + unittest.main() diff --git a/python/paddle/fluid/tests/unittests/test_roi_align_op.py b/python/paddle/fluid/tests/unittests/test_roi_align_op.py index 1a252ea547e4d93d83f64fa9cdb3605eeef0a3cf..aad2eaed94a356d06afb7cd461eecefa2de98d8c 100644 --- a/python/paddle/fluid/tests/unittests/test_roi_align_op.py +++ b/python/paddle/fluid/tests/unittests/test_roi_align_op.py @@ -168,3 +168,7 @@ class TestROIAlignOp(OpTest): def test_check_grad(self): self.check_grad(['X'], 'Out') + + +if __name__ == '__main__': + unittest.main() diff --git a/python/paddle/fluid/tests/unittests/test_tensor.py b/python/paddle/fluid/tests/unittests/test_tensor.py index 1822957c23d0bb1e4821373515d4faef2b76950e..3c974ea460c11a49b657b724bf521d1c16f3a189 100644 --- a/python/paddle/fluid/tests/unittests/test_tensor.py +++ b/python/paddle/fluid/tests/unittests/test_tensor.py @@ -14,6 +14,7 @@ from __future__ import print_function +import paddle.fluid as fluid import paddle.fluid.core as core import unittest import numpy @@ -183,6 +184,58 @@ class TestTensor(unittest.TestCase): tensor_array = numpy.array(tensor) self.assertEqual((0, 1), tensor_array.shape) + def run_sliece_tensor(self, place): + + tensor = fluid.Tensor() + shape = [3, 3, 3] + tensor._set_dims(shape) + + tensor_array = numpy.array([[[1, 2, 3], [4, 5, 6], [7, 8, 9]], + [[10, 11, 12], [13, 14, 15], [16, 17, 18]], + [[19, 20, 21], [22, 23, 24], [25, 26, 27]]]) + + tensor.set(tensor_array, place) + n1 = tensor[1] + t1 = tensor_array[1] + self.assertTrue((numpy.array(n1) == numpy.array(t1)).all()) + + n2 = tensor[1:] + t2 = tensor_array[1:] + self.assertTrue((numpy.array(n2) == numpy.array(t2)).all()) + + n3 = tensor[0:2:] + t3 = tensor_array[0:2:] + self.assertTrue((numpy.array(n3) == numpy.array(t3)).all()) + + n4 = tensor[2::-2] + t4 = tensor_array[2::-2] + self.assertTrue((numpy.array(n4) == numpy.array(t4)).all()) + + n5 = tensor[2::-2][0] + t5 = tensor_array[2::-2][0] + self.assertTrue((numpy.array(n5) == numpy.array(t5)).all()) + + n6 = tensor[2:-1:-1] + t6 = tensor_array[2:-1:-1] + self.assertTrue((numpy.array(n6) == numpy.array(t6)).all()) + + n7 = tensor[0:, 0:] + t7 = tensor_array[0:, 0:] + self.assertTrue((numpy.array(n7) == numpy.array(t7)).all()) + + n8 = tensor[0::1, 0::-1, 2:] + t8 = tensor_array[0::1, 0::-1, 2:] + self.assertTrue((numpy.array(n8) == numpy.array(t8)).all()) + + def test_sliece_tensor(self): + # run cpu first + place = core.CPUPlace() + self.run_sliece_tensor(place) + + if core.is_compiled_with_cuda(): + place = core.CUDAPlace(0) + self.run_sliece_tensor(place) + if __name__ == '__main__': unittest.main() diff --git a/python/paddle/fluid/tests/unittests/test_variable.py b/python/paddle/fluid/tests/unittests/test_variable.py index 4f3c26ca7bdf4d807952b413c8b0dc8b211c06f6..076ee3baf96ab3c16f3ed9a3b9a15e2eb2aaed77 100644 --- a/python/paddle/fluid/tests/unittests/test_variable.py +++ b/python/paddle/fluid/tests/unittests/test_variable.py @@ -16,8 +16,10 @@ from __future__ import print_function import unittest from paddle.fluid.framework import default_main_program, Program, convert_np_dtype_to_dtype_ +import paddle.fluid as fluid import paddle.fluid.core as core import numpy as np +from test_imperative_base import new_program_scope class TestVariable(unittest.TestCase): @@ -60,6 +62,100 @@ class TestVariable(unittest.TestCase): name='step_scopes', type=core.VarDesc.VarType.STEP_SCOPES) self.assertEqual(core.VarDesc.VarType.STEP_SCOPES, var.type) + def _test_slice(self): + b = default_main_program().current_block() + w = b.create_var(dtype="float64", shape=[784, 100, 100], lod_level=0) + + for i in range(3): + nw = w[i] + self.assertEqual((1, 100, 100), nw.shape) + + nw = w[:] + self.assertEqual((784, 100, 100), nw.shape) + + nw = w[:, :, ...] + self.assertEqual((784, 100, 100), nw.shape) + + nw = w[::2, ::2, :] + self.assertEqual((392, 50, 100), nw.shape) + + nw = w[::-2, ::-2, :] + self.assertEqual((392, 50, 100), nw.shape) + + self.assertEqual(0, nw.lod_level) + + place = fluid.CPUPlace() + main = fluid.Program() + with fluid.program_guard(main): + exe = fluid.Executor(place) + tensor_array = np.array( + [[[1, 2, 3], [4, 5, 6], [7, 8, 9]], + [[10, 11, 12], [13, 14, 15], [16, 17, 18]], + [[19, 20, 21], [22, 23, 24], [25, 26, 27]]]).astype('float32') + var = fluid.layers.assign(tensor_array) + var1 = var[0, 1, 1] + var2 = var[1:] + var3 = var[0:1] + var4 = var[..., ] + var5 = var[2::-2] + var6 = var[1, 1:, 1:] + var7 = var[1, ..., 1:] + var8 = var[1, ...] + local_out = exe.run(main, + fetch_list=[ + var, var1, var2, var3, var4, var5, var6, + var7, var8 + ]) + + self.assertTrue((np.array(local_out[1]) == np.array(tensor_array[ + 0, 1, 1])).all()) + self.assertTrue((np.array(local_out[2]) == np.array(tensor_array[ + 1:])).all()) + self.assertTrue((np.array(local_out[3]) == np.array(tensor_array[ + 0:1])).all()) + self.assertTrue((np.array(local_out[4]) == np.array( + tensor_array[..., ])).all()) + self.assertTrue((np.array(local_out[5]) == np.array(tensor_array[ + 2::-2])).all()) + self.assertTrue((np.array(local_out[6]) == np.array(tensor_array[ + 1, 1:, 1:])).all()) + self.assertTrue((np.array(local_out[7]) == np.array(tensor_array[ + 1, ..., 1:])).all()) + self.assertTrue((np.array(local_out[8]) == np.array(tensor_array[ + 1, ...])).all()) + + def test_slice(self): + self._test_slice() + + +class TestVariableImperative(unittest.TestCase): + def _test_slice(self): + b = default_main_program().current_block() + w = b.create_var(dtype="float64", shape=[784, 100, 100], lod_level=0) + + for i in range(3): + nw = w[i] + self.assertEqual([1, 100, 100], nw.shape) + + nw = w[:] + self.assertEqual([784, 100, 100], nw.shape) + + nw = w[:, :, :] + self.assertEqual([784, 100, 100], nw.shape) + + nw = w[::2, ::2, :] + self.assertEqual([392, 50, 100], nw.shape) + + nw = w[::-2, ::-2, :] + self.assertEqual([392, 50, 100], nw.shape) + + nw = w[0::-2, 0::-2, :] + self.assertEqual([1, 1, 100], nw.shape) + + def test_slice(self): + with fluid.imperative.guard(): + self._test_slice() + if __name__ == '__main__': unittest.main() diff --git a/python/paddle/reader/decorator.py b/python/paddle/reader/decorator.py index 685d08b9e0b2127fbe8f8b55f8c329ce0002bbe7..f8c5ae0eaf45fd3ab43652c16b4954d622787702 100644 --- a/python/paddle/reader/decorator.py +++ b/python/paddle/reader/decorator.py @@ -13,7 +13,7 @@ # limitations under the License. __all__ = [ - 'map_readers', 'buffered', 'compose', 'chain', 'shuffle', + 'cache', 'map_readers', 'buffered', 'compose', 'chain', 'shuffle', 'ComposeNotAligned', 'firstn', 'xmap_readers', 'PipeReader', 'multiprocess_reader', 'Fake' ] @@ -33,6 +33,30 @@ import zlib import paddle.compat as cpt +def cache(reader): + """ + Cache the reader data into memory. + + Be careful that this method may take long time to process, + and consume lots of memory. :code:`reader()` would only + call once. + + Args: + reader (generator): a reader object which yields + data each time. + + Returns: + generator: a decorated reader object which yields data from cached memory. + """ + all_data = tuple(reader()) + + def __impl__(): + for item in all_data: + yield item + + return __impl__ + + def map_readers(func, *readers): """ Creates a data reader that outputs return value of function using diff --git a/python/setup.py.in b/python/setup.py.in index a7c1e91f9c3a9597d799659a0abe3c9f56e54a57..9f87f5644fc969f3f55fd08689f3e2bbaf36dc39 100644 --- a/python/setup.py.in +++ b/python/setup.py.in @@ -117,6 +117,7 @@ packages=['paddle', 'paddle.fluid.contrib.slim.graph', 'paddle.fluid.contrib.slim.prune', 'paddle.fluid.contrib.slim.quantization', + 'paddle.fluid.contrib.slim.distillation', 'paddle.fluid.contrib.utils', 'paddle.fluid.transpiler', 'paddle.fluid.transpiler.details']