diff --git a/CMakeLists.txt b/CMakeLists.txt index 0a44e56719baa433a5c45df2082fa86296b3da1c..65fbbb481c432f7b905f4dec7ea39c51ec853ae8 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -11,7 +11,7 @@ find_package(Protobuf REQUIRED) # Check protobuf library version. execute_process(COMMAND ${PROTOBUF_PROTOC_EXECUTABLE} --version - OUTPUT_VARIABLE PROTOBUF_VERSION) + OUTPUT_VARIABLE PROTOBUF_VERSION) string(REPLACE "libprotoc " "" PROTOBUF_VERSION ${PROTOBUF_VERSION}) set(PROTOBUF_3 OFF) @@ -25,8 +25,8 @@ find_package(ZLIB REQUIRED) find_package(NumPy REQUIRED) find_package(Threads REQUIRED) find_package(AVX QUIET) -find_package(Glog) -find_package(Gflags QUIET) +find_package(Glog REQUIRED) +find_package(Gflags REQUIRED) find_package(GTest) find_package(Sphinx) find_package(Doxygen) @@ -40,8 +40,6 @@ option(WITH_AVX "Compile PaddlePaddle with avx intrinsics" ${AVX_FOUND}) option(WITH_PYTHON "Compile PaddlePaddle with python interpreter" ON) option(WITH_STYLE_CHECK "Style Check for PaddlePaddle" ${PYTHONINTERP_FOUND}) option(WITH_RDMA "Compile PaddlePaddle with rdma support" OFF) -option(WITH_GLOG "Compile PaddlePaddle use glog, otherwise use a log implement internally" ${LIBGLOG_FOUND}) -option(WITH_GFLAGS "Compile PaddlePaddle use gflags, otherwise use a flag implement internally" ${GFLAGS_FOUND}) option(WITH_TIMER "Compile PaddlePaddle use timer" OFF) option(WITH_PROFILER "Compile PaddlePaddle use gpu profiler" OFF) option(WITH_TESTING "Compile and run unittest for PaddlePaddle" ${GTEST_FOUND}) @@ -136,16 +134,12 @@ else(WITH_RDMA) add_definitions(-DPADDLE_DISABLE_RDMA) endif(WITH_RDMA) -if(WITH_GLOG) - add_definitions(-DPADDLE_USE_GLOG) - include_directories(${LIBGLOG_INCLUDE_DIR}) -endif() +# glog +include_directories(${LIBGLOG_INCLUDE_DIR}) -if(WITH_GFLAGS) - add_definitions(-DPADDLE_USE_GFLAGS) - add_definitions(-DGFLAGS_NS=${GFLAGS_NAMESPACE}) - include_directories(${GFLAGS_INCLUDE_DIRS}) -endif() +#gflags +add_definitions(-DGFLAGS_NS=${GFLAGS_NAMESPACE}) +include_directories(${GFLAGS_INCLUDE_DIRS}) if(WITH_TESTING) enable_testing() @@ -169,5 +163,4 @@ add_subdirectory(paddle) add_subdirectory(python) if(WITH_DOC) add_subdirectory(doc) - add_subdirectory(doc_cn) endif() diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md new file mode 120000 index 0000000000000000000000000000000000000000..f3eb8b4edba28a689ae662232c0b47de47bd0699 --- /dev/null +++ b/CONTRIBUTING.md @@ -0,0 +1 @@ +./doc/howto/contribute_to_paddle_en.md \ No newline at end of file diff --git a/WORKSPACE b/WORKSPACE index 0b8299905abb844bfbd8b27f47b8fafded31ef7a..f097c41da85affd1ff0b24200dbdbc63bf9c3ab6 100644 --- a/WORKSPACE +++ b/WORKSPACE @@ -3,7 +3,7 @@ http_archive( name="protobuf", url="http://github.com/google/protobuf/archive/v3.1.0.tar.gz", sha256="0a0ae63cbffc274efb573bdde9a253e3f32e458c41261df51c5dbc5ad541e8f7", - strip_prefix="protobuf-3.1.0", ) + strip_prefix="protobuf-3.1.0") # External dependency to gtest 1.7.0. This method comes from # https://www.bazel.io/versions/master/docs/tutorial/cpp.html. @@ -12,4 +12,20 @@ new_http_archive( url="https://github.com/google/googletest/archive/release-1.7.0.zip", sha256="b58cb7547a28b2c718d1e38aee18a3659c9e3ff52440297e965f5edffe34b6d0", build_file="third_party/gtest.BUILD", - strip_prefix="googletest-release-1.7.0", ) + strip_prefix="googletest-release-1.7.0") + +# External dependency to gflags. This method comes from +# https://github.com/gflags/example/blob/master/WORKSPACE. 
+new_git_repository( + name="gflags", + tag="v2.2.0", + remote="https://github.com/gflags/gflags.git", + build_file="third_party/gflags.BUILD") + +# External dependency to glog. This method comes from +# https://github.com/reyoung/bazel_playground/blob/master/WORKSPACE +new_git_repository( + name="glog", + remote="https://github.com/google/glog.git", + commit="b6a5e0524c28178985f0d228e9eaa43808dbec3c", + build_file="third_party/glog.BUILD") diff --git a/cmake/FindSphinx.cmake b/cmake/FindSphinx.cmake index 6702f45a168bf0dfc6cfca3ff8e68fbc79c92b11..05aa100eaefcf0d9119763b1440c96e341c80387 100644 --- a/cmake/FindSphinx.cmake +++ b/cmake/FindSphinx.cmake @@ -72,6 +72,7 @@ function( Sphinx_add_target target_name builder conf cache source destination ) ${source} ${destination} COMMENT "Generating sphinx documentation: ${builder}" + COMMAND ln -s ${destination}/index_*.html ${destination}/index.html ) set_property( @@ -143,4 +144,4 @@ function( Sphinx_add_targets target_base_name conf source base_destination ) add_dependencies( ${target_base_name}_linkcheck ${_dependencies} ) endif() -endfunction() \ No newline at end of file +endfunction() diff --git a/cmake/check_packages.cmake b/cmake/check_packages.cmake index 06887455418797f7162a5970669a0483e42a2db8..4b7cadfc85cd5c5bd9f1ff7e4dc833cf4f438005 100644 --- a/cmake/check_packages.cmake +++ b/cmake/check_packages.cmake @@ -14,13 +14,9 @@ if(WITH_STYLE_CHECK) find_package(PythonInterp REQUIRED) endif() -if(WITH_GLOG) - find_package(Glog REQUIRED) -endif() +find_package(Glog REQUIRED) -if(WITH_GFLAGS) - find_package(Gflags REQUIRED) -endif() +find_package(Gflags REQUIRED) if(WITH_TESTING) find_package(GTest REQUIRED) diff --git a/cmake/util.cmake b/cmake/util.cmake index 11641f6064b9db36e14293460a1f05067e373661..38366373c6dbcc1d05c359484ae73ace1bbc59be 100644 --- a/cmake/util.cmake +++ b/cmake/util.cmake @@ -65,7 +65,7 @@ endmacro() # link_paddle_exe # add paddle library for a paddle executable, such as trainer, pserver. # -# It will handle WITH_PYTHON/WITH_GLOG etc. +# It will handle WITH_PYTHON etc. 
function(link_paddle_exe TARGET_NAME)
     if(WITH_RDMA)
       generate_rdma_links()
 endif()
@@ -108,6 +108,8 @@ function(link_paddle_exe TARGET_NAME)
         paddle_cuda
         ${METRIC_LIBS}
         ${PROTOBUF_LIBRARY}
+        ${LIBGLOG_LIBRARY}
+        ${GFLAGS_LIBRARIES}
         ${CMAKE_THREAD_LIBS_INIT}
         ${CBLAS_LIBS}
         ${ZLIB_LIBRARIES}
@@ -119,27 +121,17 @@ function(link_paddle_exe TARGET_NAME)
             ${RDMA_LD_FLAGS}
             ${RDMA_LIBS})
     endif()
-
+
     if(WITH_PYTHON)
         target_link_libraries(${TARGET_NAME}
             ${PYTHON_LIBRARIES})
     endif()
 
-    if(WITH_GLOG)
-        target_link_libraries(${TARGET_NAME}
-            ${LIBGLOG_LIBRARY})
-    endif()
-
-    if(WITH_GFLAGS)
-        target_link_libraries(${TARGET_NAME}
-            ${GFLAGS_LIBRARIES})
-    endif()
-
     if(WITH_GPU)
-        if(NOT WITH_DSO OR WITH_METRIC)
+        if(NOT WITH_DSO OR WITH_METRIC)
             target_link_libraries(${TARGET_NAME}
                 ${CUDNN_LIBRARY}
-                ${CUDA_curand_LIBRARY})
+                ${CUDA_curand_LIBRARY})
             CUDA_ADD_CUBLAS_TO_TARGET(${TARGET_NAME})
         endif()
@@ -206,5 +198,5 @@ function(create_resources res_file output)
   # Convert hex data for C compatibility
   string(REGEX REPLACE "([0-9a-f][0-9a-f])" "0x\\1," filedata ${filedata})
   # Append data to output file
-  file(APPEND ${output} "const unsigned char ${filename}[] = {${filedata}};\nconst unsigned ${filename}_size = sizeof(${filename});\n")
+  file(APPEND ${output} "const unsigned char ${filename}[] = {${filedata}0};\nconst unsigned ${filename}_size = sizeof(${filename});\n")
 endfunction()
diff --git a/demo/semantic_role_labeling/data/extract_dict_feature.py b/demo/semantic_role_labeling/data/extract_dict_feature.py
index a02a49a86ed31f44058c192525a2acd979c5de0b..da44111976a0dec68345fc139d0aa459ca9211c2 100644
--- a/demo/semantic_role_labeling/data/extract_dict_feature.py
+++ b/demo/semantic_role_labeling/data/extract_dict_feature.py
@@ -43,13 +43,13 @@ def extract_dict_features(pair_file, feature_file):
 
         mark[verb_index] = 1
         ctx_0 = sentence_list[verb_index]
-        if verb_index < len(labels_list) - 2:
+        if verb_index < len(labels_list) - 1:
             mark[verb_index + 1] = 1
             ctx_p1 = sentence_list[verb_index + 1]
         else:
             ctx_p1 = 'eos'
 
-        if verb_index < len(labels_list) - 3:
+        if verb_index < len(labels_list) - 2:
             mark[verb_index + 2] = 1
             ctx_p2 = sentence_list[verb_index + 2]
         else:
diff --git a/doc/CMakeLists.txt b/doc/CMakeLists.txt
index efcf8b0ad3d6f2f831fe71f3c09163015cc1ac96..1b0fbadeb3e6e67c086239e2c66e3f81b13ee32f 100644
--- a/doc/CMakeLists.txt
+++ b/doc/CMakeLists.txt
@@ -7,25 +7,50 @@ if(NOT DEFINED SPHINX_THEME_DIR)
 endif()
 
 # configured documentation tools and intermediate build results
-set(BINARY_BUILD_DIR "${CMAKE_CURRENT_BINARY_DIR}/_build")
+set(BINARY_BUILD_DIR_EN "${CMAKE_CURRENT_BINARY_DIR}/en/_build")
 
 # Sphinx cache with pickled ReST documents
-set(SPHINX_CACHE_DIR "${CMAKE_CURRENT_BINARY_DIR}/_doctrees")
+set(SPHINX_CACHE_DIR_EN "${CMAKE_CURRENT_BINARY_DIR}/en/_doctrees")
 
-# HTML output directory
-set(SPHINX_HTML_DIR "${CMAKE_CURRENT_BINARY_DIR}/html")
+# HTML output directory
+set(SPHINX_HTML_DIR_EN "${CMAKE_CURRENT_BINARY_DIR}/en/html")
 
 configure_file(
-  "${CMAKE_CURRENT_SOURCE_DIR}/conf.py.in"
-  "${BINARY_BUILD_DIR}/conf.py"
+  "${CMAKE_CURRENT_SOURCE_DIR}/conf.py.en.in"
+  "${BINARY_BUILD_DIR_EN}/conf.py"
   @ONLY)
 
 sphinx_add_target(paddle_docs
   html
-  ${BINARY_BUILD_DIR}
-  ${SPHINX_CACHE_DIR}
+  ${BINARY_BUILD_DIR_EN}
+  ${SPHINX_CACHE_DIR_EN}
   ${CMAKE_CURRENT_SOURCE_DIR}
-  ${SPHINX_HTML_DIR})
+  ${SPHINX_HTML_DIR_EN})
 
 add_dependencies(paddle_docs
   gen_proto_py)
+
+
+# configured documentation tools and intermediate build results
+set(BINARY_BUILD_DIR_CN "${CMAKE_CURRENT_BINARY_DIR}/cn/_build")
+
+# Sphinx cache with pickled
ReST documents +set(SPHINX_CACHE_DIR_CN "${CMAKE_CURRENT_BINARY_DIR}/cn/_doctrees") + +# HTML output directory +set(SPHINX_HTML_DIR_CN "${CMAKE_CURRENT_BINARY_DIR}/cn/html") + +configure_file( + "${CMAKE_CURRENT_SOURCE_DIR}/conf.py.cn.in" + "${BINARY_BUILD_DIR_CN}/conf.py" + @ONLY) + +sphinx_add_target(paddle_docs_cn + html + ${BINARY_BUILD_DIR_CN} + ${SPHINX_CACHE_DIR_CN} + ${CMAKE_CURRENT_SOURCE_DIR} + ${SPHINX_HTML_DIR_CN}) + +add_dependencies(paddle_docs_cn + gen_proto_py) diff --git a/doc/about/index_cn.md b/doc/about/index_cn.md new file mode 100644 index 0000000000000000000000000000000000000000..3bf030004d4de8c6f3cb773c6e78c09f40878c5f --- /dev/null +++ b/doc/about/index_cn.md @@ -0,0 +1,11 @@ +关于PaddlePaddle +================ + +PaddlePaddle是一个最早由百度科学家和工程师共同研发的并行分布式深度学习平台,兼备易用性、高效性、灵活性和可扩展性,目前已被百度内部多个产品线广泛使用。 +PaddlePaddle目前已经开放源码, 但是远未完善,我们希望能在这个基础上不断的改进、扩展和延伸。 +同时我们希望广大开发者积极提供反馈和贡献源代码,建立一个活跃的开源社区。 + +致谢 +-------- + +在此,特别感谢PaddlePaddle的[所有贡献者](https://github.com/PaddlePaddle/Paddle/graphs/contributors)。 diff --git a/doc_cn/ui/data_provider/dataprovider.rst b/doc/api/data_provider/dataprovider_cn.rst similarity index 99% rename from doc_cn/ui/data_provider/dataprovider.rst rename to doc/api/data_provider/dataprovider_cn.rst index e6796429a78801eba5e5fb776dd6fbe3413115ea..6861ecece8cad19aa8a1e4e67e819f40873ef07c 100644 --- a/doc_cn/ui/data_provider/dataprovider.rst +++ b/doc/api/data_provider/dataprovider_cn.rst @@ -1,13 +1,13 @@ DataProvider的介绍 ================== -DataProvider是PaddlePaddle负责提供数据的模块。其作用是将数据传入内存或显存,让神经网络可以进行训练或预测。用户可以通过简单使用Python接口 `PyDataProvider2 `_ ,来自定义传数据的过程。如果有更复杂的使用,或者需要更高的效率,用户也可以在C++端自定义一个 ``DataProvider`` 。 +DataProvider是PaddlePaddle负责提供数据的模块。其作用是将数据传入内存或显存,让神经网络可以进行训练或预测。用户可以通过简单使用Python接口 `PyDataProvider2 `_ ,来自定义传数据的过程。如果有更复杂的使用,或者需要更高的效率,用户也可以在C++端自定义一个 ``DataProvider`` 。 PaddlePaddle需要用户在网络配置(trainer_config.py)中定义使用哪种DataProvider,并且在DataProvider中实现如何访问训练文件列表(train.list)或测试文件列表(test.list)。 -- train.list和test.list存放在本地(推荐直接存放到训练目录,以相对路径引用)。一般情况下,两者均为纯文本文件,其中每一行对应一个数据文件地址: - - - 如果数据文件存于本地磁盘,这个地址则为它的绝对路径或相对路径(相对于PaddlePaddle程序运行时的路径)。 - - 地址也可以为hdfs文件路径,或者数据库连接路径等。 - - 由于这个地址会被DataProvider使用,因此,如何解析该地址也是用户自定义DataProvider时需要考虑的地方。 +- train.list和test.list存放在本地(推荐直接存放到训练目录,以相对路径引用)。一般情况下,两者均为纯文本文件,其中每一行对应一个数据文件地址: + + - 如果数据文件存于本地磁盘,这个地址则为它的绝对路径或相对路径(相对于PaddlePaddle程序运行时的路径)。 + - 地址也可以为hdfs文件路径,或者数据库连接路径等。 + - 由于这个地址会被DataProvider使用,因此,如何解析该地址也是用户自定义DataProvider时需要考虑的地方。 - 如果没有设置test.list,或设置为None,那么在训练过程中不会执行测试操作;否则,会根据命令行参数指定的测试方式,在训练过程中进行测试,从而防止过拟合。 diff --git a/doc/api/data_provider/index_en.rst b/doc/api/data_provider/dataprovider_en.rst similarity index 100% rename from doc/api/data_provider/index_en.rst rename to doc/api/data_provider/dataprovider_en.rst diff --git a/doc_cn/ui/data_provider/pydataprovider2.rst b/doc/api/data_provider/pydataprovider2_cn.rst similarity index 95% rename from doc_cn/ui/data_provider/pydataprovider2.rst rename to doc/api/data_provider/pydataprovider2_cn.rst index dce373118c5ae01c7ecf9afc15e1d9af9bf4ebe4..f243ea775a6b4c0961a8948653ad54ea9b531dcb 100644 --- a/doc_cn/ui/data_provider/pydataprovider2.rst +++ b/doc/api/data_provider/pydataprovider2_cn.rst @@ -1,227 +1,227 @@ -PyDataProvider2的使用 -===================== - -PyDataProvider2是PaddlePaddle使用Python提供数据的推荐接口。该接口使用多线程读取数据,并提供了简单的Cache功能;同时可以使用户只关注如何从文件中读取每一条数据,而不用关心数据如何传输,如何存储等等。 - -.. contents:: - -MNIST的使用场景 ---------------- - -我们以MNIST手写识别为例,来说明PyDataProvider2的简单使用场景。 - -样例数据 -++++++++ - -MNIST是一个包含有70,000张灰度图片的数字分类数据集。样例数据 ``mnist_train.txt`` 如下: - -.. 
literalinclude:: mnist_train.txt - -其中每行数据代表一张图片,行内使用 ``;`` 分成两部分。第一部分是图片的标签,为0-9中的一个数字;第二部分是28*28的图片像素灰度值。 对应的 ``train.list`` 即为这个数据文件的名字: - -.. literalinclude:: train.list - -dataprovider的使用 -++++++++++++++++++ - -.. literalinclude:: mnist_provider.dict.py - -- 首先,引入PaddlePaddle的PyDataProvider2包。 -- 其次,定义一个Python的 `Decorator `_ `@provider`_ 。用于将下一行的数据输入函数标记成一个PyDataProvider2,同时设置它的input_types属性。 - - - `input_types`_:设置这个PyDataProvider2返回什么样的数据。本例根据网络配置中 ``data_layer`` 的名字,显式指定返回的是一个28*28维的稠密浮点数向量和一个[0-9]的10维整数标签。 - - .. literalinclude:: mnist_config.py - :lines: 9-10 - - - 注意:如果用户不显示指定返回数据的对应关系,那么PaddlePaddle会根据layer的声明顺序,来确定对应关系。但这个关系可能不正确,所以推荐使用显式指定的方式来设置input_types。 -- 最后,实现数据输入函数(如本例的 ``process`` 函数)。 - - - 该函数的功能是:打开文本文件,读取每一行,将行中的数据转换成与input_types一致的格式,然后返回给PaddlePaddle进程。注意, - - - 返回的顺序需要和input_types中定义的顺序一致。 - - 返回时,必须使用Python关键词 ``yield`` ,相关概念是 ``generator`` 。 - - 一次yield调用,返回一条完整的样本。如果想为一个数据文件返回多条样本,只需要在函数中调用多次yield即可(本例中使用for循环进行多次调用)。 - - - 该函数具有两个参数: - - - settings:在本例中没有使用,具体可以参考 `init_hook`_ 中的说明。 - - filename:为 ``train.list`` 或 ``test.list`` 中的一行,即若干数据文件路径的某一个。 - -网络配置中的调用 -++++++++++++++++ - -在网络配置里,只需要一行代码就可以调用这个PyDataProvider2,如, - -.. literalinclude:: mnist_config.py - :lines: 1-7 - -训练数据是 ``train.list`` ,没有测试数据,调用的PyDataProvider2是 ``mnist_provider`` 模块中的 ``process`` 函数。 - -小结 -+++++ - -至此,简单的PyDataProvider2样例就说明完毕了。对用户来说,仅需要知道如何从 **一个文件** 中读取 **一条样本** ,就可以将数据传送给PaddlePaddle了。而PaddlePaddle则会帮用户做以下工作: - -* 将数据组合成Batch进行训练 -* 对训练数据进行Shuffle -* 多线程的数据读取 -* 缓存训练数据到内存(可选) -* CPU->GPU双缓存 - -是不是很简单呢? - -时序模型的使用场景 ------------------- -样例数据 -++++++++ - -时序模型是指数据的某一维度是一个序列形式,即包含时间步信息。所谓时间步信息,不一定和时间有关系,只是说明数据的顺序是重要的。例如,文本信息就是一个序列数据。 - -本例采用英文情感分类的数据,即将一段英文文本数据,分类成正面情绪和负面情绪两类(用0和1表示)。样例数据 ``sentimental_train.txt`` 如下: - -.. literalinclude:: sentimental_train.txt - -dataprovider的使用 -++++++++++++++++++ - -相对MNIST而言,这个dataprovider较复杂,主要原因是增加了初始化机制 `init_hook`_。本例的 ``on_init`` 函数就是根据该机制配置的,它会在dataprovider创建的时候执行。 - -- 其中 ``input_types`` 和在 `@provider`_ 中配置的效果一致。本例中的输入特征是词ID的序列,因此使用 ``integer_value_sequence`` 类型来设置。 -- 将 ``dictionary`` 存入settings对象,在 ``process`` 函数中使用。 dictionary是从网络配置中传入的dict对象,即一个将单词字符串映射到单词ID的字典。 - -.. literalinclude:: sentimental_provider.py - -网络配置中的调用 -++++++++++++++++ - -调用这个PyDataProvider2的方法,基本上和MNIST样例一致,除了 - -* 在配置中需要读取外部字典。 -* 在声明DataProvider的时候传入dictionary作为参数。 - -.. 
literalinclude:: sentimental_config.py - :emphasize-lines: 12-14 - -参考(Reference) ---------------- - -@provider -+++++++++ - -``@provider`` 是一个Python的 `Decorator`_ ,可以将某一个函数标记成一个PyDataProvider2。如果不了解 `Decorator`_ 是什么也没关系,只需知道这是一个标记属性的方法就可以了。它包含的属性参数如下: - -* input_types:数据输入格式。具体的格式说明,请参考 `input_types`_ 。 -* should_shuffle:是不是要对数据做Shuffle。训练时默认shuffle,测试时默认不shuffle。 -* min_pool_size:设置内存中最小暂存的数据条数,也是PaddlePaddle所能够保证的shuffle粒度。如果为-1,则会预先读取全部数据到内存中。 -* pool_size: 设置内存中暂存的数据条数。如果为-1(默认),则不在乎内存暂存多少条数据。如果设置,则推荐大于训练时batch size的值,并且在内存足够的情况下越大越好。 -* can_over_batch_size:是否允许暂存略微多余pool_size的数据。由于这样做可以避免很多死锁问题,一般推荐设置成True。 -* calc_batch_size:可以传入一个函数,用于自定义每条数据的batch size(默认为1)。 -* cache: 数据缓存的策略,具体请参考 `cache`_ 。 -* init_hook:初始化时调用的函数,具体请参考 `init_hook`_ 。 -* check:如果为true,会根据input_types检查数据的合法性。 -* check_fail_continue:如果为true,那么当check出数据不合法时,会扔到这条数据,继续训练或预测。(对check=false的情况,没有作用) - -input_types -+++++++++++ - -PaddlePaddle的数据包括四种主要类型,和三种序列模式。 - -四种数据类型: - -* dense_vector:稠密的浮点数向量。 -* sparse_binary_vector:稀疏的01向量,即大部分值为0,但有值的地方必须为1。 -* sparse_float_vector:稀疏的向量,即大部分值为0,但有值的部分可以是任何浮点数。 -* integer:整数标签。 - -三种序列模式: - -* SequenceType.NO_SEQUENCE:不是一条序列 -* SequenceType.SEQUENCE:是一条时间序列 -* SequenceType.SUB_SEQUENCE: 是一条时间序列,且序列的每一个元素还是一个时间序列。 - -不同的数据类型和序列模式返回的格式不同,列表如下: - -+----------------------+---------------------+-----------------------------------+------------------------------------------------+ -| | NO_SEQUENCE | SEQUENCE | SUB_SEQUENCE | -+======================+=====================+===================================+================================================+ -| dense_vector | [f, f, ...] | [[f, ...], [f, ...], ...] | [[[f, ...], ...], [[f, ...], ...],...] | -+----------------------+---------------------+-----------------------------------+------------------------------------------------+ -| sparse_binary_vector | [i, i, ...] | [[i, ...], [i, ...], ...] | [[[i, ...], ...], [[i, ...], ...],...] | -+----------------------+---------------------+-----------------------------------+------------------------------------------------+ -| sparse_float_vector | [(i,f), (i,f), ...] | [[(i,f), ...], [(i,f), ...], ...] | [[[(i,f), ...], ...], [[(i,f), ...], ...],...] | -+----------------------+---------------------+-----------------------------------+------------------------------------------------+ -| integer_value | i | [i, i, ...] | [[i, ...], [i, ...], ...] 
| -+----------------------+---------------------+-----------------------------------+------------------------------------------------+ - -其中,f代表一个浮点数,i代表一个整数。 - -注意:对sparse_binary_vector和sparse_float_vector,PaddlePaddle存的是有值位置的索引。例如, - -- 对一个5维非序列的稀疏01向量 ``[0, 1, 1, 0, 0]`` ,类型是sparse_binary_vector,返回的是 ``[1, 2]`` 。 -- 对一个5维非序列的稀疏浮点向量 ``[0, 0.5, 0.7, 0, 0]`` ,类型是sparse_float_vector,返回的是 ``[(1, 0.5), (2, 0.7)]`` 。 - -init_hook -+++++++++ - -init_hook可以传入一个函数。该函数在初始化的时候会被调用,其参数如下: - -* 第一个参数是settings对象,它和数据传入函数的第一个参数(如本例中 ``process`` 函数的 ``settings`` 参数)必须一致。该对象具有以下两个属性: - * settings.input_types:数据输入格式,具体请参考 `input_types`_ 。 - * settings.logger:一个logging对象。 -* 其他参数使用 ``kwargs`` (key word arguments)传入,包括以下两种: - * PaddlePaddle定义的参数: 1)is_train:bool型参数,表示用于训练或预测;2)file_list:所有文件列表。 - * 用户定义的参数:使用args在网络配置中设置。 - -注意:PaddlePaddle保留添加参数的权力,因此init_hook尽量使用 ``**kwargs`` 来接受不使用的函数以保证兼容性。 - -cache -+++++ - -PyDataProvider2提供了两种简单的Cache策略: - -* CacheType.NO_CACHE:不缓存任何数据,每次都会从python端读取数据 -* CacheType.CACHE_PASS_IN_MEM:第一个pass会从python端读取数据,剩下的pass会直接从内存里 - 读取数据。 - - -注意事项 --------- - -可能的内存泄露问题 -++++++++++++++++++ - -PaddlePaddle将train.list中的每一行都传递给process函数,从而生成多个generator。当训练数据非常多时,就会生成非常多的generator。 - -虽然每个generator在没有调用的时候,是几乎不占内存的;但当调用过一次后,generator便会存下当前的上下文(Context),而这个Context可能会非常大。并且,generator至少需要调用两次才会知道是否停止。所以,即使process函数里面只有一个yield,也需要两次随机选择到相同generator的时候,才会释放该段内存。 - -.. code-block:: python - - def func(): - yield 0 - - f = func() # 创建generator - tmp = next(f) # 调用一次,返回0 - tmp = next(f) # 调用第二次的时候,才会Stop Iteration - -由于顺序调用这些generator不会出现上述问题,因此有两种解决方案: - -1. **最佳推荐**:将样本的地址放入另一个文本文件,train.list写入那个文本文件的地址。即不要将每一个样本都放入train.list。 -2. 在generator的上下文中尽量留下非常少的变量引用,例如 - -.. code-block:: python - - def real_process(fn): - # ... read from fn - return result # 当函数返回的时候,python可以解除掉内部变量的引用。 - - def process(fn): - yield real_process(fn) - -注意:这个问题是PyDataProvider读数据时候的逻辑问题,很难整体修正。 - -内存不够用的情况 -++++++++++++++++ - -PyDataProvider2会尽可能多的使用内存。因此,对于内存较小的机器,推荐使用 ``pool_size`` 变量来设置内存中暂存的数据条。具体请参考 `@provider`_ 中的说明。 - +PyDataProvider2的使用 +===================== + +PyDataProvider2是PaddlePaddle使用Python提供数据的推荐接口。该接口使用多线程读取数据,并提供了简单的Cache功能;同时可以使用户只关注如何从文件中读取每一条数据,而不用关心数据如何传输,如何存储等等。 + +.. contents:: + +MNIST的使用场景 +--------------- + +我们以MNIST手写识别为例,来说明PyDataProvider2的简单使用场景。 + +样例数据 +++++++++ + +MNIST是一个包含有70,000张灰度图片的数字分类数据集。样例数据 ``mnist_train.txt`` 如下: + +.. literalinclude:: src/mnist_train.txt + +其中每行数据代表一张图片,行内使用 ``;`` 分成两部分。第一部分是图片的标签,为0-9中的一个数字;第二部分是28*28的图片像素灰度值。 对应的 ``train.list`` 即为这个数据文件的名字: + +.. literalinclude:: src/train.list + +dataprovider的使用 +++++++++++++++++++ + +.. literalinclude:: src/mnist_provider.dict.py + +- 首先,引入PaddlePaddle的PyDataProvider2包。 +- 其次,定义一个Python的 `Decorator `_ `@provider`_ 。用于将下一行的数据输入函数标记成一个PyDataProvider2,同时设置它的input_types属性。 + + - `input_types`_:设置这个PyDataProvider2返回什么样的数据。本例根据网络配置中 ``data_layer`` 的名字,显式指定返回的是一个28*28维的稠密浮点数向量和一个[0-9]的10维整数标签。 + + .. 
literalinclude:: src/mnist_config.py + :lines: 9-10 + + - 注意:如果用户不显示指定返回数据的对应关系,那么PaddlePaddle会根据layer的声明顺序,来确定对应关系。但这个关系可能不正确,所以推荐使用显式指定的方式来设置input_types。 +- 最后,实现数据输入函数(如本例的 ``process`` 函数)。 + + - 该函数的功能是:打开文本文件,读取每一行,将行中的数据转换成与input_types一致的格式,然后返回给PaddlePaddle进程。注意, + + - 返回的顺序需要和input_types中定义的顺序一致。 + - 返回时,必须使用Python关键词 ``yield`` ,相关概念是 ``generator`` 。 + - 一次yield调用,返回一条完整的样本。如果想为一个数据文件返回多条样本,只需要在函数中调用多次yield即可(本例中使用for循环进行多次调用)。 + + - 该函数具有两个参数: + + - settings:在本例中没有使用,具体可以参考 `init_hook`_ 中的说明。 + - filename:为 ``train.list`` 或 ``test.list`` 中的一行,即若干数据文件路径的某一个。 + +网络配置中的调用 +++++++++++++++++ + +在网络配置里,只需要一行代码就可以调用这个PyDataProvider2,如, + +.. literalinclude:: src/mnist_config.py + :lines: 1-7 + +训练数据是 ``train.list`` ,没有测试数据,调用的PyDataProvider2是 ``mnist_provider`` 模块中的 ``process`` 函数。 + +小结 ++++++ + +至此,简单的PyDataProvider2样例就说明完毕了。对用户来说,仅需要知道如何从 **一个文件** 中读取 **一条样本** ,就可以将数据传送给PaddlePaddle了。而PaddlePaddle则会帮用户做以下工作: + +* 将数据组合成Batch进行训练 +* 对训练数据进行Shuffle +* 多线程的数据读取 +* 缓存训练数据到内存(可选) +* CPU->GPU双缓存 + +是不是很简单呢? + +时序模型的使用场景 +------------------ +样例数据 +++++++++ + +时序模型是指数据的某一维度是一个序列形式,即包含时间步信息。所谓时间步信息,不一定和时间有关系,只是说明数据的顺序是重要的。例如,文本信息就是一个序列数据。 + +本例采用英文情感分类的数据,即将一段英文文本数据,分类成正面情绪和负面情绪两类(用0和1表示)。样例数据 ``sentimental_train.txt`` 如下: + +.. literalinclude:: src/sentimental_train.txt + +dataprovider的使用 +++++++++++++++++++ + +相对MNIST而言,这个dataprovider较复杂,主要原因是增加了初始化机制 `init_hook`_。本例的 ``on_init`` 函数就是根据该机制配置的,它会在dataprovider创建的时候执行。 + +- 其中 ``input_types`` 和在 `@provider`_ 中配置的效果一致。本例中的输入特征是词ID的序列,因此使用 ``integer_value_sequence`` 类型来设置。 +- 将 ``dictionary`` 存入settings对象,在 ``process`` 函数中使用。 dictionary是从网络配置中传入的dict对象,即一个将单词字符串映射到单词ID的字典。 + +.. literalinclude:: src/sentimental_provider.py + +网络配置中的调用 +++++++++++++++++ + +调用这个PyDataProvider2的方法,基本上和MNIST样例一致,除了 + +* 在配置中需要读取外部字典。 +* 在声明DataProvider的时候传入dictionary作为参数。 + +.. literalinclude:: src/sentimental_config.py + :emphasize-lines: 12-14 + +参考(Reference) +--------------- + +@provider ++++++++++ + +``@provider`` 是一个Python的 `Decorator`_ ,可以将某一个函数标记成一个PyDataProvider2。如果不了解 `Decorator`_ 是什么也没关系,只需知道这是一个标记属性的方法就可以了。它包含的属性参数如下: + +* input_types:数据输入格式。具体的格式说明,请参考 `input_types`_ 。 +* should_shuffle:是不是要对数据做Shuffle。训练时默认shuffle,测试时默认不shuffle。 +* min_pool_size:设置内存中最小暂存的数据条数,也是PaddlePaddle所能够保证的shuffle粒度。如果为-1,则会预先读取全部数据到内存中。 +* pool_size: 设置内存中暂存的数据条数。如果为-1(默认),则不在乎内存暂存多少条数据。如果设置,则推荐大于训练时batch size的值,并且在内存足够的情况下越大越好。 +* can_over_batch_size:是否允许暂存略微多余pool_size的数据。由于这样做可以避免很多死锁问题,一般推荐设置成True。 +* calc_batch_size:可以传入一个函数,用于自定义每条数据的batch size(默认为1)。 +* cache: 数据缓存的策略,具体请参考 `cache`_ 。 +* init_hook:初始化时调用的函数,具体请参考 `init_hook`_ 。 +* check:如果为true,会根据input_types检查数据的合法性。 +* check_fail_continue:如果为true,那么当check出数据不合法时,会扔到这条数据,继续训练或预测。(对check=false的情况,没有作用) + +input_types ++++++++++++ + +PaddlePaddle的数据包括四种主要类型,和三种序列模式。 + +四种数据类型: + +* dense_vector:稠密的浮点数向量。 +* sparse_binary_vector:稀疏的01向量,即大部分值为0,但有值的地方必须为1。 +* sparse_float_vector:稀疏的向量,即大部分值为0,但有值的部分可以是任何浮点数。 +* integer:整数标签。 + +三种序列模式: + +* SequenceType.NO_SEQUENCE:不是一条序列 +* SequenceType.SEQUENCE:是一条时间序列 +* SequenceType.SUB_SEQUENCE: 是一条时间序列,且序列的每一个元素还是一个时间序列。 + +不同的数据类型和序列模式返回的格式不同,列表如下: + ++----------------------+---------------------+-----------------------------------+------------------------------------------------+ +| | NO_SEQUENCE | SEQUENCE | SUB_SEQUENCE | ++======================+=====================+===================================+================================================+ +| dense_vector | [f, f, ...] | [[f, ...], [f, ...], ...] | [[[f, ...], ...], [[f, ...], ...],...] 
| ++----------------------+---------------------+-----------------------------------+------------------------------------------------+ +| sparse_binary_vector | [i, i, ...] | [[i, ...], [i, ...], ...] | [[[i, ...], ...], [[i, ...], ...],...] | ++----------------------+---------------------+-----------------------------------+------------------------------------------------+ +| sparse_float_vector | [(i,f), (i,f), ...] | [[(i,f), ...], [(i,f), ...], ...] | [[[(i,f), ...], ...], [[(i,f), ...], ...],...] | ++----------------------+---------------------+-----------------------------------+------------------------------------------------+ +| integer_value | i | [i, i, ...] | [[i, ...], [i, ...], ...] | ++----------------------+---------------------+-----------------------------------+------------------------------------------------+ + +其中,f代表一个浮点数,i代表一个整数。 + +注意:对sparse_binary_vector和sparse_float_vector,PaddlePaddle存的是有值位置的索引。例如, + +- 对一个5维非序列的稀疏01向量 ``[0, 1, 1, 0, 0]`` ,类型是sparse_binary_vector,返回的是 ``[1, 2]`` 。 +- 对一个5维非序列的稀疏浮点向量 ``[0, 0.5, 0.7, 0, 0]`` ,类型是sparse_float_vector,返回的是 ``[(1, 0.5), (2, 0.7)]`` 。 + +init_hook ++++++++++ + +init_hook可以传入一个函数。该函数在初始化的时候会被调用,其参数如下: + +* 第一个参数是settings对象,它和数据传入函数的第一个参数(如本例中 ``process`` 函数的 ``settings`` 参数)必须一致。该对象具有以下两个属性: + * settings.input_types:数据输入格式,具体请参考 `input_types`_ 。 + * settings.logger:一个logging对象。 +* 其他参数使用 ``kwargs`` (key word arguments)传入,包括以下两种: + * PaddlePaddle定义的参数: 1)is_train:bool型参数,表示用于训练或预测;2)file_list:所有文件列表。 + * 用户定义的参数:使用args在网络配置中设置。 + +注意:PaddlePaddle保留添加参数的权力,因此init_hook尽量使用 ``**kwargs`` 来接受不使用的函数以保证兼容性。 + +cache ++++++ + +PyDataProvider2提供了两种简单的Cache策略: + +* CacheType.NO_CACHE:不缓存任何数据,每次都会从python端读取数据 +* CacheType.CACHE_PASS_IN_MEM:第一个pass会从python端读取数据,剩下的pass会直接从内存里 + 读取数据。 + + +注意事项 +-------- + +可能的内存泄露问题 +++++++++++++++++++ + +PaddlePaddle将train.list中的每一行都传递给process函数,从而生成多个generator。当训练数据非常多时,就会生成非常多的generator。 + +虽然每个generator在没有调用的时候,是几乎不占内存的;但当调用过一次后,generator便会存下当前的上下文(Context),而这个Context可能会非常大。并且,generator至少需要调用两次才会知道是否停止。所以,即使process函数里面只有一个yield,也需要两次随机选择到相同generator的时候,才会释放该段内存。 + +.. code-block:: python + + def func(): + yield 0 + + f = func() # 创建generator + tmp = next(f) # 调用一次,返回0 + tmp = next(f) # 调用第二次的时候,才会Stop Iteration + +由于顺序调用这些generator不会出现上述问题,因此有两种解决方案: + +1. **最佳推荐**:将样本的地址放入另一个文本文件,train.list写入那个文本文件的地址。即不要将每一个样本都放入train.list。 +2. 在generator的上下文中尽量留下非常少的变量引用,例如 + +.. code-block:: python + + def real_process(fn): + # ... read from fn + return result # 当函数返回的时候,python可以解除掉内部变量的引用。 + + def process(fn): + yield real_process(fn) + +注意:这个问题是PyDataProvider读数据时候的逻辑问题,很难整体修正。 + +内存不够用的情况 +++++++++++++++++ + +PyDataProvider2会尽可能多的使用内存。因此,对于内存较小的机器,推荐使用 ``pool_size`` 变量来设置内存中暂存的数据条。具体请参考 `@provider`_ 中的说明。 + diff --git a/doc/api/data_provider/pydataprovider2_en.rst b/doc/api/data_provider/pydataprovider2_en.rst index 50e8b0d32923c4fea37f2296a76cf5b44c8364e7..30357be32538db4423ad0eaf899138256c84edc7 100644 --- a/doc/api/data_provider/pydataprovider2_en.rst +++ b/doc/api/data_provider/pydataprovider2_en.rst @@ -1,4 +1,4 @@ -.. _api_pydataprovider2_en: +.. _api_pydataprovider2: PyDataProvider2 =============== @@ -24,18 +24,18 @@ of 28 x 28 pixels. A small part of the original data as an example is shown as below: -.. literalinclude:: ../../../doc_cn/ui/data_provider/mnist_train.txt +.. literalinclude:: src/mnist_train.txt Each line of the data contains two parts, separated by :code:`;`. The first part is label of an image. The second part contains 28x28 pixel float values. 
Just write path of the above data into train.list. It looks like this: -.. literalinclude:: ../../../doc_cn/ui/data_provider/train.list +.. literalinclude:: src/train.list The corresponding dataprovider is shown as below: -.. literalinclude:: ../../../doc_cn/ui/data_provider/mnist_provider.py +.. literalinclude:: src/mnist_provider.dict.py The first line imports PyDataProvider2 package. The main function is the process function, that has two parameters. @@ -74,7 +74,7 @@ sample by using keywords :code:`yield`. Only a few lines of codes need to be added into the training configuration file, you can take this as an example. -.. literalinclude:: ../../../doc_cn/ui/data_provider/mnist_config.py +.. literalinclude:: src/mnist_config.py Here we specify training data by :code:`train.list`, and no testing data is specified. The method which actually provide data is :code:`process`. @@ -83,7 +83,7 @@ User also can use another style to provide data, which defines the :code:`data_layer`'s name explicitly when `yield`. For example, the :code:`dataprovider` is shown as below. -.. literalinclude:: ../../../doc_cn/ui/data_provider/mnist_provider.dict.py +.. literalinclude:: src/mnist_provider.dict.py :linenos: If user did't give the :code:`data_layer`'s name, PaddlePaddle will use @@ -104,7 +104,7 @@ And PaddlePadle will do all of the rest things\: Is this cool? -.. _api_pydataprovider2_en_sequential_model: +.. _api_pydataprovider2_sequential_model: DataProvider for the sequential model ------------------------------------- @@ -121,11 +121,11 @@ negative sentiment (marked by 0 and 1 respectively). A small part of the original data as an example can be found in the path below: -.. literalinclude:: ../../../doc_cn/ui/data_provider/sentimental_train.txt +.. literalinclude:: src/sentimental_train.txt The corresponding data provider can be found in the path below: -.. literalinclude:: ../../../doc_cn/ui/data_provider/sentimental_provider.py +.. literalinclude:: src/sentimental_provider.py This data provider for sequential model is a little more complex than that for MINST dataset. @@ -143,7 +143,7 @@ initialized. The :code:`on_init` function has the following parameters: To pass these parameters into DataProvider, the following lines should be added into trainer configuration file. -.. literalinclude:: ../../../doc_cn/ui/data_provider/sentimental_config.py +.. 
literalinclude:: src/sentimental_config.py The definition is basically same as MNIST example, except: * Load dictionary in this configuration diff --git a/doc_cn/ui/data_provider/mnist_config.py b/doc/api/data_provider/src/mnist_config.py similarity index 100% rename from doc_cn/ui/data_provider/mnist_config.py rename to doc/api/data_provider/src/mnist_config.py diff --git a/doc_cn/ui/data_provider/mnist_provider.dict.py b/doc/api/data_provider/src/mnist_provider.dict.py similarity index 100% rename from doc_cn/ui/data_provider/mnist_provider.dict.py rename to doc/api/data_provider/src/mnist_provider.dict.py diff --git a/doc_cn/ui/data_provider/mnist_train.txt b/doc/api/data_provider/src/mnist_train.txt similarity index 100% rename from doc_cn/ui/data_provider/mnist_train.txt rename to doc/api/data_provider/src/mnist_train.txt diff --git a/doc_cn/ui/data_provider/sentimental_config.py b/doc/api/data_provider/src/sentimental_config.py similarity index 100% rename from doc_cn/ui/data_provider/sentimental_config.py rename to doc/api/data_provider/src/sentimental_config.py diff --git a/doc_cn/ui/data_provider/sentimental_provider.py b/doc/api/data_provider/src/sentimental_provider.py similarity index 100% rename from doc_cn/ui/data_provider/sentimental_provider.py rename to doc/api/data_provider/src/sentimental_provider.py diff --git a/doc_cn/ui/data_provider/sentimental_train.txt b/doc/api/data_provider/src/sentimental_train.txt similarity index 100% rename from doc_cn/ui/data_provider/sentimental_train.txt rename to doc/api/data_provider/src/sentimental_train.txt diff --git a/doc_cn/ui/data_provider/train.list b/doc/api/data_provider/src/train.list similarity index 100% rename from doc_cn/ui/data_provider/train.list rename to doc/api/data_provider/src/train.list diff --git a/doc/api/index_cn.rst b/doc/api/index_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..2d54af84b8d894842383f11ec1a57f841b0f920c --- /dev/null +++ b/doc/api/index_cn.rst @@ -0,0 +1,37 @@ +API +=== + +DataProvider API +---------------- + +.. toctree:: + :maxdepth: 1 + + data_provider/dataprovider_cn.rst + data_provider/pydataprovider2_cn.rst + +.. _api_trainer_config: + +Model Config API +---------------- + +.. toctree:: + :maxdepth: 1 + + trainer_config_helpers/optimizers.rst + trainer_config_helpers/data_sources.rst + trainer_config_helpers/layers.rst + trainer_config_helpers/activations.rst + trainer_config_helpers/poolings.rst + trainer_config_helpers/networks.rst + trainer_config_helpers/evaluators.rst + trainer_config_helpers/attrs.rst + + +Applications API +---------------- + +.. toctree:: + :maxdepth: 1 + + predict/swig_py_paddle_cn.rst diff --git a/doc/api/index_en.rst b/doc/api/index_en.rst index 6fdee9f928dd7057cec58f740bf7520af54a24fb..10c297a71d6988c002de868e804ed9ee2345fbd7 100644 --- a/doc/api/index_en.rst +++ b/doc/api/index_en.rst @@ -7,7 +7,7 @@ DataProvider API .. toctree:: :maxdepth: 1 - data_provider/index_en.rst + data_provider/dataprovider_en.rst data_provider/pydataprovider2_en.rst .. 
_api_trainer_config: diff --git a/doc/api/predict/predict_sample.py b/doc/api/predict/src/predict_sample.py similarity index 100% rename from doc/api/predict/predict_sample.py rename to doc/api/predict/src/predict_sample.py diff --git a/doc_cn/ui/predict/swig_py_paddle.rst b/doc/api/predict/swig_py_paddle_cn.rst similarity index 97% rename from doc_cn/ui/predict/swig_py_paddle.rst rename to doc/api/predict/swig_py_paddle_cn.rst index 05f25345c5246687363dee1931310120b5723d0b..15e35353bb25e7906191e47eae49c824b521c7fd 100644 --- a/doc_cn/ui/predict/swig_py_paddle.rst +++ b/doc/api/predict/swig_py_paddle_cn.rst @@ -34,7 +34,7 @@ PaddlePaddle使用swig对常用的预测接口进行了封装,通过编译会 如下是一段使用mnist model来实现手写识别的预测代码。完整的代码见 ``src_root/doc/ui/predict/predict_sample.py`` 。mnist model可以通过 ``src_root\demo\mnist`` 目录下的demo训练出来。 -.. literalinclude:: ../../../doc/ui/predict/predict_sample.py +.. literalinclude:: src/predict_sample.py :language: python :lines: 15-18,121-136 diff --git a/doc/api/predict/swig_py_paddle_en.rst b/doc/api/predict/swig_py_paddle_en.rst index 8b145e5b30a88db9f61c63249885dac92dd1fa9c..1c628e6971fa5643e6a9ca629488049957686193 100644 --- a/doc/api/predict/swig_py_paddle_en.rst +++ b/doc/api/predict/swig_py_paddle_en.rst @@ -13,7 +13,7 @@ Here is a sample python script that shows the typical prediction process for the MNIST classification problem. A complete sample code could be found at :code:`src_root/doc/ui/predict/predict_sample.py`. -.. literalinclude:: ./predict_sample.py +.. literalinclude:: src/predict_sample.py :language: python :lines: 15-18,90-100,101-104 @@ -23,7 +23,7 @@ python's :code:`help()` function. Let's walk through the above python script: * At the beginning, use :code:`swig_paddle.initPaddle()` to initialize PaddlePaddle with command line arguments, for more about command line arguments - see :ref:`cmd_detail_introduction_en` . + see :ref:`cmd_detail_introduction` . * Parse the configuration file that is used in training with :code:`parse_config()`. Because data to predict with always have no label, and output of prediction work normally is the output layer rather than the cost layer, so you should modify @@ -36,7 +36,7 @@ python's :code:`help()` function. Let's walk through the above python script: - Note: As swig_paddle can only accept C++ matrices, we offer a utility class DataProviderConverter that can accept the same input data with PyDataProvider2, for more information please refer to document - of :ref:`api_pydataprovider2_en` . + of :ref:`api_pydataprovider2` . * Do the prediction with :code:`forwardTest()`, which takes the converted input data and outputs the activations of the output layer. diff --git a/doc_cn/conf.py.in b/doc/conf.py.cn.in similarity index 98% rename from doc_cn/conf.py.in rename to doc/conf.py.cn.in index 4f3afb814f1e779a711e3535da1f8853aa0d97c6..418d718fbd9c61bff3acb9c2dab0638c0b650bab 100644 --- a/doc_cn/conf.py.in +++ b/doc/conf.py.cn.in @@ -62,7 +62,7 @@ source_suffix = ['.rst', '.md', '.Rmd'] source_encoding = 'utf-8' # The master toctree document. -master_doc = 'index' +master_doc = 'index_cn' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. @@ -79,7 +79,7 @@ language = 'zh_CN' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. -exclude_patterns = ['_build'] +exclude_patterns = ['_build', '**/*_en*', '*_en*'] # The reST default role (used for this markup: `text`) to use for all # documents. 
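The conf.py.cn.in changes above (together with the matching conf.py.en.in changes just below) are what let the English and Chinese manuals build side by side: each language's conf sets its own `master_doc` and hides the other language's sources via `exclude_patterns`. A minimal sketch of driving both builds through the targets this patch defines in doc/CMakeLists.txt; the `WITH_DOC` option, the target names, and the output paths come from this patch, while the build-directory name is only an assumption:

```bash
mkdir -p build && cd build
cmake .. -DWITH_DOC=ON   # WITH_DOC gates the doc/ subdirectory
make paddle_docs         # English HTML should land under <build>/doc/en/html
make paddle_docs_cn      # Chinese HTML should land under <build>/doc/cn/html
```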
diff --git a/doc/conf.py.in b/doc/conf.py.en.in similarity index 97% rename from doc/conf.py.in rename to doc/conf.py.en.in index 01d156e887b623898df09044a800fd067ee116db..e96c25cb75bee20d2e2949423d80ddab1d3450a1 100644 --- a/doc/conf.py.in +++ b/doc/conf.py.en.in @@ -63,7 +63,7 @@ source_suffix = ['.rst', '.md', '.Rmd'] source_encoding = 'utf-8' # The master toctree document. -master_doc = 'index' +master_doc = 'index_en' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. @@ -80,7 +80,7 @@ language = None # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. -exclude_patterns = ['_build'] +exclude_patterns = ['_build', '**/*_cn*', '*_cn*'] # The reST default role (used for this markup: `text`) to use for all # documents. @@ -144,6 +144,6 @@ def setup(app): # no c++ API for now app.add_config_value('recommonmark_config', { 'url_resolver': lambda url: github_doc_root + url, - 'enable_eval_rst': True, + 'enable_eval_rst': True, }, True) app.add_transform(AutoStructify) diff --git a/doc_cn/faq/index.rst b/doc/faq/index_cn.rst similarity index 97% rename from doc_cn/faq/index.rst rename to doc/faq/index_cn.rst index df8f1308cbc4d93cfeab4d921dcbbf5155eb4cc1..abdb5c7cf90dc733b896c9c3d59ce653678f792b 100644 --- a/doc_cn/faq/index.rst +++ b/doc/faq/index_cn.rst @@ -1,5 +1,5 @@ #################### -PaddlePaddle常见问题 +FAQ #################### .. contents:: @@ -33,10 +33,9 @@ PyDataProvider使用的是异步加载,同时在内存里直接随即选取数 个内存池实际上决定了shuffle的粒度。所以,如果将这个内存池减小,又要保证数据是随机的, 那么最好将数据文件在每次读取之前做一次shuffle。可能的代码为 -.. literalinclude:: reduce_min_pool_size.py +.. literalinclude:: src/reduce_min_pool_size.py -这样做可以极大的减少内存占用,并且可能会加速训练过程,详细文档参考 `这里 -<../ui/data_provider/pydataprovider2.html#provider>`_ 。 +这样做可以极大的减少内存占用,并且可能会加速训练过程,详细文档参考 `这里 <../ui/data_provider/pydataprovider2.html#provider>`_ 。 神经元激活内存 ++++++++++++++ @@ -76,7 +75,7 @@ PaddlePaddle支持非常多的优化算法(Optimizer),不同的优化算法需 使用 :code:`pydataprovider`时,可以减少缓存池的大小,同时设置内存缓存功能,即可以极大的加速数据载入流程。 :code:`DataProvider` 缓存池的减小,和之前减小通过减小缓存池来减小内存占用的原理一致。 -.. literalinclude:: reduce_min_pool_size.py +.. literalinclude:: src/reduce_min_pool_size.py 同时 :code:`@provider` 接口有一个 :code:`cache` 参数来控制缓存方法,将其设置成 :code:`CacheType.CACHE_PASS_IN_MEM` 的话,会将第一个 :code:`pass` (过完所有训练数据即为一个pass)生成的数据缓存在内存里,在之后的 :code:`pass` 中,不会再从 :code:`python` 端读取数据,而是直接从内存的缓存里读取数据。这也会极大减少数据读入的耗时。 @@ -90,11 +89,11 @@ PaddlePaddle支持Sparse的训练,sparse训练需要训练特征是 :code:`spa 使用一个词前两个词和后两个词,来预测这个中间的词。这个任务的DataProvider为\: -.. literalinclude:: word2vec_dataprovider.py +.. literalinclude:: src/word2vec_dataprovider.py 这个任务的配置为\: -.. literalinclude:: word2vec_config.py +.. literalinclude:: src/word2vec_config.py 更多关于sparse训练的内容请参考 `sparse训练的文档 `_ @@ -158,7 +157,7 @@ PaddlePaddle的参数使用名字 :code:`name` 作为参数的ID,相同名字 这里 :code:`hidden_a` 和 :code:`hidden_b` 使用了同样的parameter和bias。并且softmax层的两个输入也使用了同样的参数 :code:`softmax_param`。 7. *-cp27mu-linux_x86_64.whl is not a supported wheel on this platform. ------------------------------------------------------------------------ +--------------------------------------------------------------------------- 出现这个问题的主要原因是,系统编译wheel包的时候,使用的 :code:`wheel` 包是最新的, 而系统中的 :code:`pip` 包比较老。具体的解决方法是,更新 :code:`pip` 包并重新编译PaddlePaddle。 @@ -220,7 +219,7 @@ PaddlePaddle的参数使用名字 :code:`name` 作为参数的ID,相同名字 10. 
CMake源码编译, 找到的PythonLibs和PythonInterp版本不一致 ----------------------------------------------------------- +---------------------------------------------------------------- 这是目前CMake寻找Python的逻辑存在缺陷,如果系统安装了多个Python版本,CMake找到的Python库和Python解释器版本可能有不一致现象,导致编译PaddlePaddle失败。正确的解决方法是, 用户强制指定特定的Python版本,具体操作如下: @@ -231,7 +230,7 @@ PaddlePaddle的参数使用名字 :code:`name` 作为参数的ID,相同名字 用户需要指定本机上Python的路径:````, ````, ```` -10. A protocol message was rejected because it was too big +10. A protocol message was rejected because it was too big ---------------------------------------------------------- 如果在训练NLP相关模型时,出现以下错误: diff --git a/doc_cn/faq/reduce_min_pool_size.py b/doc/faq/src/reduce_min_pool_size.py similarity index 100% rename from doc_cn/faq/reduce_min_pool_size.py rename to doc/faq/src/reduce_min_pool_size.py diff --git a/doc_cn/faq/word2vec_config.py b/doc/faq/src/word2vec_config.py similarity index 100% rename from doc_cn/faq/word2vec_config.py rename to doc/faq/src/word2vec_config.py diff --git a/doc_cn/faq/word2vec_dataprovider.py b/doc/faq/src/word2vec_dataprovider.py similarity index 100% rename from doc_cn/faq/word2vec_dataprovider.py rename to doc/faq/src/word2vec_dataprovider.py diff --git a/doc_cn/introduction/index.rst b/doc/getstarted/basic_usage/index_cn.rst similarity index 87% rename from doc_cn/introduction/index.rst rename to doc/getstarted/basic_usage/index_cn.rst index c996f5f4acd07011c98c3e1086080e85ed7dd1b4..8b84306ed7f8339f8c5bfa90a338fb2f108ca3ca 100644 --- a/doc_cn/introduction/index.rst +++ b/doc/getstarted/basic_usage/index_cn.rst @@ -58,6 +58,7 @@ PaddlePaddle是源于百度的一个深度学习平台。这份简短的介绍 cost = regression_cost(input= ȳ, label=y) outputs(cost) + 这段简短的配置展示了PaddlePaddle的基本用法: - 第一部分定义了数据输入。一般情况下,PaddlePaddle先从一个文件列表里获得数据文件地址,然后交给用户自定义的函数(例如上面的 `process`函数)进行读入和预处理从而得到真实输入。本文中由于输入数据是随机生成的不需要读输入文件,所以放一个空列表(`empty.list`)即可。 @@ -65,10 +66,10 @@ PaddlePaddle是源于百度的一个深度学习平台。这份简短的介绍 - 第二部分主要是选择学习算法,它定义了模型参数改变的规则。PaddlePaddle提供了很多优秀的学习算法,这里使用一个基于momentum的随机梯度下降(SGD)算法,该算法每批量(batch)读取12个采样数据进行随机梯度计算来更新更新。 - 最后一部分是神经网络的配置。由于PaddlePaddle已经实现了丰富的网络层,所以很多时候你需要做的只是定义正确的网络层并把它们连接起来。这里使用了三种网络单元: - - - **数据层**:数据层 `data_layer` 是神经网络的入口,它读入数据并将它们传输到接下来的网络层。这里数据层有两个,分别对应于变量 `x` 和 `y`。 - - **全连接层**:全连接层 `fc_layer` 是基础的计算单元,这里利用它建模变量之间的线性关系。计算单元是神经网络的核心,PaddlePaddle支持大量的计算单元和任意深度的网络连接,从而可以拟合任意的函数来学习复杂的数据关系。 - - **回归误差代价层**:回归误差代价层 `regression_cost` 是众多误差代价函数层的一种,它们在训练过程作为网络的出口,用来计算模型的误差,是模型参数优化的目标函数。 + + - **数据层**:数据层 `data_layer` 是神经网络的入口,它读入数据并将它们传输到接下来的网络层。这里数据层有两个,分别对应于变量 `x` 和 `y`。 + - **全连接层**:全连接层 `fc_layer` 是基础的计算单元,这里利用它建模变量之间的线性关系。计算单元是神经网络的核心,PaddlePaddle支持大量的计算单元和任意深度的网络连接,从而可以拟合任意的函数来学习复杂的数据关系。 + - **回归误差代价层**:回归误差代价层 `regression_cost` 是众多误差代价函数层的一种,它们在训练过程作为网络的出口,用来计算模型的误差,是模型参数优化的目标函数。 定义了网络结构并保存为 `trainer_config.py` 之后,运行以下训练命令: @@ -99,8 +100,8 @@ PaddlePaddle将每个模型参数作为一个numpy数组单独存为一个文件 # w=1.999743, b=0.300137 .. image:: ./parameters.png - :align: center - :scale: 80 % + :align: center + :scale: 80 % 从图中可以看到,虽然 `w` 和 `b` 都使用随机值初始化,但在起初的几轮训练中它们都在快速逼近真实值,并且后续仍在不断改进,使得最终得到的模型几乎与真实模型一致。 diff --git a/doc/getstarted/build_and_install/build_from_source_en.md b/doc/getstarted/build_and_install/build_from_source_en.md index 5db871d59ae83666263d03a6ea3b504d323293ee..aaa07d49d3148266db27670a98c2b27db4dc0a8f 100644 --- a/doc/getstarted/build_and_install/build_from_source_en.md +++ b/doc/getstarted/build_and_install/build_from_source_en.md @@ -49,10 +49,8 @@ PaddlePaddle supports some build options. To enable it, first you need to instal WITH_GPUCompile with GPU mode. 
WITH_DOUBLECompile with double precision floating-point, default: single precision. -WITH_GLOGCompile with glog. If not found, default: an internal log implementation. -WITH_GFLAGSCompile with gflags. If not found, default: an internal flag implementation. WITH_TESTINGCompile with gtest for PaddlePaddle's unit testing. -WITH_DOC Compile to generate PaddlePaddle's docs, default: disabled (OFF). +WITH_DOC Compile to generate PaddlePaddle's docs, default: disabled (OFF). WITH_SWIG_PYCompile with python predict API, default: disabled (OFF). WITH_STYLE_CHECKCompile with code style check, default: enabled (ON). diff --git a/doc_cn/build_and_install/cmake/compile_options.rst b/doc/getstarted/build_and_install/cmake/build_from_source_cn.rst similarity index 94% rename from doc_cn/build_and_install/cmake/compile_options.rst rename to doc/getstarted/build_and_install/cmake/build_from_source_cn.rst index f345ead2bf851bdad7be2fb8185d16fd2a318a66..3a52c8723bbccd70dd89e8913092d92813925f90 100644 --- a/doc_cn/build_and_install/cmake/compile_options.rst +++ b/doc/getstarted/build_and_install/cmake/build_from_source_cn.rst @@ -1,43 +1,43 @@ -PaddlePaddle的编译选项 -====================== - -PaddlePaddle的编译选项,包括生成CPU/GPU二进制文件、链接何种BLAS库等。用户可在调用cmake的时候设置它们,详细的cmake使用方法可以参考 `官方文档 `_ 。 - -Bool型的编译选项 ----------------- -用户可在cmake的命令行中,通过使用 ``-D`` 命令设置该类编译选项,例如 - -.. code-block:: bash - - cmake .. -DWITH_GPU=OFF - -.. csv-table:: Bool型的编译选项 - :widths: 1, 7, 2 - :file: compile_options.csv - -BLAS/CUDA/Cudnn的编译选项 --------------------------- -BLAS -+++++ - -PaddlePaddle支持以下任意一种BLAS库:`MKL `_ ,`ATLAS `_ ,`OpenBlAS `_ 和 `REFERENCE BLAS `_ 。 - -.. csv-table:: BLAS路径相关的编译选项 - :widths: 1, 2, 7 - :file: cblas_settings.csv - -CUDA/Cudnn -+++++++++++ - -PaddlePaddle可以使用cudnn v2之后的任何一个版本来编译运行,但尽量请保持编译和运行使用的cudnn是同一个版本。 我们推荐使用最新版本的cudnn v5.1。 - -编译选项的设置 -++++++++++++++ - -PaddePaddle通过编译时指定路径来实现引用各种BLAS/CUDA/Cudnn库。cmake编译时,首先在系统路径(/usr/lib\:/usr/local/lib)中搜索这几个库,同时也会读取相关路径变量来进行搜索。 通过使用 ``-D`` 命令可以设置,例如 - -.. code-block:: bash - - cmake .. -DMKL_ROOT=/opt/mkl/ -DCUDNN_ROOT=/opt/cudnnv5 - +PaddlePaddle的编译选项 +====================== + +PaddlePaddle的编译选项,包括生成CPU/GPU二进制文件、链接何种BLAS库等。用户可在调用cmake的时候设置它们,详细的cmake使用方法可以参考 `官方文档 `_ 。 + +Bool型的编译选项 +---------------- +用户可在cmake的命令行中,通过使用 ``-D`` 命令设置该类编译选项,例如 + +.. code-block:: bash + + cmake .. -DWITH_GPU=OFF + +.. csv-table:: Bool型的编译选项 + :widths: 1, 7, 2 + :file: compile_options.csv + +BLAS/CUDA/Cudnn的编译选项 +-------------------------- +BLAS ++++++ + +PaddlePaddle支持以下任意一种BLAS库:`MKL `_ ,`ATLAS `_ ,`OpenBlAS `_ 和 `REFERENCE BLAS `_ 。 + +.. csv-table:: BLAS路径相关的编译选项 + :widths: 1, 2, 7 + :file: cblas_settings.csv + +CUDA/Cudnn ++++++++++++ + +PaddlePaddle可以使用cudnn v2之后的任何一个版本来编译运行,但尽量请保持编译和运行使用的cudnn是同一个版本。 我们推荐使用最新版本的cudnn v5.1。 + +编译选项的设置 +++++++++++++++ + +PaddePaddle通过编译时指定路径来实现引用各种BLAS/CUDA/Cudnn库。cmake编译时,首先在系统路径(/usr/lib\:/usr/local/lib)中搜索这几个库,同时也会读取相关路径变量来进行搜索。 通过使用 ``-D`` 命令可以设置,例如 + +.. code-block:: bash + + cmake .. 
-DMKL_ROOT=/opt/mkl/ -DCUDNN_ROOT=/opt/cudnnv5 + 注意:这几个编译选项的设置,只在第一次cmake的时候有效。如果之后想要重新设置,推荐清理整个编译目录(``rm -rf``)后,再指定。 \ No newline at end of file diff --git a/doc_cn/build_and_install/cmake/cblas_settings.csv b/doc/getstarted/build_and_install/cmake/cblas_settings.csv similarity index 100% rename from doc_cn/build_and_install/cmake/cblas_settings.csv rename to doc/getstarted/build_and_install/cmake/cblas_settings.csv diff --git a/doc_cn/build_and_install/cmake/compile_options.csv b/doc/getstarted/build_and_install/cmake/compile_options.csv similarity index 65% rename from doc_cn/build_and_install/cmake/compile_options.csv rename to doc/getstarted/build_and_install/cmake/compile_options.csv index 12b45eebb2822d77447fa1bc754360605971dcab..463b825470579d0c3736a408b1e82dd33e6f8d42 100644 --- a/doc_cn/build_and_install/cmake/compile_options.csv +++ b/doc/getstarted/build_and_install/cmake/compile_options.csv @@ -1,14 +1,12 @@ -选项,说明,默认值 -WITH_GPU,是否支持GPU。,取决于是否寻找到CUDA工具链 -WITH_DOUBLE,是否使用双精度浮点数。,否 -WITH_DSO,是否运行时动态加载CUDA动态库,而非静态加载CUDA动态库。,是 -WITH_AVX,是否编译含有AVX指令集的PaddlePaddle二进制文件,是 -WITH_PYTHON,是否内嵌PYTHON解释器。方便今后的嵌入式移植工作。,是 -WITH_STYLE_CHECK,是否编译时进行代码风格检查,是 -WITH_RDMA,是否开启RDMA,否 -WITH_GLOG,是否开启GLOG。如果不开启,则会使用一个简化版的日志,同时方便今后的嵌入式移植工作。,取决于是否寻找到GLOG -WITH_GFLAGS,是否使用GFLAGS。如果不开启,则会使用一个简化版的命令行参数解析器,同时方便今后的嵌入式移植工作。,取决于是否寻找到GFLAGS -WITH_TIMER,是否开启计时功能。如果开启会导致运行略慢,打印的日志变多,但是方便调试和测Benchmark,否 -WITH_TESTING,是否开启单元测试,取决于是否寻找到GTEST -WITH_DOC,是否编译中英文文档,否 +选项,说明,默认值 +WITH_GPU,是否支持GPU。,取决于是否寻找到CUDA工具链 +WITH_DOUBLE,是否使用双精度浮点数。,否 +WITH_DSO,是否运行时动态加载CUDA动态库,而非静态加载CUDA动态库。,是 +WITH_AVX,是否编译含有AVX指令集的PaddlePaddle二进制文件,是 +WITH_PYTHON,是否内嵌PYTHON解释器。方便今后的嵌入式移植工作。,是 +WITH_STYLE_CHECK,是否编译时进行代码风格检查,是 +WITH_RDMA,是否开启RDMA,否 +WITH_TIMER,是否开启计时功能。如果开启会导致运行略慢,打印的日志变多,但是方便调试和测Benchmark,否 +WITH_TESTING,是否开启单元测试,取决于是否寻找到GTEST +WITH_DOC,是否编译中英文文档,否 WITH_SWIG_PY,是否编译PYTHON的SWIG接口,该接口可用于预测和定制化训练,取决于是否寻找到SWIG \ No newline at end of file diff --git a/doc_cn/build_and_install/install/docker_install.rst b/doc/getstarted/build_and_install/docker_install_cn.rst similarity index 93% rename from doc_cn/build_and_install/install/docker_install.rst rename to doc/getstarted/build_and_install/docker_install_cn.rst index 40339659be406ec72da8ad89b6d5dd38d72bb5ae..35234e0eb3ece3cb20d62841c1d75e60b485b9ea 100644 --- a/doc_cn/build_and_install/install/docker_install.rst +++ b/doc/getstarted/build_and_install/docker_install_cn.rst @@ -111,7 +111,24 @@ cuda相关的Driver和设备映射进container中,脚本类似于 简单的含有ssh的Dockerfile如下: -.. literalinclude:: paddle_ssh.Dockerfile +.. code-block:: bash + + FROM paddledev/paddle:cpu-latest + + MAINTAINER PaddlePaddle dev team + + RUN apt-get update + RUN apt-get install -y openssh-server + RUN mkdir /var/run/sshd + RUN echo 'root:root' | chpasswd + + RUN sed -ri 's/^PermitRootLogin\s+.*/PermitRootLogin yes/' /etc/ssh/sshd_config + RUN sed -ri 's/UsePAM yes/#UsePAM yes/g' /etc/ssh/sshd_config + + EXPOSE 22 + + CMD ["/usr/sbin/sshd", "-D"] + 使用该Dockerfile构建出镜像,然后运行这个container即可。相关命令为\: diff --git a/doc/getstarted/build_and_install/docker_install_en.rst b/doc/getstarted/build_and_install/docker_install_en.rst index 8df7e063a1ffba5ed4b4bad409d35671de53a633..4708890e48323352f444f45f00b692028a84f791 100644 --- a/doc/getstarted/build_and_install/docker_install_en.rst +++ b/doc/getstarted/build_and_install/docker_install_en.rst @@ -17,7 +17,7 @@ CPU-only one and a CUDA GPU one. We do so by configuring `dockerhub.com `_ automatically runs the following commands: -.. code-block:: base +.. 
code-block:: bash docker build -t paddle:cpu -f paddle/scripts/docker/Dockerfile . docker build -t paddle:gpu -f paddle/scripts/docker/Dockerfile.gpu . diff --git a/doc_cn/build_and_install/index.rst b/doc/getstarted/build_and_install/index_cn.rst similarity index 61% rename from doc_cn/build_and_install/index.rst rename to doc/getstarted/build_and_install/index_cn.rst index 48163fb36e561fe5fd8f6907379687a8b5c97f68..e599aab2cb3db8ddb72bcf4229be0b3fbda2b3ff 100644 --- a/doc_cn/build_and_install/index.rst +++ b/doc/getstarted/build_and_install/index_cn.rst @@ -9,8 +9,8 @@ PaddlePaddle提供数个预编译的二进制来进行安装,包括Docker镜 .. toctree:: :maxdepth: 1 - install/docker_install.rst - install/ubuntu_install.rst + docker_install_cn.rst + ubuntu_install_cn.rst @@ -19,9 +19,9 @@ PaddlePaddle提供数个预编译的二进制来进行安装,包括Docker镜 .. warning:: - 编译选项主要推荐高级用户查看,普通用户请走安装流程。 + 编译选项主要推荐高级用户查看,普通用户请走安装流程。 -.. toctree:: - :maxdepth: 1 +.. toctree:: + :maxdepth: 1 - cmake/index.rst + cmake/build_from_source_cn.rst \ No newline at end of file diff --git a/doc_cn/build_and_install/install/ubuntu_install.rst b/doc/getstarted/build_and_install/ubuntu_install_cn.rst similarity index 71% rename from doc_cn/build_and_install/install/ubuntu_install.rst rename to doc/getstarted/build_and_install/ubuntu_install_cn.rst index 4500d6e0b03be9280e3e6c25cddbf7fb389671b8..d02d9c63bbfb50954d7b75f2c685ce167a3b7146 100644 --- a/doc_cn/build_and_install/install/ubuntu_install.rst +++ b/doc/getstarted/build_and_install/ubuntu_install_cn.rst @@ -38,7 +38,18 @@ PaddlePaddle提供了ubuntu 14.04 deb安装包。 安装完成后,可以使用命令 :code:`paddle version` 查看安装后的paddle 版本: -.. literalinclude:: paddle_version.txt +.. code-block:: shell + + PaddlePaddle 0.8.0b1, compiled with + with_avx: ON + with_gpu: OFF + with_double: OFF + with_python: ON + with_rdma: OFF + with_metric_learning: + with_timer: OFF + with_predict_sdk: + 可能遇到的问题 -------------- @@ -48,9 +59,9 @@ libcudart.so/libcudnn.so找不到 安装完成后,运行 :code:`paddle train` 报错\: -.. code-block:: shell +.. code-block:: shell - 0831 12:36:04.151525 1085 hl_dso_loader.cc:70] Check failed: nullptr != *dso_handle For Gpu version of PaddlePaddle, it couldn't find CUDA library: libcudart.so Please make sure you already specify its path.Note: for training data on Cpu using Gpu version of PaddlePaddle,you must specify libcudart.so via LD_LIBRARY_PATH. + 0831 12:36:04.151525 1085 hl_dso_loader.cc:70] Check failed: nullptr != *dso_handle For Gpu version of PaddlePaddle, it couldn't find CUDA library: libcudart.so Please make sure you already specify its path.Note: for training data on Cpu using Gpu version of PaddlePaddle,you must specify libcudart.so via LD_LIBRARY_PATH. 原因是未设置cuda运行时环境变量。 如果使用GPU版本的PaddlePaddle,请安装CUDA 7.5 和CUDNN 5到本地环境中,并设置: diff --git a/doc/getstarted/index_cn.rst b/doc/getstarted/index_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..a0867a6e592874e0eee6bff9d31c153c4adfe6f5 --- /dev/null +++ b/doc/getstarted/index_cn.rst @@ -0,0 +1,8 @@ +GET STARTED +============ + +.. 
toctree:: + :maxdepth: 2 + + build_and_install/index_cn.rst + basic_usage/index_cn.rst diff --git a/doc_cn/cluster/k8s/Dockerfile b/doc/howto/cluster/k8s/Dockerfile similarity index 100% rename from doc_cn/cluster/k8s/Dockerfile rename to doc/howto/cluster/k8s/Dockerfile diff --git a/doc_cn/cluster/k8s/distributed_training_on_kubernetes.md b/doc/howto/cluster/k8s/distributed_training_on_k8s_cn.md similarity index 100% rename from doc_cn/cluster/k8s/distributed_training_on_kubernetes.md rename to doc/howto/cluster/k8s/distributed_training_on_k8s_cn.md diff --git a/doc_cn/cluster/k8s/job.yaml b/doc/howto/cluster/k8s/job.yaml similarity index 100% rename from doc_cn/cluster/k8s/job.yaml rename to doc/howto/cluster/k8s/job.yaml diff --git a/doc_cn/cluster/k8s/k8s-paddle-arch.png b/doc/howto/cluster/k8s/k8s-paddle-arch.png similarity index 100% rename from doc_cn/cluster/k8s/k8s-paddle-arch.png rename to doc/howto/cluster/k8s/k8s-paddle-arch.png diff --git a/doc_cn/build_and_install/paddle_on_kubernetes.md b/doc/howto/cluster/k8s/paddle_on_k8s_cn.md similarity index 100% rename from doc_cn/build_and_install/paddle_on_kubernetes.md rename to doc/howto/cluster/k8s/paddle_on_k8s_cn.md diff --git a/doc_cn/cluster/k8s/start.sh b/doc/howto/cluster/k8s/start.sh similarity index 100% rename from doc_cn/cluster/k8s/start.sh rename to doc/howto/cluster/k8s/start.sh diff --git a/doc_cn/cluster/k8s/start_paddle.py b/doc/howto/cluster/k8s/start_paddle.py similarity index 100% rename from doc_cn/cluster/k8s/start_paddle.py rename to doc/howto/cluster/k8s/start_paddle.py diff --git a/doc/howto/cmd_parameter/detail_introduction_en.md b/doc/howto/cmd_parameter/detail_introduction_en.md index 82136b7d4f65ffcdff60243feb25b31a4a468637..27b2faf1d8a9367ff9498a76d363791ab7fbe61c 100644 --- a/doc/howto/cmd_parameter/detail_introduction_en.md +++ b/doc/howto/cmd_parameter/detail_introduction_en.md @@ -1,5 +1,5 @@ ```eval_rst -.. _cmd_detail_introduction_en: +.. _cmd_detail_introduction: ``` # Detail Description diff --git a/doc/howto/cmd_parameter/index_en.md b/doc/howto/cmd_parameter/index_en.md index fb658f2aa5bc0edef7b5dcb24a582d2c4182caa7..a6c236db61f834372152dba0f5049fe37b731086 100644 --- a/doc/howto/cmd_parameter/index_en.md +++ b/doc/howto/cmd_parameter/index_en.md @@ -1,5 +1,5 @@ ```eval_rst -.. _cmd_line_index_en: +.. 
_cmd_line_index: ``` # How to Set Command-line Parameters diff --git a/doc_cn/concepts/nn.rst b/doc/howto/concepts/nn_cn.rst similarity index 100% rename from doc_cn/concepts/nn.rst rename to doc/howto/concepts/nn_cn.rst diff --git a/doc_cn/concepts/program_concepts.rst b/doc/howto/concepts/program_concepts_cn.rst similarity index 100% rename from doc_cn/concepts/program_concepts.rst rename to doc/howto/concepts/program_concepts_cn.rst diff --git a/doc_cn/concepts/pserver_topology.dot b/doc/howto/concepts/src/pserver_topology.dot similarity index 100% rename from doc_cn/concepts/pserver_topology.dot rename to doc/howto/concepts/src/pserver_topology.dot diff --git a/doc_cn/concepts/trainer_config.py b/doc/howto/concepts/src/trainer_config.py similarity index 100% rename from doc_cn/concepts/trainer_config.py rename to doc/howto/concepts/src/trainer_config.py diff --git a/doc_cn/concepts/use_concepts.rst b/doc/howto/concepts/use_concepts_cn.rst similarity index 89% rename from doc_cn/concepts/use_concepts.rst rename to doc/howto/concepts/use_concepts_cn.rst index 2d27e29fac37d54e4a31540cf75361464f51b193..6b875220880a2b79e24d1008a2eb35a7267df14c 100644 --- a/doc_cn/concepts/use_concepts.rst +++ b/doc/howto/concepts/use_concepts_cn.rst @@ -8,29 +8,29 @@ PaddlePaddle是一个深度学习框架,支持单机模式和多机模式。 本文首先介绍trainer进程中的一些使用概念,然后介绍pserver进程中概念。 -.. contents:: +.. contents:: 系统框图 ======== 下图描述了用户使用框图,PaddlePaddle的trainer进程里内嵌了Python解释器,trainer进程可以利用这个解释器执行Python脚本,Python脚本里定义了模型配置、训练算法、以及数据读取函数。其中,数据读取程序往往定义在一个单独Python脚本文件里,被称为数据提供器(DataProvider),通常是一个Python函数。模型配置、训练算法通常定义在另一单独Python文件中, 称为训练配置文件。下面将分别介绍这两部分。 -.. graphviz:: - - digraph pp_process { - rankdir=LR; - config_file [label="用户神经网络配置"]; - subgraph cluster_pp { - style=filled; - color=lightgrey; - node [style=filled, color=white, shape=box]; - label = "PaddlePaddle C++"; - py [label="Python解释器"]; - } - data_provider [label="用户数据解析"]; - config_file -> py; - py -> data_provider [dir="back"]; - } +.. graphviz:: + + digraph pp_process { + rankdir=LR; + config_file [label="用户神经网络配置"]; + subgraph cluster_pp { + style=filled; + color=lightgrey; + node [style=filled, color=white, shape=box]; + label = "PaddlePaddle C++"; + py [label="Python解释器"]; + } + data_provider [label="用户数据解析"]; + config_file -> py; + py -> data_provider [dir="back"]; + } 数据提供器 ========== @@ -47,7 +47,7 @@ DataProvider是PaddlePaddle系统的数据提供器,将用户的原始数据 一个简单的训练配置文件为: -.. literalinclude:: trainer_config.py +.. literalinclude:: src/trainer_config.py :linenos: 文件开头 ``from paddle.trainer_config_helpers import *`` ,是因为PaddlePaddle配置文件与C++模块通信的最基础协议是protobuf,为了避免用户直接写复杂的protobuf string,我们为用户定以Python接口来配置网络,该Python代码可以生成protobuf包,这就是`trainer_config_helpers`_的作用。因此,在文件的开始,需要import这些函数。 这个包里面包含了模型配置需要的各个模块。 @@ -100,11 +100,11 @@ DataProvider是PaddlePaddle系统的数据提供器,将用户的原始数据 例如,和 ``fc_layer`` 同样功能的 ``mixed_layer`` 是: -.. code-block:: python +.. code-block:: python - data = data_layer(name='data', size=200) - with mixed_layer(size=200) as out: - out += full_matrix_projection(input=data) + data = data_layer(name='data', size=200) + with mixed_layer(size=200) as out: + out += full_matrix_projection(input=data) PaddlePaddle 可以使用 ``mixed layer`` 配置出非常复杂的网络,甚至可以直接配置一个完整的LSTM。用户可以参考 `mixed_layer`_ 的相关文档进行配置。 @@ -114,13 +114,13 @@ PaddlePaddle 可以使用 ``mixed layer`` 配置出非常复杂的网络,甚 PaddlePaddle多机采用经典的 Parameter Server 架构对多个节点的 trainer 进行同步。多机训练的经典拓扑结构如下\: -.. graphviz:: pserver_topology.dot +.. graphviz:: src/pserver_topology.dot 图中每个灰色方块是一台机器,在每个机器中,先使用命令 ``paddle pserver`` 启动一个pserver进程,并指定端口号,可能的参数是\: -.. 
code-block:: bash +.. code-block:: bash - paddle pserver --port=5000 --num_gradient_servers=4 --tcp_rdma='tcp' --nics='eth0' + paddle pserver --port=5000 --num_gradient_servers=4 --tcp_rdma='tcp' --nics='eth0' * ``--port=5000`` : 指定 pserver 进程端口是 5000 。 * ``--num_gradient_servers=4`` : 有四个训练进程(PaddlePaddle 将 trainer 也称作 GradientServer,因为其负责提供Gradient)。 @@ -128,9 +128,9 @@ PaddlePaddle多机采用经典的 Parameter Server 架构对多个节点的 trai 启动 pserver 进程之后,需要启动 trainer 训练进程,在各个机器上运行如下命令\: -.. code-block:: bash +.. code-block:: bash - paddle train --port=5000 --pservers=192.168.100.101,192.168.100.102,192.168.100.103,192.168.100.104 --config=... + paddle train --port=5000 --pservers=192.168.100.101,192.168.100.102,192.168.100.103,192.168.100.104 --config=... 对于简单的多机协同训练使用上述方式即可。另外,在高级使用场景下,pserver/train 通常还需要设置下面两个参数\: diff --git a/doc/howto/contribute_to_paddle_en.md b/doc/howto/contribute_to_paddle_en.md index 1decc91d62cc25c5b3157bdc6e0835421be23252..f4b67d64e7db821539e1cc31177de5caf7b14766 100644 --- a/doc/howto/contribute_to_paddle_en.md +++ b/doc/howto/contribute_to_paddle_en.md @@ -1,8 +1,8 @@ # How to Contribute Code We sincerely appreciate your contributions. You can use fork and pull request -workflow to merge your code. - +workflow to merge your code. + ## Code Requirements - Your code must be fully documented by [doxygen](http://www.stack.nl/~dimitri/doxygen/) style. @@ -12,11 +12,11 @@ workflow to merge your code. - Pass all unit tests. The following tutorial guides you through submitting your contribution. - + ## [Creating a Fork](https://help.github.com/articles/fork-a-repo/) - + Just head over to the GitHub page and click the "Fork" button. -It's just that simple. +It's just that simple. ## Clone @@ -25,7 +25,7 @@ The **develop** is the main branch, and other user's branches are feature branch Once you've created a fork, you can use your favorite git client to clone your repo or just head straight to the command line: - + ```shell # Clone your fork to your local machine git clone --branch develop https://github.com/USERNAME/Paddle.git @@ -47,6 +47,22 @@ Then you can start to develop by making a local development branch git checkout -b MY_COOL_STUFF_BRANCH ``` +## Using `pre-commit` hook + +Paddle developers use the [pre-commit](http://pre-commit.com/) tool to manage git pre-commit hooks. It helps us format source code (C++, Python) and run some basic checks before each commit (for example, that every file ends with exactly one EOL and that no huge files are added to git). The `pre-commit` checks are now part of the unit tests in Travis-CI, and a PR whose code does not pass the hooks cannot be merged into Paddle. + +To use [pre-commit](http://pre-commit.com/), install it with `pip install pre-commit`. Currently, Paddle uses `clang-format` to format C/C++ sources; please make sure clang-format 3.8+ is installed. + +Then simply run `pre-commit install` in your Paddle clone directory. Whenever you commit, the pre-commit hooks will check your local changes and reject the commit if anything is not suitable. + ## Commit Commit your changes by following command lines: @@ -83,7 +99,7 @@ git pull --rebase upstream develop If there are no unique commits locally, git will simply perform a fast-forward. However, if you have been making changes (in the vast majority of cases you -probably shouldn't be), you may have to deal with conflicts. +probably shouldn't be), you may have to deal with conflicts. Now, your local master branch is up-to-date with everything modified upstream. 
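To make the pserver/trainer startup described in `use_concepts_cn.rst` above more concrete, here is a minimal Python sketch, not part of this patch, that assembles the `paddle pserver` and `paddle train` command lines for a small cluster; the node IPs, port, and config path are illustrative placeholders taken from the example above.

```python
# Minimal sketch (illustrative only): build the cluster commands shown above.
NODES = ["192.168.100.101", "192.168.100.102",
         "192.168.100.103", "192.168.100.104"]
PORT = 5000


def pserver_cmd():
    # One pserver runs per machine; every trainer acts as a "gradient
    # server", so --num_gradient_servers equals the trainer count.
    return ("paddle pserver --port=%d --num_gradient_servers=%d "
            "--tcp_rdma='tcp' --nics='eth0'" % (PORT, len(NODES)))


def trainer_cmd(config="trainer_config.py"):
    # Every trainer gets the full pserver list so it can exchange
    # gradients and parameters with all of them.
    return ("paddle train --port=%d --pservers=%s --config=%s"
            % (PORT, ",".join(NODES), config))


if __name__ == "__main__":
    print(pserver_cmd())  # run this on each machine first
    print(trainer_cmd())  # then run this on each machine
```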
diff --git a/doc/howto/deep_model/index_cn.rst b/doc/howto/deep_model/index_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..31f8c39af6010abee9ab8ac2bcde95d44a4afb98 --- /dev/null +++ b/doc/howto/deep_model/index_cn.rst @@ -0,0 +1,10 @@ +How to Configure Deep Models +============================ + +.. toctree:: + :maxdepth: 1 + + rnn/recurrent_group_cn.md + rnn/hierarchical_layer_cn.rst + rnn/hrnn_rnn_api_compare_cn.rst + rnn/hrnn_demo_cn.rst diff --git a/doc_cn/algorithm/rnn/hierarchical-layer.rst b/doc/howto/deep_model/rnn/hierarchical_layer_cn.rst similarity index 100% rename from doc_cn/algorithm/rnn/hierarchical-layer.rst rename to doc/howto/deep_model/rnn/hierarchical_layer_cn.rst diff --git a/doc_cn/algorithm/rnn/hrnn_demo.rst b/doc/howto/deep_model/rnn/hrnn_demo_cn.rst similarity index 100% rename from doc_cn/algorithm/rnn/hrnn_demo.rst rename to doc/howto/deep_model/rnn/hrnn_demo_cn.rst diff --git a/doc_cn/algorithm/rnn/hrnn_rnn_api_compare.rst b/doc/howto/deep_model/rnn/hrnn_rnn_api_compare_cn.rst similarity index 91% rename from doc_cn/algorithm/rnn/hrnn_rnn_api_compare.rst rename to doc/howto/deep_model/rnn/hrnn_rnn_api_compare_cn.rst index 9baa0b578041ab82331a94c2a9e4d081697a5fda..96e52b910a22576fd75c9d4e1bef6e2cf74bc84f 100644 --- a/doc_cn/algorithm/rnn/hrnn_rnn_api_compare.rst +++ b/doc/howto/deep_model/rnn/hrnn_rnn_api_compare_cn.rst @@ -24,18 +24,18 @@ - 本例中的原始数据一共有10个样本。每个样本由两部分组成,一个label(此处都为2)和一个已经分词后的句子。这个数据也被单层RNN网络直接使用。 -.. literalinclude:: ../../../paddle/gserver/tests/Sequence/tour_train_wdseg +.. literalinclude:: ../../../../paddle/gserver/tests/Sequence/tour_train_wdseg :language: text - 双层序列数据一共有4个样本。 每个样本间用空行分开,整体数据和原始数据完全一样。但于双层序列的LSTM来说,第一个样本同时encode两条数据成两个向量。这四条数据同时处理的句子数量为\ :code:`[2, 3, 2, 3]`\ 。 -.. literalinclude:: ../../../paddle/gserver/tests/Sequence/tour_train_wdseg.nest +.. literalinclude:: ../../../../paddle/gserver/tests/Sequence/tour_train_wdseg.nest :language: text 其次,对于两种不同的输入数据类型,不同DataProvider对比如下(`sequenceGen.py `_)\: -.. literalinclude:: ../../../paddle/gserver/tests/sequenceGen.py +.. literalinclude:: ../../../../paddle/gserver/tests/sequenceGen.py :language: python :lines: 21-39 :linenos: @@ -43,10 +43,11 @@ - 这是普通的单层时间序列的DataProvider代码,其说明如下: * DataProvider共返回两个数据,分别是words和label。即上述代码中的第19行。 - - words是原始数据中的每一句话,所对应的词表index数组。它是integer_value_sequence类型的,即整数数组。words即为这个数据中的单层时间序列。 - - label是原始数据中对于每一句话的分类标签,它是integer_value类型的。 -.. literalinclude:: ../../../paddle/gserver/tests/sequenceGen.py + - words是原始数据中的每一句话,所对应的词表index数组。它是integer_value_sequence类型的,即整数数组。words即为这个数据中的单层时间序列。 + - label是原始数据中对于每一句话的分类标签,它是integer_value类型的。 + +.. literalinclude:: ../../../../paddle/gserver/tests/sequenceGen.py :language: python :lines: 42-71 :linenos: @@ -63,7 +64,7 @@ 首先,我们看一下单层RNN的配置。代码中9-15行(高亮部分)即为单层RNN序列的使用代码。这里使用了PaddlePaddle预定义好的RNN处理函数。在这个函数中,RNN对于每一个时间步通过了一个LSTM网络。 -.. literalinclude:: ../../../paddle/gserver/tests/sequence_layer_group.conf +.. literalinclude:: ../../../../paddle/gserver/tests/sequence_layer_group.conf :language: python :lines: 38-63 :linenos: @@ -84,7 +85,7 @@ * 至此,\ :code:`lstm_last`\ 便和单层RNN配置中的\ :code:`lstm_last`\ 具有相同的结果了。 -.. literalinclude:: ../../../paddle/gserver/tests/sequence_nest_layer_group.conf +.. literalinclude:: ../../../../paddle/gserver/tests/sequence_nest_layer_group.conf :language: python :lines: 38-64 :linenos: @@ -106,7 +107,7 @@ - 单层RNN:过了一个很简单的recurrent_group。每一个时间步,当前的输入y和上一个时间步的输出rnn_state做了一个全链接。 -.. literalinclude:: ../../../paddle/gserver/tests/sequence_rnn.conf +.. 
literalinclude:: ../../../../paddle/gserver/tests/sequence_rnn.conf :language: python :lines: 36-48 @@ -115,7 +116,7 @@ - 内层inner_step的recurrent_group和单层序列的几乎一样。除了boot_layer=outer_mem,表示将外层的outer_mem作为内层memory的初始状态。外层outer_step中,outer_mem是一个子句的最后一个向量,即整个双层group是将前一个子句的最后一个向量,作为下一个子句memory的初始状态。 - 从输入数据上看,单双层序列的句子是一样的,只是双层序列将其又做了子序列划分。因此双层序列的配置中,必须将前一个子句的最后一个元素,作为boot_layer传给下一个子句的memory,才能保证和单层序列的配置中“每个时间步都用了上一个时间步的输出结果”一致。 -.. literalinclude:: ../../../paddle/gserver/tests/sequence_nest_rnn.conf +.. literalinclude:: ../../../../paddle/gserver/tests/sequence_nest_rnn.conf :language: python :lines: 39-66 @@ -151,14 +152,14 @@ * 单层RNN\: -.. literalinclude:: ../../../paddle/gserver/tests/sequence_rnn_multi_unequalength_inputs.py +.. literalinclude:: ../../../../paddle/gserver/tests/sequence_rnn_multi_unequalength_inputs.py :language: python :lines: 42-59 :linenos: * 双层RNN\ \: -.. literalinclude:: ../../../paddle/gserver/tests/sequence_nest_rnn_multi_unequalength_inputs.py +.. literalinclude:: ../../../../paddle/gserver/tests/sequence_nest_rnn_multi_unequalength_inputs.py :language: python :lines: 41-80 :linenos: @@ -181,11 +182,11 @@ Memory Memory是PaddlePaddle实现RNN时候使用的一个概念。RNN即时间递归神经网络,通常要求时间步之间具有一些依赖性,即当前时间步下的神经网络依赖前一个时间步神经网络中某一个神经元输出。如下图所示。 -.. graphviz:: glossary_rnn.dot +.. graphviz:: src/glossary_rnn.dot 上图中虚线的连接,即是跨越时间步的网络连接。PaddlePaddle在实现RNN的时候,将这种跨越时间步的连接用一个特殊的神经网络单元实现。这个神经网络单元就叫Memory。Memory可以缓存上一个时刻某一个神经元的输出,然后在下一个时间步输入给另一个神经元。使用Memory的RNN实现便如下图所示。 -.. graphviz:: glossary_rnn_with_memory.dot +.. graphviz:: src/glossary_rnn_with_memory.dot 使用这种方式,PaddlePaddle可以比较简单的判断哪些输出是应该跨越时间步的,哪些不是。 diff --git a/doc_cn/algorithm/rnn/rnn-tutorial.md b/doc/howto/deep_model/rnn/recurrent_group_cn.md similarity index 98% rename from doc_cn/algorithm/rnn/rnn-tutorial.md rename to doc/howto/deep_model/rnn/recurrent_group_cn.md index 9e488b0d51956e86f9fb76f450fdb438f596e239..984fdcc505cdd073d0265c496cda5fb3553c22e4 100644 --- a/doc_cn/algorithm/rnn/rnn-tutorial.md +++ b/doc/howto/deep_model/rnn/recurrent_group_cn.md @@ -1,96 +1,96 @@ -# Recurrent Group教程 - -## 概述 - -序列数据是自然语言处理任务面对的一种主要输入数据类型。 - -一句话是由词语构成的序列,多句话进一步构成了段落。因此,段落可以看作是一个嵌套的双层的序列,这个序列的每个元素又是一个序列。 - -双层序列是PaddlePaddle支持的一种非常灵活的数据组织方式,帮助我们更好地描述段落、多轮对话等更为复杂的语言数据。基于双层序列输入,我们可以设计搭建一个灵活的、层次化的RNN,分别从词语和句子级别编码输入数据,同时也能够引入更加复杂的记忆机制,更好地完成一些复杂的语言理解任务。 - -在PaddlePaddle中,`recurrent_group`是一种任意复杂的RNN单元,用户只需定义RNN在一个时间步内完成的计算,PaddlePaddle负责完成信息和误差在时间序列上的传播。 - -更进一步,`recurrent_group`同样可以扩展到双层序列的处理上。通过两个嵌套的`recurrent_group`分别定义子句级别和词语级别上需要完成的运算,最终实现一个层次化的复杂RNN。 - -目前,在PaddlePaddle中,能够对双向序列进行处理的有`recurrent_group`和部分Layer,具体可参考文档:支持双层序列作为输入的Layer。 - -## 相关概念 - -### 基本原理 -`recurrent_group` 是PaddlePaddle支持的一种任意复杂的RNN单元。使用者只需要关注于设计RNN在一个时间步之内完成的计算,PaddlePaddle负责完成信息和梯度在时间序列上的传播。 - -PaddlePaddle中,`recurrent_group`的一个简单调用如下: - -``` python -recurrent_group(step, input, reverse) -``` -- step:一个可调用的函数,定义一个时间步之内RNN单元完成的计算 -- input:输入,必须是一个单层序列,或者一个双层序列 -- reverse:是否以逆序处理输入序列 - -使用`recurrent_group`的核心是设计step函数的计算逻辑。step函数内部可以自由组合PaddlePaddle支持的各种layer,完成任意的运算逻辑。`recurrent_group` 的输入(即input)会成为step函数的输入,由于step 函数只关注于RNN一个时间步之内的计算,在这里`recurrent_group`替我们完成了原始输入数据的拆分。 - -### 输入 -`recurrent_group`处理的输入序列主要分为以下三种类型: - -- **数据输入**:一个双层序列进入`recurrent_group`会被拆解为一个单层序列,一个单层序列进入`recurrent_group`会被拆解为非序列,然后交给step函数,这一过程对用户是完全透明的。可以有以下两种:1)通过data_layer拿到的用户输入;2)其它layer的输出。 - -- **只读Memory输入**:`StaticInput` 定义了一个只读的Memory,由`StaticInput`指定的输入不会被`recurrent_group`拆解,`recurrent_group` 循环展开的每个时间步总是能够引用所有输入,可以是一个非序列,或者一个单层序列。 - -- **序列生成任务的输入**:`GeneratedInput`只用于在序列生成任务中指定输入数据。 - -### 
输入示例 - -序列生成任务大多遵循encoder-decoer架构,encoder和decoder可以是能够处理序列的任意神经网络单元,而RNN是最流行的选择。 - -给定encoder输出和当前词,decoder每次预测产生下一个最可能的词语。在这种结构中,decoder接受两个输入: - -- 要生成的目标序列:是decoder的数据输入,也是decoder循环展开的依据,`recurrent_group`会对这类输入进行拆解。 - -- encoder输出,可以是一个非序列,或者一个单层序列:是一个unbounded memory,decoder循环展开的每一个时间步会引用全部结果,不应该被拆解,这种类型的输入必须通过`StaticInput`指定。关于Unbounded Memory的更多讨论请参考论文 [Neural Turning Machine](https://arxiv.org/abs/1410.5401)。 - -在序列生成任务中,decoder RNN总是引用上一时刻预测出的词的词向量,作为当前时刻输入。`GeneratedInput`自动完成这一过程。 - -### 输出 -`step`函数必须返回一个或多个Layer的输出,这个Layer的输出会作为整个`recurrent_group` 最终的输出结果。在输出的过程中,`recurrent_group` 会将每个时间步的输出拼接,这个过程对用户也是透明的。 - -### memory -memory只能在`recurrent_group`中定义和使用。memory不能独立存在,必须指向一个PaddlePaddle定义的Layer。引用memory得到这layer上一时刻输出,因此,可以将memory理解为一个时延操作。 - -可以显示地指定一个layer的输出用于初始化memory。不指定时,memory默认初始化为0。 - -## 双层RNN介绍 -`recurrent_group`帮助我们完成对输入序列的拆分,对输出的合并,以及计算逻辑在序列上的循环展开。 - -利用这种特性,两个嵌套的`recurrent_group`能够处理双层序列,实现词语和句子两个级别的双层RNN结构。 - -- 单层(word-level)RNN:每个状态(state)对应一个词(word)。 -- 双层(sequence-level)RNN:一个双层RNN由多个单层RNN组成,每个单层RNN(即双层RNN的每个状态)对应一个子句(subseq)。 - -为了描述方便,下文以NLP任务为例,将含有子句(subseq)的段落定义为一个双层序列,将含有词语的句子定义为一个单层序列,那么0层序列即为一个词语。 - -## 双层RNN的使用 - -### 训练流程的使用方法 -使用 `recurrent_group`需要遵循以下约定: - -- **单进单出**:输入和输出都是单层序列。 - - 如果有多个输入,不同输入序列含有的词语数必须严格相等。 - - 输出一个单层序列,输出序列的词语数和输入序列一致。 - - memory:在step函数中定义 memory指向一个layer,通过引用memory得到这个layer上一个时刻输出,形成recurrent 连接。memory的is_seq参数必须为false。如果没有定义memory,每个时间步之内的运算是独立的。 - - boot_layer:memory的初始状态,默认初始状为0,memory的is_seq参数必须为false。 - -- **双进双出**:输入和输出都是双层序列。 - - 如果有多个输入序列,不同输入含有的子句(subseq)数必须严格相等,但子句含有的词语数可以不相等。 - - 输出一个双层序列,子句(subseq)数、子句的单词数和指定的一个输入序列一致,默认为第一个输入。 - - memory:在step函数中定义memory,指向一个layer,通过引用memory得到这个layer上一个时刻的输出,形成recurrent连接。定义在外层`recurrent_group` step函数中的memory,能够记录上一个subseq 的状态,可以是一个单层序列(只作为read-only memory),也可以是一个词语。如果没有定义memory,那么 subseq 之间的运算是独立的。 - - boot_layer:memory 初始状态,可以是一个单层序列(只作为read-only memory)或一个向量。默认不设置,即初始状态为0。 - -- **双进单出**:目前还未支持,会报错"In hierachical RNN, all out links should be from sequences now"。 - - -### 生成流程的使用方法 -使用`beam_search`需要遵循以下约定: - -- 单层RNN:从一个word生成下一个word。 +# Recurrent Group教程 + +## 概述 + +序列数据是自然语言处理任务面对的一种主要输入数据类型。 + +一句话是由词语构成的序列,多句话进一步构成了段落。因此,段落可以看作是一个嵌套的双层的序列,这个序列的每个元素又是一个序列。 + +双层序列是PaddlePaddle支持的一种非常灵活的数据组织方式,帮助我们更好地描述段落、多轮对话等更为复杂的语言数据。基于双层序列输入,我们可以设计搭建一个灵活的、层次化的RNN,分别从词语和句子级别编码输入数据,同时也能够引入更加复杂的记忆机制,更好地完成一些复杂的语言理解任务。 + +在PaddlePaddle中,`recurrent_group`是一种任意复杂的RNN单元,用户只需定义RNN在一个时间步内完成的计算,PaddlePaddle负责完成信息和误差在时间序列上的传播。 + +更进一步,`recurrent_group`同样可以扩展到双层序列的处理上。通过两个嵌套的`recurrent_group`分别定义子句级别和词语级别上需要完成的运算,最终实现一个层次化的复杂RNN。 + +目前,在PaddlePaddle中,能够对双向序列进行处理的有`recurrent_group`和部分Layer,具体可参考文档:支持双层序列作为输入的Layer。 + +## 相关概念 + +### 基本原理 +`recurrent_group` 是PaddlePaddle支持的一种任意复杂的RNN单元。使用者只需要关注于设计RNN在一个时间步之内完成的计算,PaddlePaddle负责完成信息和梯度在时间序列上的传播。 + +PaddlePaddle中,`recurrent_group`的一个简单调用如下: + +``` python +recurrent_group(step, input, reverse) +``` +- step:一个可调用的函数,定义一个时间步之内RNN单元完成的计算 +- input:输入,必须是一个单层序列,或者一个双层序列 +- reverse:是否以逆序处理输入序列 + +使用`recurrent_group`的核心是设计step函数的计算逻辑。step函数内部可以自由组合PaddlePaddle支持的各种layer,完成任意的运算逻辑。`recurrent_group` 的输入(即input)会成为step函数的输入,由于step 函数只关注于RNN一个时间步之内的计算,在这里`recurrent_group`替我们完成了原始输入数据的拆分。 + +### 输入 +`recurrent_group`处理的输入序列主要分为以下三种类型: + +- **数据输入**:一个双层序列进入`recurrent_group`会被拆解为一个单层序列,一个单层序列进入`recurrent_group`会被拆解为非序列,然后交给step函数,这一过程对用户是完全透明的。可以有以下两种:1)通过data_layer拿到的用户输入;2)其它layer的输出。 + +- **只读Memory输入**:`StaticInput` 定义了一个只读的Memory,由`StaticInput`指定的输入不会被`recurrent_group`拆解,`recurrent_group` 循环展开的每个时间步总是能够引用所有输入,可以是一个非序列,或者一个单层序列。 + +- 
**序列生成任务的输入**:`GeneratedInput`只用于在序列生成任务中指定输入数据。 + +### 输入示例 + +序列生成任务大多遵循encoder-decoder架构,encoder和decoder可以是能够处理序列的任意神经网络单元,而RNN是最流行的选择。 + +给定encoder输出和当前词,decoder每次预测产生下一个最可能的词语。在这种结构中,decoder接受两个输入: + +- 要生成的目标序列:是decoder的数据输入,也是decoder循环展开的依据,`recurrent_group`会对这类输入进行拆解。 + +- encoder输出,可以是一个非序列,或者一个单层序列:是一个unbounded memory,decoder循环展开的每一个时间步会引用全部结果,不应该被拆解,这种类型的输入必须通过`StaticInput`指定。关于Unbounded Memory的更多讨论请参考论文 [Neural Turing Machine](https://arxiv.org/abs/1410.5401)。 + +在序列生成任务中,decoder RNN总是引用上一时刻预测出的词的词向量,作为当前时刻输入。`GeneratedInput`自动完成这一过程。 + +### 输出 +`step`函数必须返回一个或多个Layer的输出,这个Layer的输出会作为整个`recurrent_group` 最终的输出结果。在输出的过程中,`recurrent_group` 会将每个时间步的输出拼接,这个过程对用户也是透明的。 + +### memory +memory只能在`recurrent_group`中定义和使用。memory不能独立存在,必须指向一个PaddlePaddle定义的Layer。引用memory得到这个layer上一时刻的输出,因此,可以将memory理解为一个时延操作。 + +可以显式地指定一个layer的输出用于初始化memory。不指定时,memory默认初始化为0。 + +## 双层RNN介绍 +`recurrent_group`帮助我们完成对输入序列的拆分,对输出的合并,以及计算逻辑在序列上的循环展开。 + +利用这种特性,两个嵌套的`recurrent_group`能够处理双层序列,实现词语和句子两个级别的双层RNN结构。 + +- 单层(word-level)RNN:每个状态(state)对应一个词(word)。 +- 双层(sequence-level)RNN:一个双层RNN由多个单层RNN组成,每个单层RNN(即双层RNN的每个状态)对应一个子句(subseq)。 + +为了描述方便,下文以NLP任务为例,将含有子句(subseq)的段落定义为一个双层序列,将含有词语的句子定义为一个单层序列,那么0层序列即为一个词语。 + +## 双层RNN的使用 + +### 训练流程的使用方法 +使用 `recurrent_group` 需要遵循以下约定: + +- **单进单出**:输入和输出都是单层序列。 + - 如果有多个输入,不同输入序列含有的词语数必须严格相等。 + - 输出一个单层序列,输出序列的词语数和输入序列一致。 + - memory:在step函数中定义 memory指向一个layer,通过引用memory得到这个layer上一个时刻输出,形成recurrent 连接。memory的is_seq参数必须为false。如果没有定义memory,每个时间步之内的运算是独立的。 + - boot_layer:memory的初始状态,默认初始状态为0,memory的is_seq参数必须为false。 + +- **双进双出**:输入和输出都是双层序列。 + - 如果有多个输入序列,不同输入含有的子句(subseq)数必须严格相等,但子句含有的词语数可以不相等。 + - 输出一个双层序列,子句(subseq)数、子句的单词数和指定的一个输入序列一致,默认为第一个输入。 + - memory:在step函数中定义memory,指向一个layer,通过引用memory得到这个layer上一个时刻的输出,形成recurrent连接。定义在外层`recurrent_group` step函数中的memory,能够记录上一个subseq 的状态,可以是一个单层序列(只作为read-only memory),也可以是一个词语。如果没有定义memory,那么 subseq 之间的运算是独立的。 + - boot_layer:memory 初始状态,可以是一个单层序列(只作为read-only memory)或一个向量。默认不设置,即初始状态为0。 + +- **双进单出**:目前还未支持,会报错"In hierachical RNN, all out links should be from sequences now"。 + + +### 生成流程的使用方法 +使用`beam_search`需要遵循以下约定: + +- 单层RNN:从一个word生成下一个word。 - 双层RNN:即把单层RNN生成后的subseq给拼接成一个新的双层seq。从语义上看,也不存在一个subseq直接生成下一个subseq的情况。 diff --git a/doc/howto/deep_model/rnn/rnn_en.rst b/doc/howto/deep_model/rnn/rnn_en.rst index b4c0c8bb4cf063872abc783932df737642fb9178..73f5d5371fcd3ce95253cad47b0d8e738284441c 100644 --- a/doc/howto/deep_model/rnn/rnn_en.rst +++ b/doc/howto/deep_model/rnn/rnn_en.rst @@ -30,7 +30,7 @@ Then at the :code:`process` function, each :code:`yield` function will return the yield src_ids, trg_ids, trg_ids_next -For more details description of how to write a data provider, please refer to :ref:`api_pydataprovider2_en` . The full data provider file is located at :code:`demo/seqToseq/dataprovider.py`. +For a more detailed description of how to write a data provider, please refer to :ref:`api_pydataprovider2`. The full data provider file is located at :code:`demo/seqToseq/dataprovider.py`. =============================================== Configure Recurrent Neural Network Architecture =============================================== @@ -42,8 +42,8 @@ Simple Gated Recurrent Neural Network Recurrent neural network process a sequence at each time step sequentially. An example of the architecture of LSTM is listed below. -.. image:: ../../../tutorials/sentiment_analysis/bi_lstm.jpg - :align: center +.. 
image:: ../../../tutorials/sentiment_analysis/src/bi_lstm.jpg + :align: center Generally speaking, a recurrent network perform the following operations from :math:`t=1` to :math:`t=T`, or reversely from :math:`t=T` to :math:`t=1`. @@ -102,7 +102,7 @@ Sequence to Sequence Model with Attention We will use the sequence to sequence model with attention as an example to demonstrate how you can configure complex recurrent neural network models. An illustration of the sequence to sequence model with attention is shown in the following figure. .. image:: ../../../tutorials/text_generation/encoder-decoder-attention-model.png - :align: center + :align: center In this model, the source sequence :math:`S = \{s_1, \dots, s_T\}` is encoded with a bidirectional gated recurrent neural networks. The hidden states of the bidirectional gated recurrent neural network :math:`H_S = \{H_1, \dots, H_T\}` is called *encoder vector* The decoder is a gated recurrent neural network. When decoding each token :math:`y_t`, the gated recurrent neural network generates a set of weights :math:`W_S^t = \{W_1^t, \dots, W_T^t\}`, which are used to compute a weighted sum of the encoder vector. The weighted sum of the encoder vector is utilized to condition the generation of the token :math:`y_t`. @@ -246,6 +246,6 @@ The code is listed below: outputs(beam_gen) -Notice that this generation technique is only useful for decoder like generation process. If you are working on sequence tagging tasks, please refer to :ref:`semantic_role_labeling_en` for more details. +Notice that this generation technique is only useful for decoder like generation process. If you are working on sequence tagging tasks, please refer to :ref:`semantic_role_labeling` for more details. The full configuration file is located at :code:`demo/seqToseq/seqToseq_net.py`. diff --git a/doc_cn/algorithm/rnn/glossary_rnn.dot b/doc/howto/deep_model/rnn/src/glossary_rnn.dot similarity index 100% rename from doc_cn/algorithm/rnn/glossary_rnn.dot rename to doc/howto/deep_model/rnn/src/glossary_rnn.dot diff --git a/doc_cn/algorithm/rnn/glossary_rnn_with_memory.dot b/doc/howto/deep_model/rnn/src/glossary_rnn_with_memory.dot similarity index 100% rename from doc_cn/algorithm/rnn/glossary_rnn_with_memory.dot rename to doc/howto/deep_model/rnn/src/glossary_rnn_with_memory.dot diff --git a/doc_cn/algorithm/rnn/simple_full_hierarchical_recurrent.dot b/doc/howto/deep_model/rnn/src/simple_full_hierarchical_recurrent.dot similarity index 100% rename from doc_cn/algorithm/rnn/simple_full_hierarchical_recurrent.dot rename to doc/howto/deep_model/rnn/src/simple_full_hierarchical_recurrent.dot diff --git a/doc_cn/algorithm/rnn/simple_full_recurrent.dot b/doc/howto/deep_model/rnn/src/simple_full_recurrent.dot similarity index 100% rename from doc_cn/algorithm/rnn/simple_full_recurrent.dot rename to doc/howto/deep_model/rnn/src/simple_full_recurrent.dot diff --git a/doc/howto/index_cn.rst b/doc/howto/index_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..4706d9339aee07fba876728f51fdbd9623cca973 --- /dev/null +++ b/doc/howto/index_cn.rst @@ -0,0 +1,27 @@ +HOW TO +======= + +Usage +------- + +.. toctree:: + :maxdepth: 1 + + concepts/use_concepts_cn.rst + cluster/k8s/paddle_on_k8s_cn.md + cluster/k8s/distributed_training_on_k8s_cn.md + +Development +------------ + +.. toctree:: + :maxdepth: 1 + + write_docs/index_cn.rst + deep_model/index_cn.rst + +Optimization +------------- + +.. 
toctree:: + :maxdepth: 1 diff --git a/doc_cn/howto/how_to_write_docs/index.rst b/doc/howto/write_docs/index_cn.rst similarity index 100% rename from doc_cn/howto/how_to_write_docs/index.rst rename to doc/howto/write_docs/index_cn.rst diff --git a/doc/index_cn.rst b/doc/index_cn.rst new file mode 100644 index 0000000000000000000000000000000000000000..460fedb5658a8ea9bbe8b602ee2b5df66502fa62 --- /dev/null +++ b/doc/index_cn.rst @@ -0,0 +1,11 @@ +PaddlePaddle 文档 +====================== + +.. toctree:: + :maxdepth: 1 + + getstarted/index_cn.rst + tutorials/index_cn.md + howto/index_cn.rst + api/index_cn.rst + faq/index_cn.rst diff --git a/doc/index.rst b/doc/index_en.rst similarity index 88% rename from doc/index.rst rename to doc/index_en.rst index c107239438b038fb6a4a6123e9b61f424b60142f..1d9cca7de720ebc23fe816f32d158930d91c07e7 100644 --- a/doc/index.rst +++ b/doc/index_en.rst @@ -8,4 +8,5 @@ PaddlePaddle Documentation tutorials/index_en.md howto/index_en.rst api/index_en.rst - about/index_en.rst + about/index_en.rst + \ No newline at end of file diff --git a/doc/tutorials/image_classification/src/cifar.png b/doc/tutorials/image_classification/src/cifar.png new file mode 100644 index 0000000000000000000000000000000000000000..f54a0c58837cb3385b32dc57d02cec92666ef0f1 Binary files /dev/null and b/doc/tutorials/image_classification/src/cifar.png differ diff --git a/doc/tutorials/image_classification/src/image_classification.png b/doc/tutorials/image_classification/src/image_classification.png new file mode 100644 index 0000000000000000000000000000000000000000..14f255805081c1b4fab27eaf336fd389fa93ca19 Binary files /dev/null and b/doc/tutorials/image_classification/src/image_classification.png differ diff --git a/doc/tutorials/image_classification/src/lenet.png b/doc/tutorials/image_classification/src/lenet.png new file mode 100644 index 0000000000000000000000000000000000000000..1e6f2b32bad797f3fccb929c72a121fc935b0cbb Binary files /dev/null and b/doc/tutorials/image_classification/src/lenet.png differ diff --git a/doc/tutorials/image_classification/src/plot.png b/doc/tutorials/image_classification/src/plot.png new file mode 100644 index 0000000000000000000000000000000000000000..a31f99791c670e18bb8c62b7604ec8cb0284ffb4 Binary files /dev/null and b/doc/tutorials/image_classification/src/plot.png differ diff --git a/doc/tutorials/imagenet_model/resnet_model_cn.md b/doc/tutorials/imagenet_model/resnet_model_cn.md new file mode 100644 index 0000000000000000000000000000000000000000..82ec9d70b345c11aba3aa86f8206eedc8072bb88 --- /dev/null +++ b/doc/tutorials/imagenet_model/resnet_model_cn.md @@ -0,0 +1,284 @@ +# Model Zoo - ImageNet # + +[ImageNet](http://www.image-net.org/) 是通用物体分类领域一个众所周知的数据库。本教程提供了一个用于ImageNet上的卷积分类网络模型。 + +## ResNet 介绍 + +论文 [Deep Residual Learning for Image Recognition](http://arxiv.org/abs/1512.03385) 中提出的ResNet网络结构在2015年ImageNet大规模视觉识别竞赛(ILSVRC 2015)的分类任务中赢得了第一名。他们提出残差学习的框架来简化网络的训练,所构建网络结构的的深度比之前使用的网络有大幅度的提高。下图展示的是基于残差的连接方式。左图构造网络模块的方式被用于34层的网络中,而右图的瓶颈连接模块用于50层,101层和152层的网络结构中。 + +
![resnet_block](./resnet_block.jpg)
+
图 1. ResNet 网络模块
+ +本教程中我们给出了三个ResNet模型,这些模型都是由原作者提供的模型转换过来的。我们使用PaddlePaddle在ILSVRC的验证集共50,000幅图像上测试了模型的分类错误率,其中输入图像的颜色通道顺序为**BGR**,保持宽高比缩放到短边为256,只截取中心方形的图像区域。分类错误率和模型大小由下表给出。 +
+<table>
+<tr><th>ResNet</th><th>Top-1</th><th>Model Size</th></tr>
+<tr><td>ResNet-50</td><td>24.9%</td><td>99M</td></tr>
+<tr><td>ResNet-101</td><td>23.7%</td><td>173M</td></tr>
+<tr><td>ResNet-152</td><td>23.2%</td><td>234M</td></tr>
+</table>
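The evaluation protocol in the paragraph above (BGR channel order, resizing so that the shorter side becomes 256 while keeping the aspect ratio, then taking the central square crop) can be sketched in a few lines of Python. The snippet below is illustrative only and not part of this patch; it assumes Pillow and NumPy are available and uses the per-channel mean values quoted later in this document (103.939, 116.779, 123.68).

```python
# Illustrative preprocessing sketch (assumes Pillow and NumPy).
import numpy as np
from PIL import Image


def preprocess(path, short_side=256, crop=224):
    img = Image.open(path).convert('RGB')
    w, h = img.size
    # Resize so the shorter side becomes `short_side`, keeping aspect ratio.
    scale = float(short_side) / min(w, h)
    img = img.resize((int(round(w * scale)), int(round(h * scale))))
    arr = np.asarray(img, dtype=np.float32)
    # Keep only the central `crop` x `crop` square region.
    top = (arr.shape[0] - crop) // 2
    left = (arr.shape[1] - crop) // 2
    arr = arr[top:top + crop, left:left + crop]
    # RGB -> BGR, then subtract the per-channel means quoted below.
    return arr[:, :, ::-1] - np.array([103.939, 116.779, 123.68],
                                      dtype=np.float32)


print(preprocess('example/dog.jpg').shape)  # -> (224, 224, 3)
```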
+ +## ResNet 模型 + +50层,101层和152层的网络配置文件可参照```demo/model_zoo/resnet/resnet.py```。你也可以通过在命令行参数中增加一个参数如```--config_args=layer_num=50```来指定网络层的数目。 + +### 网络可视化 + +你可以通过执行下面的命令来得到ResNet网络的结构可视化图。该脚本会生成一个dot文件,然后可以转换为图片。需要安装graphviz来转换dot文件为图片。 + +``` +cd demo/model_zoo/resnet +./net_diagram.sh +``` + +### 模型下载 + +``` +cd demo/model_zoo/resnet +./get_model.sh +``` +你可以执行上述命令来下载所有的模型和均值文件,如果下载成功,这些文件将会被保存在```demo/model_zoo/resnet/model```路径下。 + +``` +mean_meta_224 resnet_101 resnet_152 resnet_50 +``` + * resnet_50: 50层网络模型。 + * resnet_101: 101层网络模型。 + * resnet_152: 152层网络模型。 + * mean\_meta\_224: 均值图像文件,图像大小为3 x 224 x 224,颜色通道顺序为**BGR**。你也可以使用这三个值: 103.939, 116.779, 123.68。 + +### 参数信息 + +* **卷积层权重** + + 由于每个卷积层后面连接的是batch normalization层,因此该层中没有偏置(bias)参数,并且只有一个权重。 + 形状: `(Co, ky, kx, Ci)` + * Co: 输出特征图的通道数目 + * ky: 滤波器核在垂直方向上的尺寸 + * kx: 滤波器核在水平方向上的尺寸 + * Ci: 输入特征图的通道数目 + + 二维矩阵: (Co * ky * kx, Ci), 行优先次序存储。 + +* **全连接层权重** + + 二维矩阵: (输入层尺寸, 本层尺寸), 行优先次序存储。 + +* **[Batch Normalization]() 层权重** + +本层有四个参数,实际上只有.w0和.wbias是需要学习的参数,另外两个分别是滑动均值和方差。在测试阶段它们将会被加载到模型中。下表展示了batch normalization层的参数。 +
+<table>
+<tr><th>参数名</th><th>尺寸</th><th>含义</th></tr>
+<tr><td>_res2_1_branch1_bn.w0</td><td>256</td><td>gamma, 缩放参数</td></tr>
+<tr><td>_res2_1_branch1_bn.w1</td><td>256</td><td>特征图均值</td></tr>
+<tr><td>_res2_1_branch1_bn.w2</td><td>256</td><td>特征图方差</td></tr>
+<tr><td>_res2_1_branch1_bn.wbias</td><td>256</td><td>beta, 偏置参数</td></tr>
+</table>
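As a side note on the table above: at inference time the four parameters are combined per channel as y = gamma * (x - mean) / sqrt(var + eps) + beta, with w0 = gamma, w1 = the feature-map mean, w2 = the feature-map variance, and wbias = beta. The sketch below is illustrative only and not part of this patch; the eps value is an assumption for numerical stability.

```python
# Minimal sketch: applying the four batch normalization parameters
# (w0=gamma, w1=mean, w2=variance, wbias=beta) to a feature map at
# inference time. The eps value is an assumption for illustration.
import numpy as np


def batch_norm_infer(x, gamma, beta, mean, var, eps=1e-5):
    # x has shape (channels, height, width); the parameters are
    # per-channel vectors of length `channels` (256 in the table above).
    scale = gamma / np.sqrt(var + eps)
    return (scale[:, None, None] * (x - mean[:, None, None])
            + beta[:, None, None])


# Toy usage with random values in place of the real 256-dim parameters.
c = 256
x = np.random.rand(c, 56, 56).astype(np.float32)
y = batch_norm_infer(x, gamma=np.ones(c), beta=np.zeros(c),
                     mean=np.zeros(c), var=np.ones(c))
print(y.shape)  # -> (256, 56, 56)
```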
+ +### 参数读取 + +使用者可以使用下面的Python脚本来读取参数值: + +``` +import sys +import numpy as np + +def load(file_name): + with open(file_name, 'rb') as f: + f.read(16) # skip header for float type. + return np.fromfile(f, dtype=np.float32) + +if __name__ == '__main__': + weight = load(sys.argv[1]) +``` + +或者直接使用下面的shell命令: + +``` +od -j 16 -f _res2_1_branch1_bn.w0 +``` + +## 特征提取 + +我们提供了C++和Python接口来提取特征。下面的例子使用了`demo/model_zoo/resnet/example`中的数据,详细地展示了整个特征提取的过程。 + +### C++接口 + +首先,在配置文件中的`define_py_data_sources2`里指定图像数据列表,具体请参照示例`demo/model_zoo/resnet/resnet.py`。 + +``` + train_list = 'train.list' if not is_test else None + # mean.meta is mean file of ImageNet dataset. + # mean.meta size : 3 x 224 x 224. + # If you use three mean value, set like: + # "mean_value:103.939,116.779,123.68;" + args={ + 'mean_meta': "model/mean_meta_224/mean.meta", + 'image_size': 224, 'crop_size': 224, + 'color': True, 'swap_channel': [2, 1, 0]} + define_py_data_sources2(train_list, + 'example/test.list', + module="example.image_list_provider", + obj="processData", + args=args) +``` + +第二步,在`resnet.py`文件中指定要提取特征的网络层的名字。例如, + +``` +Outputs("res5_3_branch2c_conv", "res5_3_branch2c_bn") +``` + +第三步,在`extract_fea_c++.sh`文件中指定模型路径和输出的目录,然后执行下面的命令。 + +``` +cd demo/model_zoo/resnet +./extract_fea_c++.sh +``` + +如果执行成功,特征将会存到`fea_output/rank-00000`文件中,如下所示。同时你可以使用`load_feature.py`文件中的`load_feature_c`接口来加载该文件。 + +``` +-0.115318 -0.108358 ... -0.087884;-1.27664 ... -1.11516 -2.59123; +-0.126383 -0.116248 ... -0.00534909;-1.42593 ... -1.04501 -1.40769; +``` + +* 每行存储的是一个样本的特征。其中,第一行存的是图像`example/dog.jpg`的特征,第二行存的是图像`example/cat.jpg`的特征。 +* 不同层的特征由分号`;`隔开,并且它们的顺序与`Outputs()`中指定的层顺序一致。这里,左边是`res5_3_branch2c_conv`层的特征,右边是`res5_3_branch2c_bn`层特征。 + +### Python接口 + +示例`demo/model_zoo/resnet/classify.py`中展示了如何使用Python来提取特征。下面的例子同样使用了`./example/test.list`中的数据。执行的命令如下: + +``` +cd demo/model_zoo/resnet +./extract_fea_py.sh +``` + +extract_fea_py.sh: + +``` +python classify.py \ + --job=extract \ + --conf=resnet.py\ + --use_gpu=1 \ + --mean=model/mean_meta_224/mean.meta \ + --model=model/resnet_50 \ + --data=./example/test.list \ + --output_layer="res5_3_branch2c_conv,res5_3_branch2c_bn" \ + --output_dir=features + +``` +* \--job=extract: 指定工作模式来提取特征。 +* \--conf=resnet.py: 网络配置文件。 +* \--use_gpu=1: 指定是否使用GPU。 +* \--model=model/resnet_50: 模型路径。 +* \--data=./example/test.list: 数据列表。 +* \--output_layer="xxx,xxx": 指定提取特征的层。 +* \--output_dir=features: 输出目录。 + +如果运行成功,你将会看到特征存储在`features/batch_0`文件中,该文件是由cPickle产生的。你可以使用`load_feature.py`中的`load_feature_py`接口来打开该文件,它将返回如下的字典: + +``` +{ +'cat.jpg': {'res5_3_branch2c_conv': array([[-0.12638293, -0.116248 , -0.11883899, ..., -0.00895038, 0.01994277, -0.00534909]], dtype=float32), 'res5_3_branch2c_bn': array([[-1.42593431, -1.28918779, -1.32414699, ..., -1.45933616, -1.04501402, -1.40769434]], dtype=float32)}, +'dog.jpg': {'res5_3_branch2c_conv': array([[-0.11531784, -0.10835785, -0.08809858, ...,0.0055237, 0.01505112, -0.08788397]], dtype=float32), 'res5_3_branch2c_bn': array([[-1.27663755, -1.18272924, -0.90937918, ..., -1.25178063, -1.11515927, -2.59122872]], dtype=float32)} +} +``` + +仔细观察,这些特征值与上述使用C++接口提取的结果是一致的。 + +## 预测 + +`classify.py`文件也可以用于对样本进行预测。我们提供了一个示例脚本`predict.sh`,它使用50层的ResNet模型来对`example/test.list`中的数据进行预测。 + +``` +cd demo/model_zoo/resnet +./predict.sh +``` + +predict.sh调用了`classify.py`: + +``` +python classify.py \ + --job=predict \ + --conf=resnet.py\ + --multi_crop \ + --model=model/resnet_50 \ + --use_gpu=1 \ + --data=./example/test.list +```
+* \--job=predict: 指定工作模式为预测。 +* \--conf=resnet.py: 网络配置文件。 +* \--multi_crop: 使用10个裁剪图像块,预测概率取平均。 +* \--use_gpu=1: 指定是否使用GPU。 +* \--model=model/resnet_50: 模型路径。 +* \--data=./example/test.list: 数据列表。 + +如果运行成功,你将会看到如下结果,其中156和282是这些图像的分类标签。 + +``` +Label of example/dog.jpg is: 156 +Label of example/cat.jpg is: 282 +``` diff --git a/doc/tutorials/imagenet_model/resnet_model_en.md b/doc/tutorials/imagenet_model/resnet_model_en.md index 5403ab9f17d2399fee878d0f3c512cb166aba06f..478ad06193b14ba7fe02238df621db1f7b0804d4 100644 --- a/doc/tutorials/imagenet_model/resnet_model_en.md +++ b/doc/tutorials/imagenet_model/resnet_model_en.md @@ -52,7 +52,7 @@ See ```demo/model_zoo/resnet/resnet.py```. This config contains network of 50, 1 ### Network Visualization -You can get a diagram of ResNet network by running the following commands. The script generates dot file and then converts dot file to PNG file, which uses installed draw_dot tool in our server. If you can not access the server, just install graphviz to convert dot file. +You can get a diagram of the ResNet network by running the following commands. The script generates a dot file and then converts it to a PNG file, which requires graphviz to be installed. ``` cd demo/model_zoo/resnet ./net_diagram.sh ``` @@ -138,7 +138,7 @@ There are four parameters in this layer. In fact, only .w0 and .wbias are the le ### Parameter Observation -Users who want to observe the parameters can use python to read: +Users who want to observe the parameters can use Python to read: ``` import sys @@ -209,7 +209,7 @@ If successful, features are saved in `fea_output/rank-00000` as follows. And you ### Python Interface -`demo/model_zoo/resnet/classify.py` is an example to show how to use python to extract features. Following example still uses data of `./example/test.list`. Command is as follows: +`demo/model_zoo/resnet/classify.py` is an example to show how to use Python to extract features. The following example still uses the data of `./example/test.list`. The command is as follows: ``` cd demo/model_zoo/resnet @@ -238,8 +238,6 @@ python classify.py \ * \--output_layer="xxx,xxx": specify layers to extract features. * \--output_dir=features: output directory. -Note, since the convolution layer in these ResNet models is suitable for the cudnn implementation which only support GPU. It not support CPU mode because of compatibility issue and we will fix later. - If run successfully, you will see features saved in `features/batch_0`, this file is produced with cPickle. You can use `load_feature_py` interface in `load_feature.py` to open the file, and it returns a dictionary as follows: ``` diff --git a/doc/tutorials/index_cn.md b/doc/tutorials/index_cn.md new file mode 100644 index 0000000000000000000000000000000000000000..fddaee5b2d1be39917c18d992b56dac0daff194a --- /dev/null +++ b/doc/tutorials/index_cn.md @@ -0,0 +1,23 @@ +# TUTORIALS +There are several examples and demos here. + +## Quick Start + +* [Quick Start](quick_start/index_cn.rst) + +## Image + +* TBD + +## NLP + +* [Sentiment Analysis](sentiment_analysis/index_cn.md) +* [Semantic Role Labeling](semantic_role_labeling/index_cn.rst) + +## Recommendation + +* TBD + +## Model Zoo + +* TBD diff --git a/doc/tutorials/index_en.md b/doc/tutorials/index_en.md index 97de356665d23543ddc241552c6e3c896a78db86..039ec4b4a45d06ad9875796a4dea40da252838b4 100644 --- a/doc/tutorials/index_en.md +++ b/doc/tutorials/index_en.md @@ -1,7 +1,9 @@ # TUTORIALS -There are serveral examples and demos here. +There are several examples and demos here. 
-## [Quick Start](quick_start/index_en.md) +## Quick Start + +* [Quick Start](quick_start/index_en.md) ## Image diff --git a/doc_cn/demo/quick_start/index.rst b/doc/tutorials/quick_start/index_cn.rst similarity index 87% rename from doc_cn/demo/quick_start/index.rst rename to doc/tutorials/quick_start/index_cn.rst index 0536936dc47689d3ff285b919586a10128a0c745..754c2f6212527f7c3c655b9a7f341230e24084e3 100644 --- a/doc_cn/demo/quick_start/index.rst +++ b/doc/tutorials/quick_start/index_cn.rst @@ -21,7 +21,7 @@ PaddlePaddle快速入门教程 使用PaddlePaddle, 每一个任务流程都可以被划分为如下五个步骤。 - .. image:: Pipeline.jpg + .. image:: src/Pipeline_cn.jpg :align: center :scale: 80% @@ -99,7 +99,7 @@ Python脚本读取数据 本小节我们将介绍模型网络结构。 - .. image:: PipelineNetwork.jpg + .. image:: src/PipelineNetwork_cn.jpg :align: center :scale: 80% @@ -112,7 +112,7 @@ Python脚本读取数据 具体流程如下: - .. image:: NetLR.jpg + .. image:: src/NetLR_cn.jpg :align: center :scale: 80% @@ -147,9 +147,9 @@ Python脚本读取数据 **效果总结**:我们将在后面介绍训练和预测流程的脚本。在此为方便对比不同网络结构,我们总结了各个网络的复杂度和效果。 ===================== =============================== ================= - 网络名称 参数数量 错误率 + 网络名称 参数数量 错误率 ===================== =============================== ================= - 逻辑回归 252 KB 8.652 % + 逻辑回归 252 KB 8.652 % ===================== =============================== ================= 词向量模型 @@ -176,7 +176,7 @@ embedding模型需要稍微改变提供数据的Python脚本,即 ``dataprovide 该模型依然使用逻辑回归分类网络的框架, 只是将句子用连续向量表示替换为用稀疏向量表示, 即对第三步进行替换。句子表示的计算更新为两步: -.. image:: NetContinuous.jpg +.. image:: src/NetContinuous_cn.jpg :align: center :scale: 80% @@ -197,9 +197,9 @@ embedding模型需要稍微改变提供数据的Python脚本,即 ``dataprovide **效果总结:** ===================== =============================== ================== - 网络名称 参数数量 错误率 + 网络名称 参数数量 错误率 ===================== =============================== ================== - 词向量模型 15 MB 8.484 % + 词向量模型 15 MB 8.484 % ===================== =============================== ================== 卷积模型 @@ -207,7 +207,7 @@ embedding模型需要稍微改变提供数据的Python脚本,即 ``dataprovide 卷积网络是一种特殊的从词向量表示到句子表示的方法, 也就是将词向量模型进一步演化为三个新步骤。 -.. image:: NetConv.jpg +.. image:: src/NetConv_cn.jpg :align: center :scale: 80% @@ -230,15 +230,15 @@ embedding模型需要稍微改变提供数据的Python脚本,即 ``dataprovide **效果总结:** ===================== =============================== ======================== - 网络名称 参数数量 错误率 + 网络名称 参数数量 错误率 ===================== =============================== ======================== - 卷积模型 16 MB 5.628 % + 卷积模型 16 MB 5.628 % ===================== =============================== ======================== 时序模型 ---------- -.. image:: NetRNN.jpg +.. image:: src/NetRNN_cn.jpg :align: center :scale: 80% @@ -260,9 +260,9 @@ embedding模型需要稍微改变提供数据的Python脚本,即 ``dataprovide 本次试验,我们采用单层LSTM模型,并使用了Dropout,**效果总结:** ===================== =============================== ========================= - 网络名称 参数数量 错误率 + 网络名称 参数数量 错误率 ===================== =============================== ========================= - 时序模型 16 MB 4.812 % + 时序模型 16 MB 4.812 % ===================== =============================== ========================= 优化算法 @@ -284,7 +284,7 @@ Momentum, RMSProp,AdaDelta,AdaGrad,ADAM,Adamax等,这里采用Adam优 在数据加载和网络配置完成之后, 我们就可以训练模型了。 -.. image:: PipelineTrain.jpg +.. image:: src/PipelineTrain_cn.jpg :align: center :scale: 80% @@ -294,7 +294,7 @@ Momentum, RMSProp,AdaDelta,AdaGrad,ADAM,Adamax等,这里采用Adam优 ./train.sh -``train.sh``中包含了训练模型的基本命令。训练时所需设置的主要参数如下: +``train.sh`` 中包含了训练模型的基本命令。训练时所需设置的主要参数如下: .. code-block:: bash @@ -312,7 +312,7 @@ Momentum, RMSProp,AdaDelta,AdaGrad,ADAM,Adamax等,这里采用Adam优 当模型训练好了之后,我们就可以进行预测了。 -.. 
image:: PipelineTest.jpg +.. image:: src/PipelineTest_cn.jpg :align: center :scale: 80% @@ -348,12 +348,12 @@ Momentum, RMSProp,AdaDelta,AdaGrad,ADAM,Adamax等,这里采用Adam优 对于Amazon-Elec测试集(25k), 如下表格,展示了上述网络模型的训练效果: ===================== =============================== ============= ================================== - 网络名称 参数数量 错误率 配置文件 + 网络名称 参数数量 错误率 配置文件 ===================== =============================== ============= ================================== - 逻辑回归模型 252 KB 8.652% trainer_config.lr.py - 词向量模型 15 MB 8.484% trainer_config.emb.py + 逻辑回归模型 252 KB 8.652% trainer_config.lr.py + 词向量模型 15 MB 8.484% trainer_config.emb.py 卷积模型 16 MB 5.628% trainer_config.cnn.py - 时序模型 16 MB 4.812% trainer_config.lstm.py + 时序模型 16 MB 4.812% trainer_config.lstm.py ===================== =============================== ============= ================================== @@ -384,12 +384,12 @@ Momentum, RMSProp,AdaDelta,AdaGrad,ADAM,Adamax等,这里采用Adam优 模型训练会看到类似上面这样的日志信息,详细的参数解释,请参考如下表格: =========================================== ============================================================== - 名称 解释 + 名称 解释 =========================================== ============================================================== - Batch=20 表示过了20个batch - samples=2560 表示过了2560个样本 - AvgCost 每个pass的第0个batch到当前batch所有样本的平均cost - CurrentCost 当前log_period个batch所有样本的平均cost - Eval: classification_error_evaluator 每个pass的第0个batch到当前batch所有样本的平均分类错误率 - CurrentEval: classification_error_evaluator 当前log_period个batch所有样本的平均分类错误率 + Batch=20 表示过了20个batch + samples=2560 表示过了2560个样本 + AvgCost 每个pass的第0个batch到当前batch所有样本的平均cost + CurrentCost 当前log_period个batch所有样本的平均cost + Eval: classification_error_evaluator 每个pass的第0个batch到当前batch所有样本的平均分类错误率 + CurrentEval: classification_error_evaluator 当前log_period个batch所有样本的平均分类错误率 =========================================== ============================================================== diff --git a/doc/tutorials/quick_start/index_en.md b/doc/tutorials/quick_start/index_en.md index 29637293fad79f3c3b9aabe83b71758b471b9338..4e765b23037d8b4b717d12437f839cc488badf5b 100644 --- a/doc/tutorials/quick_start/index_en.md +++ b/doc/tutorials/quick_start/index_en.md @@ -32,7 +32,7 @@ The monitor breaks down two months after purchase. the classifier should output “negative“. To build your text classification system, your code will need to perform five steps: -
![](./Pipeline_en.jpg)
+
![](./src/Pipeline_en.jpg)
- Preprocess data into a standardized format. - Provide data to the learning model. @@ -160,14 +160,14 @@ You can refer to the following link for more detailed examples and data formats: ## Network Architecture You will describe four kinds of network architectures in this section. -
![](./PipelineNetwork_en.jpg)
+
![](./src/PipelineNetwork_en.jpg)
First, you will build a logistic regression model. Later, you will also get chance to build other more powerful network architectures. For more detailed documentation, you could refer to: layer documentation. All configuration files are in `demo/quick_start` directory. ### Logistic Regression The architecture is illustrated in the following picture: -
![](./NetLR_en.png)
+
![](./src/NetLR_en.png)
- You need define the data for text features. The size of the data layer is the number of words in the dictionary. @@ -182,10 +182,10 @@ label = data_layer(name="label", size=label_dim) ``` - It uses logistic regression model to classify the vector, and it will output the classification error during training. - - Each layer has an *input* argument that specifies its input layer. Some layers can have multiple input layers. You can use a list of the input layers as input in that case. - - *size* for each layer means the number of neurons of the layer. - - *act_type* means activation function applied to the output of each neuron independently. - - Some layers can have additional special inputs. For example, `classification_cost` needs ground truth label as input to compute classification loss and error. + - Each layer has an *input* argument that specifies its input layer. Some layers can have multiple input layers. You can use a list of the input layers as input in that case. + - *size* for each layer means the number of neurons of the layer. + - *act_type* means activation function applied to the output of each neuron independently. + - Some layers can have additional special inputs. For example, `classification_cost` needs ground truth label as input to compute classification loss and error. ```python # Define a fully connected layer with logistic activation (also called softmax activation). output = fc_layer(input=word, @@ -240,7 +240,7 @@ def process(settings, file_name): ``` This model is very similar to the framework of logistic regression, but it uses word embedding vectors instead of a sparse vectors to represent words. -
![](./NetContinuous_en.png)
+
![](./src/NetContinuous_en.png)
- It can look up the dense word embedding vector in the dictionary (its words embedding vector is `word_dim`). The input is a sequence of N words, the output is N word_dim dimensional vectors. @@ -283,7 +283,7 @@ The performance is summarized in the following table: ### Convolutional Neural Network Model Convolutional neural network converts a sequence of word embeddings into a sentence representation using temporal convolutions. You will transform the fully connected layer of the word embedding model to 3 new sub-steps. -
![](./NetConv_en.png)
+
![](./src/NetConv_en.png)
Text convolution has 3 steps: @@ -295,8 +295,8 @@ Text convolution has 3 steps: # context_len means convolution kernel size. # context_start means the start of the convolution. It can be negative. In that case, zero padding is applied. text_conv = sequence_conv_pool(input=emb, - context_start=k, - context_len=2 * k + 1) + context_start=k, + context_len=2 * k + 1) ``` The performance is summarized in the following table: @@ -324,7 +324,7 @@ The performance is summarized in the following table:
### Recurrent Model -
![](./NetRNN_en.png)
+
![](./src/NetRNN_en.png)
You can use Recurrent neural network as our time sequence model, including simple RNN model, GRU model, and LSTM model。 @@ -378,7 +378,7 @@ settings(batch_size=128, ## Training Model After completing data preparation and network architecture specification, you will run the training script. -
![](./PipelineTrain_en.png)
+
![](./src/PipelineTrain_en.png)
Training script: our training script is in `train.sh` file. The training arguments are listed below: @@ -395,7 +395,7 @@ We do not provide examples on how to train on clusters here. If you want to trai ## Inference You can use the trained model to perform prediction on the dataset with no labels. You can also evaluate the model on dataset with labels to obtain its test accuracy. -
![](./PipelineTest_en.png)
+
![](./src/PipelineTest_en.png)
The test script is listed below. PaddlePaddle can evaluate a model on the data with labels specified in `test.list`. diff --git a/doc_cn/demo/quick_start/NetContinuous.jpg b/doc/tutorials/quick_start/src/NetContinuous_cn.jpg similarity index 100% rename from doc_cn/demo/quick_start/NetContinuous.jpg rename to doc/tutorials/quick_start/src/NetContinuous_cn.jpg diff --git a/doc/tutorials/quick_start/NetContinuous_en.png b/doc/tutorials/quick_start/src/NetContinuous_en.png similarity index 100% rename from doc/tutorials/quick_start/NetContinuous_en.png rename to doc/tutorials/quick_start/src/NetContinuous_en.png diff --git a/doc_cn/demo/quick_start/NetConv.jpg b/doc/tutorials/quick_start/src/NetConv_cn.jpg similarity index 100% rename from doc_cn/demo/quick_start/NetConv.jpg rename to doc/tutorials/quick_start/src/NetConv_cn.jpg diff --git a/doc/tutorials/quick_start/NetConv_en.png b/doc/tutorials/quick_start/src/NetConv_en.png similarity index 100% rename from doc/tutorials/quick_start/NetConv_en.png rename to doc/tutorials/quick_start/src/NetConv_en.png diff --git a/doc_cn/demo/quick_start/NetLR.jpg b/doc/tutorials/quick_start/src/NetLR_cn.jpg similarity index 100% rename from doc_cn/demo/quick_start/NetLR.jpg rename to doc/tutorials/quick_start/src/NetLR_cn.jpg diff --git a/doc/tutorials/quick_start/NetLR_en.png b/doc/tutorials/quick_start/src/NetLR_en.png similarity index 100% rename from doc/tutorials/quick_start/NetLR_en.png rename to doc/tutorials/quick_start/src/NetLR_en.png diff --git a/doc_cn/demo/quick_start/NetRNN.jpg b/doc/tutorials/quick_start/src/NetRNN_cn.jpg similarity index 100% rename from doc_cn/demo/quick_start/NetRNN.jpg rename to doc/tutorials/quick_start/src/NetRNN_cn.jpg diff --git a/doc/tutorials/quick_start/NetRNN_en.png b/doc/tutorials/quick_start/src/NetRNN_en.png similarity index 100% rename from doc/tutorials/quick_start/NetRNN_en.png rename to doc/tutorials/quick_start/src/NetRNN_en.png diff --git a/doc_cn/demo/quick_start/PipelineNetwork.jpg b/doc/tutorials/quick_start/src/PipelineNetwork_cn.jpg similarity index 100% rename from doc_cn/demo/quick_start/PipelineNetwork.jpg rename to doc/tutorials/quick_start/src/PipelineNetwork_cn.jpg diff --git a/doc/tutorials/quick_start/PipelineNetwork_en.jpg b/doc/tutorials/quick_start/src/PipelineNetwork_en.jpg similarity index 100% rename from doc/tutorials/quick_start/PipelineNetwork_en.jpg rename to doc/tutorials/quick_start/src/PipelineNetwork_en.jpg diff --git a/doc_cn/demo/quick_start/PipelineTest.jpg b/doc/tutorials/quick_start/src/PipelineTest_cn.jpg similarity index 100% rename from doc_cn/demo/quick_start/PipelineTest.jpg rename to doc/tutorials/quick_start/src/PipelineTest_cn.jpg diff --git a/doc/tutorials/quick_start/PipelineTest_en.png b/doc/tutorials/quick_start/src/PipelineTest_en.png similarity index 100% rename from doc/tutorials/quick_start/PipelineTest_en.png rename to doc/tutorials/quick_start/src/PipelineTest_en.png diff --git a/doc_cn/demo/quick_start/PipelineTrain.jpg b/doc/tutorials/quick_start/src/PipelineTrain_cn.jpg similarity index 100% rename from doc_cn/demo/quick_start/PipelineTrain.jpg rename to doc/tutorials/quick_start/src/PipelineTrain_cn.jpg diff --git a/doc/tutorials/quick_start/PipelineTrain_en.png b/doc/tutorials/quick_start/src/PipelineTrain_en.png similarity index 100% rename from doc/tutorials/quick_start/PipelineTrain_en.png rename to doc/tutorials/quick_start/src/PipelineTrain_en.png diff --git a/doc_cn/demo/quick_start/Pipeline.jpg b/doc/tutorials/quick_start/src/Pipeline_cn.jpg 
similarity index 100% rename from doc_cn/demo/quick_start/Pipeline.jpg rename to doc/tutorials/quick_start/src/Pipeline_cn.jpg diff --git a/doc/tutorials/quick_start/Pipeline_en.jpg b/doc/tutorials/quick_start/src/Pipeline_en.jpg similarity index 100% rename from doc/tutorials/quick_start/Pipeline_en.jpg rename to doc/tutorials/quick_start/src/Pipeline_en.jpg diff --git a/doc/tutorials/rec/ml_dataset_en.md b/doc/tutorials/rec/ml_dataset_en.md index dc11a5e06031b62d9f86e4dd83a14b2f1a72afc3..25dea5c4afbf1ce1c1ac6195cbd245b116459e2e 100644 --- a/doc/tutorials/rec/ml_dataset_en.md +++ b/doc/tutorials/rec/ml_dataset_en.md @@ -1,6 +1,5 @@ ```eval_rst -.. _demo_ml_dataset_en: - +.. _demo_ml_dataset: ``` # MovieLens Dataset diff --git a/doc/tutorials/rec/ml_regression_en.rst b/doc/tutorials/rec/ml_regression_en.rst index 6346090a84fad71ab9dff21de0dcc536b5760b83..4bb2586e342e7321375e99f760bfd464d0480872 100644 --- a/doc/tutorials/rec/ml_regression_en.rst +++ b/doc/tutorials/rec/ml_regression_en.rst @@ -16,7 +16,7 @@ Data Preparation ```````````````` Download and extract dataset '''''''''''''''''''''''''''' -We use :ref:`demo_ml_dataset_en` here. +We use :ref:`demo_ml_dataset` here. To download and unzip the dataset, simply run the following commands. .. code-block:: bash @@ -264,7 +264,7 @@ In this :code:`dataprovider.py`, we should set\: * use_seq\: Whether this :code:`dataprovider.py` in sequence mode or not. * process\: Return each sample of data to :code:`paddle`. -The data provider details document see :ref:`api_pydataprovider2_en`. +The data provider details document see :ref:`api_pydataprovider2`. Train ````` @@ -280,7 +280,7 @@ The run.sh is shown as follow: It just start a paddle training process, write the log to `log.txt`, then print it on screen. -Each command line argument in :code:`run.sh`, please refer to the :ref:`cmd_line_index_en` page. The short description of these arguments is shown as follow. +Each command line argument in :code:`run.sh`, please refer to the :ref:`cmd_line_index` page. The short description of these arguments is shown as follow. * config\: Tell paddle which file is neural network configuration. * save_dir\: Tell paddle save model into './output' diff --git a/doc/tutorials/semantic_role_labeling/index_cn.md b/doc/tutorials/semantic_role_labeling/index_cn.md index c7e0a78f5071ed0d1702036f4ee0af3881096c68..f6061766c038a7bb6e4ae376685a10cd5669d2ed 100644 --- a/doc/tutorials/semantic_role_labeling/index_cn.md +++ b/doc/tutorials/semantic_role_labeling/index_cn.md @@ -149,7 +149,7 @@ paddle train \ 训练后,模型将保存在目录`output`中。 我们的训练曲线如下:
-![pic](./curve.jpg) +![pic](./src/curve.jpg)
### 测试 diff --git a/doc/tutorials/semantic_role_labeling/index_en.md b/doc/tutorials/semantic_role_labeling/index_en.md index bdd12c0d9abd759d8507a3029f373dc5db6f8f40..92d7c634832119c718711a57c16f69492d405f28 100644 --- a/doc/tutorials/semantic_role_labeling/index_en.md +++ b/doc/tutorials/semantic_role_labeling/index_en.md @@ -1,5 +1,5 @@ ```eval_rst -.. _semantic_role_labeling_en: +.. _semantic_role_labeling: ``` # Semantic Role labeling Tutorial # @@ -45,13 +45,13 @@ Unlike Bidirectional-LSTM that used in Sentiment Analysis demo, the DB-LSTM ado The following figure shows a temporal expanded 2-layer DB-LSTM network.
-![pic](./network_arch.png) +![pic](./src/network_arch.png)
### Features Two input features play an essential role in this pipeline: predicate (pred) and argument (argu). Two other features: predicate context (ctx-p) and region mark (mr) are also adopted. Because a single predicate word can not exactly describe the predicate information, especially when the same words appear more than one times in a sentence. With the predicate context, the ambiguity can be largely eliminated. Similarly, we use region mark mr = 1 to denote the argument position if it locates in the predicate context region, or mr = 0 if does not. These four simple features are all we need for our SRL system. Features of one sample with context size set to 1 is showed as following[2]:
-![pic](./feature.jpg) +![pic](./src/feature.jpg)
In this sample, the coresponding labelled sentence is: @@ -152,7 +152,7 @@ paddle train \ After training, the models will be saved in directory `output`. Our training curve is as following:
-![pic](./curve.jpg) +![pic](./src/curve.jpg)
### Run testing diff --git a/doc/tutorials/semantic_role_labeling/curve.jpg b/doc/tutorials/semantic_role_labeling/src/curve.jpg similarity index 100% rename from doc/tutorials/semantic_role_labeling/curve.jpg rename to doc/tutorials/semantic_role_labeling/src/curve.jpg diff --git a/doc/tutorials/semantic_role_labeling/src/feature.jpg b/doc/tutorials/semantic_role_labeling/src/feature.jpg new file mode 100644 index 0000000000000000000000000000000000000000..0e3310e4ace5613917e7779d3198ccbb3cdc5ada Binary files /dev/null and b/doc/tutorials/semantic_role_labeling/src/feature.jpg differ diff --git a/doc/tutorials/semantic_role_labeling/src/network_arch.png b/doc/tutorials/semantic_role_labeling/src/network_arch.png new file mode 100644 index 0000000000000000000000000000000000000000..4ae7864212f2a0a38102ee7ff600527ea99fec82 Binary files /dev/null and b/doc/tutorials/semantic_role_labeling/src/network_arch.png differ diff --git a/doc_cn/demo/sentiment_analysis/sentiment_analysis.md b/doc/tutorials/sentiment_analysis/index_cn.md similarity index 96% rename from doc_cn/demo/sentiment_analysis/sentiment_analysis.md rename to doc/tutorials/sentiment_analysis/index_cn.md index ba307e97e3010629548460e25e894d082a6ddd4e..1323ec1a6abb2e7b5eeb2fbfff9cce5fe78a2c06 100644 --- a/doc_cn/demo/sentiment_analysis/sentiment_analysis.md +++ b/doc/tutorials/sentiment_analysis/index_cn.md @@ -1,325 +1,325 @@ -# 情感分析教程 - -情感分析有许多应用场景。 一个基本的应用场景是区分给定文本的褒贬两极性,给定的文本可以是一个文档、句子、或者是一个小的文本片段。 一个简单的例子如:把用户在购物网站、旅游网站、团购网站(亚马逊、天猫、淘宝等)上发表的评论分成正面评论和负面评论两类。 - -情感分析也常用于基于大量评论和个人博客来监控社会媒体。 例如,研究人员分析了几个关于消费者信心和政治观点的调查,结果发现它们与同时期的Twitter消息中的情绪词频率相关 [1]。 另一个例子是通过分析每日Twitter博客的文本内容来预测股票变动 [2]。 - -另一方面,抓取产品的用户评论并分析他们的情感,有助于理解用户对不同公司,不同产品,甚至不同竞争对手产品的偏好。 - -本教程将指导您完成长期短期记忆(LSTM)网络的训练过程,以分类来自[大型电影评论数据集](http://ai.stanford.edu/~amaas/data/sentiment/)(有时称为[互联网电影数据库 (IMDB)](http://ai.stanford.edu/~amaas/papers/wvSent_acl2011.pdf))的句子的情感 。 此数据集包含电影评论及其相关联的类别标签,即正面和负面。 - -## 数椐准备 - -### IMDB 数椐介绍 - -训练模型之前, 我们需要预处理数椐并构建一个字典。 首先, 你可以使用下面的脚本下载 IMDB 数椐集和[Moses](http://www.statmt.org/moses/)工具, 这是一个基于统计的机器翻译系统. 
我们提供了一个数据预处理脚本,它不仅能够处理IMDB数据,还能处理其他用户自定义的数据。 为了使用提前编写的脚本,需要将标记的训练和测试样本移动到另一个路径,这已经在`get_imdb.sh`中完成。 - -``` -cd demo/sentiment/data -./get_imdb.sh -``` -如果数椐获取成功,你将在目录```./demo/sentiment/data```中看到下面的文件: - -``` -aclImdb get_imdb.sh imdb mosesdecoder-master -``` - -* aclImdb: 从外部网站上下载的原始数椐集。 -* imdb: 仅包含训练和测试数椐集。 -* mosesdecoder-master: Moses 工具。 - -IMDB数据集包含25,000个已标注过的高极性电影评论用于训练,25,000个用于测试。负面的评论的得分小于等于4,正面的评论的得大于等于7,总评分10分。 运行完脚本 `./get_imdb.sh`后, 我们可以看到在目录 `aclImdb`中的数椐集的结构如下: - -``` -imdbEr.txt imdb.vocab README test train -``` -* train: 训练数椐集。 -* test : 测试数椐集。 -* imdb.vocab: 字典文件。 -* imdbEr.txt: 字典imdb.vocab中每个切分单词的预期评级。 -* README: 数椐说明文档。 - -测试集和训练集目录包含下面的文件: - -``` -labeledBow.feat neg pos unsup unsupBow.feat urls_neg.txt urls_pos.txt urls_unsup.txt -``` - -* pos: 正面评价样本,包含12,500个txt文件,每个文件是一个电影评论。 -* neg: 负面评价样本,包含12,500个txt文件,每个文件是一个电影评论。 -* unsup: 未标记的评价样本,包含50,000个txt文件。 -* urls_xx.txt: 每个评论的网址。 -* xxBow.feat: 用于统计词频的Bow模型特征。 - -### IMDB 数椐准备 - -在这个例子中,我们只使用已经标注过的训练集和测试集,且默认在训练集上构建字典,而不使用IMDB数椐集中的imdb.vocab做为字典。训练集已经做了随机打乱排序而测试集没有。 Moses 工具中的脚本`tokenizer.perl` 用于切分单单词和标点符号。执行下面的命令就可以预处理数椐。 - -``` -cd demo/sentiment/ -./preprocess.sh -``` -preprocess.sh: - -``` -data_dir="./data/imdb" -python preprocess.py -i data_dir -``` - -* data_dir: 输入数椐所在目录。 -* preprocess.py: 预处理脚本。 - -运行成功后目录`demo/sentiment/data/pre-imdb` 结构如下: - -``` -dict.txt labels.list test.list test_part_000 train.list train_part_000 -``` -* test\_part\_000 and train\_part\_000: 所有标记的测试集和训练集, 训练集已经随机打乱。 -* train.list and test.list: 训练集和测试集文件列表。 -* dict.txt: 利用训练集生成的字典。 -* labels.txt: neg 0, pos 1, 含义:标签0表示负面的评论,标签1表示正面的评论。 - -### 用户自定义数椐预处理 - -如果你执行其它的用情感分析来分类文本的任务,可以按如下的结构来准备数椐. 我们提供了脚本来构建字典和预处理数椐。所以你只用按下面的结构来组织数椐就行了。 - -``` -dataset -|----train -| |----class1 -| | |----text_files -| |----class2 -| | |----text_files -| | ... -|----test -| |----class1 -| | |----text_files -| |----class2 -| | |----text_files -| | ... -``` -* dataset: 一级目录。 -* train, test: 二级目录。 -* class1,class2,...: 三级目录。 -* text_files: 文本格式的实例文件。 - -所有同目录下的文本实例文件都是同级别的。 每个文本文件包含一个或者多个实例,每一行表示一个实例。 为了充分的随机打乱训练集, 在预处理含有多行数椐的文本文件时参数设置稍有不同, 执行`preprocess.sh`脚本时需要加上`-m True`参数。 tokenizer.perl 默认用来切分单记和标点符号,如果你不需要这个操作,在运行`preprocess.sh`时加上`-t False`参数即可。 - -## 训练模型 - -在这步任务中,我们使用了循环神经网络(RNN)的 LSTM 架构来训练情感分析模型。 引入LSTM模型主要是为了克服消失梯度的问题。 LSTM网络类似于具有隐藏层的标准循环神经网络, 但是隐藏层中的每个普通节点被一个记忆单元替换。 每个记忆单元包含四个主要的元素: 输入门, 具有自循环连接的神经元,忘记门和输出门。 更多的细节可以在文献中找到[4]。 LSTM架构的最大优点是它可以在长时间间隔内记忆信息,而没有短时记忆的损失。在有新的单词来临的每一个时间步骤内,存储在记忆单元区块的历史信息被更新用来迭代的学习单词以合理的序列程现。 - -
![LSTM](../../../doc/demo/sentiment_analysis/lstm.png)
-
图表 1. LSTM [3]
- -情感分析是自然语言理解中最典型的问题之一。 它的目的是预测在一个序列中表达的情感态度。 通常, ,仅仅是一些关键词,如形容词和副词,在预测序列或段落的情感中起主要作用。然而有些评论上下文非常长,例如 IMDB的数椐集。 我们只所以使用LSTM来执行这个任务是因为其改进的设计并且具有门机制。 首先,它能够从词级到具有可变上下文长度的上下文级别来总结表示。 第二,它可以在句子级别利用可扩展的上下文, 而大多数方法只是利用n-gram级别的知识。第三,它直接学习段落表示,而不是组合上下文级别信息。 - -在本演示中,我们提供两个网络,即双向LSTM和三层堆叠LSTM。 - -#### 双向LSTM - -图2是双向LSTM网络,后面连全连接层和softmax层。 - -
![BiLSTM](../../../doc/demo/sentiment_analysis/bi_lstm.jpg)
-
图 2. Bidirectional-LSTM
- -#### Stacked-LSTM -图3是三层LSTM结构。图的底部是word embedding(对文档处理后形成的单词向量)。 接下来,连接三个LSTM隐藏层,并且第二个是反向LSTM。然后提取隐藏LSTM层的所有时间步长的最大词向量作为整个序列的表示。 最后,使用具有softmax激活的全连接前馈层来执行分类任务。 更多内容可查看参考文献 [5]。 - -
![StackedLSTM](../../../doc/demo/sentiment_analysis/stacked_lstm.jpg)
-
图 3. Stacked-LSTM for sentiment analysis
- -**配置** - -进入`demo/sentiment` 目录 , `trainer_config.py` 是一个配置文件的例子, 其中包含算法和网络配置。第一行从`sentiment_net.py`中导出预定义的网络。 - -trainer_config.py: - -```python -from sentiment_net import * - -data_dir = "./data/pre-imdb" -# whether this config is used for test -is_test = get_config_arg('is_test', bool, False) -# whether this config is used for prediction -is_predict = get_config_arg('is_predict', bool, False) -dict_dim, class_dim = sentiment_data(data_dir, is_test, is_predict) - -################## Algorithm Config ##################### - -settings( - batch_size=128, - learning_rate=2e-3, - learning_method=AdamOptimizer(), - regularization=L2Regularization(8e-4), - gradient_clipping_threshold=25 -) - -#################### Network Config ###################### -stacked_lstm_net(dict_dim, class_dim=class_dim, - stacked_num=3, is_predict=is_predict) -#bidirectional_lstm_net(dict_dim, class_dim=class_dim, is_predict=is_predict) -``` - -* **数椐定义**: - * get\_config\_arg(): 获取通过 `--config_args=xx` 设置的命令行参数。 - * 定义训练数椐和测试数椐提供者, 这里使用了PaddlePaddle的Python接口来加载数椐。想了解更多细节可以参考PyDataProvider部分的文档 - -* **算法配置**: - * 使用随机梯度下降(sgd)算法。 - * 使用 adam 优化。 - * 设置batch size大小为128。 - * 设置平均sgd窗口。 - * 设置全局学习率。 -* **网络配置**: - * dict_dim: 获取字典维度。 - * class_dim: 设置类别数,IMDB有两个标签,即正面评价标签和负面评价标签。 - * `stacked_lstm_net`: 预定义网络如图3所示,默认情况下使用此网络 - * `bidirectional_lstm_net`: 预定义网络,如图2所示。 - -**训练** - -首先安装PaddlePaddle。 然后使用下面的脚本 `train.sh` 来开启本地的训练。 - -``` -cd demo/sentiment/ -./train.sh -``` - -train.sh: - -``` -config=trainer_config.py -output=./model_output -paddle train --config=$config \ - --save_dir=$output \ - --job=train \ - --use_gpu=false \ - --trainer_count=4 \ - --num_passes=10 \ - --log_period=20 \ - --dot_period=20 \ - --show_parameter_stats_period=100 \ - --test_all_data_in_one_period=1 \ - 2>&1 | tee 'train.log' -``` - -* \--config=$config: 设置网络配置。 -* \--save\_dir=$output: 设置输出路径以保存训练完成的模型。 -* \--job=train: 设置工作模式为训练。 -* \--use\_gpu=false: 使用CPU训练,如果你安装GPU版本的PaddlePaddle,并想使用GPU来训练设置为true。 -* \--trainer\_count=4:设置线程数(或GPU个数)。 -* \--num\_passes=15: 设置pass,PaddlePaddle中的一个pass意味着对数据集中的所有样本进行一次训练。 -* \--log\_period=20: 每20个batch打印一次日志。 -* \--show\_parameter\_stats\_period=100: 每100个batch打印一次统计信息。 -* \--test\_all_data\_in\_one\_period=1: 每次测试都测试所有数据。 - -如果运行成功,输出日志保存在路径 `demo/sentiment/train.log`中,模型保存在目录`demo/sentiment/model_output/`中。 输出日志说明如下: - -``` -Batch=20 samples=2560 AvgCost=0.681644 CurrentCost=0.681644 Eval: classification_error_evaluator=0.36875 CurrentEval: classification_error_evaluator=0.36875 -... 
-Pass=0 Batch=196 samples=25000 AvgCost=0.418964 Eval: classification_error_evaluator=0.1922 -Test samples=24999 cost=0.39297 Eval: classification_error_evaluator=0.149406 -``` -- Batch=xx: 表示训练了xx个Batch。 -- samples=xx: 表示训练了xx个样本。。 -- AvgCost=xx: 从第0个batch到当前batch的平均损失。 -- CurrentCost=xx: 最新log_period个batch处理的当前损失。 -- Eval: classification\_error\_evaluator=xx: 表示第0个batch到当前batch的分类错误。 -- CurrentEval: classification\_error\_evaluator: 最新log_period个batch的分类错误。 -- Pass=0: 通过所有训练集一次称为一遍。 0表示第一次经过训练集。 - -默认情况下,我们使用`stacked_lstm_net`网络,当传递相同的样本数时,它的收敛速度比`bidirectional_lstm_net`快。如果要使用双向LSTM,只需删除最后一行中的注释并把“stacked_lstm_net”注释掉。 - -## 测试模型 - -测试模型是指使用训练出的模型评估已标记的验证集。 - -``` -cd demo/sentiment -./test.sh -``` - -test.sh: - -```bash -function get_best_pass() { - cat $1 | grep -Pzo 'Test .*\n.*pass-.*' | \ - sed -r 'N;s/Test.* error=([0-9]+\.[0-9]+).*\n.*pass-([0-9]+)/\1 \2/g' | \ - sort | head -n 1 -} - -log=train.log -LOG=`get_best_pass $log` -LOG=(${LOG}) -evaluate_pass="model_output/pass-${LOG[1]}" - -echo 'evaluating from pass '$evaluate_pass - -model_list=./model.list -touch $model_list | echo $evaluate_pass > $model_list -net_conf=trainer_config.py -paddle train --config=$net_conf \ - --model_list=$model_list \ - --job=test \ - --use_gpu=false \ - --trainer_count=4 \ - --config_args=is_test=1 \ - 2>&1 | tee 'test.log' -``` - -函数`get_best_pass`依据分类错误率获得最佳模型进行测试。 在本示例中,我们默认使用IMDB的测试数据集作为验证。 与训练不同,它需要在这里指定`--job = test`和模型路径,即`--model_list = $model_list`。如果运行成功,日志将保存在“demo / sentiment / test.log”的路径中。例如,在我们的测试中,最好的模型是`model_output / pass-00002`,分类误差是0.115645,如下: - -``` -Pass=0 samples=24999 AvgCost=0.280471 Eval: classification_error_evaluator=0.115645 -``` - -## 预测 - -`predict.py`脚本提供了一个预测接口。在使用它之前请安装PaddlePaddle的python api。 预测IMDB的未标记评论的一个实例如下: - -``` -cd demo/sentiment -./predict.sh -``` -predict.sh: - -``` -#Note the default model is pass-00002, you shold make sure the model path -#exists or change the mode path. -model=model_output/pass-00002/ -config=trainer_config.py -label=data/pre-imdb/labels.list -cat ./data/aclImdb/test/pos/10007_10.txt | python predict.py \ - --tconf=$config\ - --model=$model \ - --label=$label \ - --dict=./data/pre-imdb/dict.txt \ - --batch_size=1 -``` - -* `cat ./data/aclImdb/test/pos/10007_10.txt` : 输入预测样本。 -* `predict.py` : 预测接口脚本。 -* `--tconf=$config` : 设置网络配置。 -* `--model=$model` : 设置模型路径。 -* `--label=$label` : 设置标签类别字典,这个字典是整数标签和字符串标签的一个对应。 -* `--dict=data/pre-imdb/dict.txt` : 设置字典文件。 -* `--batch_size=1` : 设置batch size。 - -注意应该确保默认模型路径`model_output / pass-00002`存在或更改为其它模型路径。 - -本示例的预测结果: - -``` -Loading parameters from model_output/pass-00002/ -./data/aclImdb/test/pos/10014_7.txt: predicting label is pos -``` -我们真诚地感谢您的关注,并欢迎您来参与贡献。 - -## 参考文档 -[1] Brendan O'Connor, Ramnath Balasubramanyan, Bryan R. Routledge, and Noah A. Smith. 2010. [From Tweets to Polls: Linking Text Sentiment to Public Opinion Time Series](http://homes.cs.washington.edu/~nasmith/papers/oconnor+balasubramanyan+routledge+smith.icwsm10.pdf). In ICWSM-2010.
-[2] Johan Bollen, Huina Mao, Xiaojun Zeng. 2011. [Twitter mood predicts the stock market](http://arxiv.org/abs/1010.3003), Journal of Computational Science.
-[3] Alex Graves, Marcus Liwicki, Santiago Fernan- dez, Roman Bertolami, Horst Bunke, and Ju ̈rgen Schmidhuber. 2009. [A novel connectionist system for unconstrained handwriting recognition. IEEE Transactions on Pattern Analysis and Machine In- telligence](http://www.cs.toronto.edu/~graves/tpami_2009.pdf), 31(5):855–868.
-[4] Zachary C. Lipton, [A Critical Review of Recurrent Neural Networks for Sequence Learning](http://arxiv.org/abs/1506.00019v1), arXiv:1506.00019.
-[5] Jie Zhou and Wei Xu; [End-to-end Learning of Semantic Role Labeling Using Recurrent Neural Networks](http://www.aclweb.org/anthology/P/P15/P15-1109.pdf); ACL-IJCNLP 2015.
+# 情感分析教程
+
+情感分析有许多应用场景。一个基本的应用场景是区分给定文本的褒贬两极性,给定的文本可以是一个文档、句子或者一个小的文本片段。一个简单的例子如:把用户在购物网站、旅游网站、团购网站(亚马逊、天猫、淘宝等)上发表的评论分成正面评论和负面评论两类。
+
+情感分析也常用于基于大量评论和个人博客来监控社会媒体。例如,研究人员分析了几个关于消费者信心和政治观点的调查,结果发现它们与同时期的Twitter消息中的情绪词频率相关 [1]。另一个例子是通过分析每日Twitter博客的文本内容来预测股票变动 [2]。
+
+另一方面,抓取产品的用户评论并分析其情感,有助于理解用户对不同公司、不同产品,甚至不同竞争对手产品的偏好。
+
+本教程将指导您完成长短期记忆(LSTM)网络的训练过程,以分类来自[大型电影评论数据集](http://ai.stanford.edu/~amaas/data/sentiment/)(有时称为[互联网电影数据库 (IMDB)](http://ai.stanford.edu/~amaas/papers/wvSent_acl2011.pdf))的句子的情感。此数据集包含电影评论及其相关联的类别标签,即正面和负面。
+
+## 数据准备
+
+### IMDB 数据介绍
+
+训练模型之前,我们需要预处理数据并构建一个字典。首先,你可以使用下面的脚本下载 IMDB 数据集和[Moses](http://www.statmt.org/moses/)工具,后者是一个基于统计的机器翻译系统。我们提供了一个数据预处理脚本,它不仅能够处理IMDB数据,还能处理其他用户自定义的数据。为了使用提前编写的脚本,需要将标记的训练和测试样本移动到另一个路径,这已经在`get_imdb.sh`中完成。
+
+```
+cd demo/sentiment/data
+./get_imdb.sh
+```
+如果数据获取成功,你将在目录`./demo/sentiment/data`中看到下面的文件:
+
+```
+aclImdb get_imdb.sh imdb mosesdecoder-master
+```
+
+* aclImdb: 从外部网站上下载的原始数据集。
+* imdb: 仅包含训练和测试数据集。
+* mosesdecoder-master: Moses 工具。
+
+IMDB数据集包含25,000个已标注过的高极性电影评论用于训练,25,000个用于测试。负面评论的得分小于等于4,正面评论的得分大于等于7,满分10分。运行完脚本`./get_imdb.sh`后,我们可以看到目录`aclImdb`中的数据集结构如下:
+
+```
+imdbEr.txt imdb.vocab README test train
+```
+* train: 训练数据集。
+* test: 测试数据集。
+* imdb.vocab: 字典文件。
+* imdbEr.txt: 字典imdb.vocab中每个切分单词的预期评级。
+* README: 数据说明文档。
+
+测试集和训练集目录包含下面的文件:
+
+```
+labeledBow.feat neg pos unsup unsupBow.feat urls_neg.txt urls_pos.txt urls_unsup.txt
+```
+
+* pos: 正面评价样本,包含12,500个txt文件,每个文件是一个电影评论。
+* neg: 负面评价样本,包含12,500个txt文件,每个文件是一个电影评论。
+* unsup: 未标记的评价样本,包含50,000个txt文件。
+* urls_xx.txt: 每个评论的网址。
+* xxBow.feat: 用于统计词频的Bow模型特征。
+
+### IMDB 数据准备
+
+在这个例子中,我们只使用已经标注过的训练集和测试集,且默认在训练集上构建字典,而不使用IMDB数据集中的imdb.vocab做为字典。训练集已经做了随机打乱排序而测试集没有。Moses工具中的脚本`tokenizer.perl`用于切分单词和标点符号。执行下面的命令就可以预处理数据。
+
+```
+cd demo/sentiment/
+./preprocess.sh
+```
+preprocess.sh:
+
+```
+data_dir="./data/imdb"
+python preprocess.py -i $data_dir
+```
+
+* data_dir: 输入数据所在目录。
+* preprocess.py: 预处理脚本。
+
+运行成功后目录`demo/sentiment/data/pre-imdb`结构如下:
+
+```
+dict.txt labels.list test.list test_part_000 train.list train_part_000
+```
+* test\_part\_000 和 train\_part\_000: 所有标记的测试集和训练集,训练集已经随机打乱。
+* train.list 和 test.list: 训练集和测试集文件列表。
+* dict.txt: 利用训练集生成的字典。
+* labels.list: 类别标签列表(neg 0, pos 1),标签0表示负面评论,标签1表示正面评论。
+
+### 用户自定义数据预处理
+
+如果你要执行其他用情感分析来分类文本的任务,可以按如下的结构来准备数据。我们提供了脚本来构建字典和预处理数据,所以你只需按下面的结构来组织数据即可。
+
+```
+dataset
+|----train
+|  |----class1
+|  |  |----text_files
+|  |----class2
+|  |  |----text_files
+|  |  ...
+|----test
+|  |----class1
+|  |  |----text_files
+|  |----class2
+|  |  |----text_files
+|  |  ...
+```
+* dataset: 一级目录。
+* train, test: 二级目录。
+* class1, class2, ...: 三级目录。
+* text_files: 文本格式的实例文件。
+
+同一目录下的所有文本实例文件属于同一类别。每个文本文件包含一个或者多个实例,每一行表示一个实例。为了充分随机打乱训练集,在预处理含有多行数据的文本文件时参数设置稍有不同,执行`preprocess.sh`脚本时需要加上`-m True`参数。tokenizer.perl默认用来切分单词和标点符号,如果你不需要这个操作,在运行`preprocess.sh`时加上`-t False`参数即可。
+
+## 训练模型
+
+在这项任务中,我们使用了循环神经网络(RNN)的LSTM架构来训练情感分析模型。引入LSTM模型主要是为了克服梯度消失的问题。LSTM网络类似于具有隐藏层的标准循环神经网络,但是隐藏层中的每个普通节点被一个记忆单元替换。每个记忆单元包含四个主要的元素:输入门、具有自循环连接的神经元、遗忘门和输出门。更多的细节可以在文献 [4] 中找到。LSTM架构的最大优点是它可以在长时间间隔内记忆信息,而没有短时记忆的损失。在新单词到来的每一个时间步内,存储在记忆单元区块的历史信息被更新,用来迭代地学习单词以合理的序列呈现。
+
+<center>![LSTM](src/lstm.png)</center>
+<center>图 1. LSTM [3]</center>
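+
+结合上文对门机制的描述,下面给出LSTM记忆单元一种常见的计算形式,便于理解各个门的作用(这只是依据文中描述整理的示意公式,符号约定与具体变体请以文献 [4] 为准):
+
+```latex
+% 输入门、遗忘门、输出门,\sigma 为 sigmoid 函数
+i_t = \sigma(W_i x_t + U_i h_{t-1} + b_i) \\
+f_t = \sigma(W_f x_t + U_f h_{t-1} + b_f) \\
+o_t = \sigma(W_o x_t + U_o h_{t-1} + b_o) \\
+% 候选记忆与记忆单元状态更新,\odot 表示逐元素相乘
+\tilde{c}_t = \tanh(W_c x_t + U_c h_{t-1} + b_c) \\
+c_t = f_t \odot c_{t-1} + i_t \odot \tilde{c}_t \\
+% 隐藏状态输出
+h_t = o_t \odot \tanh(c_t)
+```
+
+当遗忘门的输出接近1时,历史状态几乎无衰减地向后传递,这正是LSTM能够在长时间间隔内保留信息的原因。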
+
+情感分析是自然语言理解中最典型的问题之一。它的目的是预测在一个序列中表达的情感态度。通常,仅仅是一些关键词(如形容词和副词)在预测序列或段落的情感中起主要作用。然而有些评论的上下文非常长,例如IMDB的数据集。我们之所以使用LSTM来执行这个任务,是因为其改进的设计并且具有门机制。首先,它能够从词级到具有可变上下文长度的上下文级别来总结表示。第二,它可以在句子级别利用可扩展的上下文,而大多数方法只是利用n-gram级别的知识。第三,它直接学习段落表示,而不是组合上下文级别的信息。
+
+在本演示中,我们提供两个网络,即双向LSTM和三层堆叠LSTM。
+
+#### 双向LSTM
+
+图2是双向LSTM网络,后面连接全连接层和softmax层。
+
+<center>![BiLSTM](src/bi_lstm.jpg)</center>
+<center>图 2. Bidirectional-LSTM</center>
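+
+`bidirectional_lstm_net`的真实定义在`demo/sentiment/sentiment_net.py`中。作为理解图2结构的参考,下面用配置接口的常见原语勾勒一个极简的示意版本(其中层的尺寸、`reverse`参数以及`concat_layer`、`MaxPooling`的用法均为本文的假设,请以实际实现为准):
+
+```python
+from paddle.trainer_config_helpers import *
+
+dict_dim = 10000  # 字典大小,示意值;实际应由 sentiment_data() 返回
+class_dim = 2     # 类别数:正面 / 负面
+
+word = data_layer(name="word", size=dict_dim)
+label = data_layer(name="label", size=class_dim)
+
+emb = embedding_layer(input=word, size=128)           # 词向量层
+fwd = simple_lstm(input=emb, size=128)                # 前向LSTM
+bwd = simple_lstm(input=emb, size=128, reverse=True)  # 反向LSTM(假设支持reverse参数)
+merged = concat_layer(input=[fwd, bwd])               # 拼接两个方向的隐层输出
+pooled = pooling_layer(input=merged,
+                       pooling_type=MaxPooling())     # 把变长序列汇聚成定长向量
+output = fc_layer(input=pooled, size=class_dim,
+                  act=SoftmaxActivation())            # 全连接 + softmax 分类
+
+outputs(classification_cost(input=output, label=label))
+```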
+
+#### Stacked-LSTM
+
+图3是三层LSTM结构。图的底部是word embedding(对文档处理后形成的单词向量)。接下来,连接三个LSTM隐藏层,并且第二个是反向LSTM。然后在隐藏LSTM层的所有时间步上取最大值(最大池化),作为整个序列的表示。最后,使用具有softmax激活的全连接前馈层来执行分类任务。更多内容可查看参考文献 [5]。
+
+<center>![StackedLSTM](src/stacked_lstm.jpg)</center>
+<center>图 3. Stacked-LSTM for sentiment analysis</center>
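+
+同样,`stacked_lstm_net`预定义在`demo/sentiment/sentiment_net.py`中。下面按本段描述勾勒其主干的一个示意版本(层数为3;层的尺寸与`reverse`参数等细节为本文的假设,真实实现还可能包含层间全连接、dropout等细节):
+
+```python
+from paddle.trainer_config_helpers import *
+
+dict_dim = 10000  # 字典大小,示意值
+class_dim = 2
+
+word = data_layer(name="word", size=dict_dim)
+label = data_layer(name="label", size=class_dim)
+
+emb = embedding_layer(input=word, size=128)
+
+# 三个堆叠的LSTM隐藏层,其中第二层为反向(对应图3)
+lstm1 = simple_lstm(input=emb, size=128)
+lstm2 = simple_lstm(input=lstm1, size=128, reverse=True)
+lstm3 = simple_lstm(input=lstm2, size=128)
+
+# 对最后一个隐藏层的所有时间步做最大池化,得到整个序列的表示
+seq_repr = pooling_layer(input=lstm3, pooling_type=MaxPooling())
+
+# softmax全连接层完成二分类
+output = fc_layer(input=seq_repr, size=class_dim, act=SoftmaxActivation())
+outputs(classification_cost(input=output, label=label))
+```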
+
+**配置**
+
+进入`demo/sentiment`目录,`trainer_config.py`是一个配置文件的例子,其中包含算法和网络配置。第一行从`sentiment_net.py`中导入预定义的网络。
+
+trainer_config.py:
+
+```python
+from sentiment_net import *
+
+data_dir = "./data/pre-imdb"
+# whether this config is used for test
+is_test = get_config_arg('is_test', bool, False)
+# whether this config is used for prediction
+is_predict = get_config_arg('is_predict', bool, False)
+dict_dim, class_dim = sentiment_data(data_dir, is_test, is_predict)
+
+################## Algorithm Config #####################
+
+settings(
+    batch_size=128,
+    learning_rate=2e-3,
+    learning_method=AdamOptimizer(),
+    regularization=L2Regularization(8e-4),
+    gradient_clipping_threshold=25
+)
+
+#################### Network Config ######################
+stacked_lstm_net(dict_dim, class_dim=class_dim,
+                 stacked_num=3, is_predict=is_predict)
+#bidirectional_lstm_net(dict_dim, class_dim=class_dim, is_predict=is_predict)
+```
+
+* **数据定义**:
+    * get\_config\_arg(): 获取通过 `--config_args=xx` 设置的命令行参数。
+    * 定义训练数据和测试数据提供者,这里使用了PaddlePaddle的Python接口来加载数据。想了解更多细节可以参考PyDataProvider部分的文档。
+
+* **算法配置**:
+    * 设置batch size为128。
+    * 设置全局学习率为2e-3。
+    * 使用Adam优化方法。
+    * 设置L2正则项系数为8e-4。
+    * 设置梯度截断(clipping)阈值为25。
+* **网络配置**:
+    * dict_dim: 获取字典维度。
+    * class_dim: 设置类别数,IMDB有两个标签,即正面评价标签和负面评价标签。
+    * `stacked_lstm_net`: 预定义网络,如图3所示,默认情况下使用此网络。
+    * `bidirectional_lstm_net`: 预定义网络,如图2所示。
+
+**训练**
+
+首先安装PaddlePaddle。然后使用下面的脚本`train.sh`来开启本地的训练。
+
+```
+cd demo/sentiment/
+./train.sh
+```
+
+train.sh:
+
+```
+config=trainer_config.py
+output=./model_output
+paddle train --config=$config \
+             --save_dir=$output \
+             --job=train \
+             --use_gpu=false \
+             --trainer_count=4 \
+             --num_passes=10 \
+             --log_period=20 \
+             --dot_period=20 \
+             --show_parameter_stats_period=100 \
+             --test_all_data_in_one_period=1 \
+             2>&1 | tee 'train.log'
+```
+
+* \--config=$config: 设置网络配置。
+* \--save\_dir=$output: 设置输出路径以保存训练完成的模型。
+* \--job=train: 设置工作模式为训练。
+* \--use\_gpu=false: 使用CPU训练。如果你安装了GPU版本的PaddlePaddle并想使用GPU来训练,请设置为true。
+* \--trainer\_count=4: 设置线程数(或GPU个数)。
+* \--num\_passes=10: 设置pass数。PaddlePaddle中的一个pass表示对数据集中所有样本进行一次完整训练。
+* \--log\_period=20: 每20个batch打印一次日志。
+* \--dot\_period=20: 每20个batch输出一个“.”作为进度提示。
+* \--show\_parameter\_stats\_period=100: 每100个batch打印一次参数统计信息。
+* \--test\_all\_data\_in\_one\_period=1: 每次测试都测试所有数据。
+
+如果运行成功,输出日志保存在`demo/sentiment/train.log`中,模型保存在目录`demo/sentiment/model_output/`中。输出日志说明如下:
+
+```
+Batch=20 samples=2560 AvgCost=0.681644 CurrentCost=0.681644 Eval: classification_error_evaluator=0.36875 CurrentEval: classification_error_evaluator=0.36875
+...
+Pass=0 Batch=196 samples=25000 AvgCost=0.418964 Eval: classification_error_evaluator=0.1922
+Test samples=24999 cost=0.39297 Eval: classification_error_evaluator=0.149406
+```
+- Batch=xx: 表示训练了xx个batch。
+- samples=xx: 表示训练了xx个样本。
+- AvgCost=xx: 从第0个batch到当前batch的平均损失。
+- CurrentCost=xx: 最近log_period个batch的平均损失。
+- Eval: classification\_error\_evaluator=xx: 表示第0个batch到当前batch的分类错误率。
+- CurrentEval: classification\_error\_evaluator: 最近log_period个batch的分类错误率。
+- Pass=0: 完整遍历一次训练集称为一个pass,0表示第一个pass。
+
+默认情况下,我们使用`stacked_lstm_net`网络;在训练相同样本数时,它的收敛速度比`bidirectional_lstm_net`快。如果要使用双向LSTM,只需删除配置最后一行的注释,并注释掉`stacked_lstm_net`所在行。
+
+## 测试模型
+
+测试模型是指使用训练出的模型评估已标记的验证集。
+
+```
+cd demo/sentiment
+./test.sh
+```
+
+test.sh:
+
+```bash
+function get_best_pass() {
+  cat $1 | grep -Pzo 'Test .*\n.*pass-.*' | \
+  sed -r 'N;s/Test.* error=([0-9]+\.[0-9]+).*\n.*pass-([0-9]+)/\1 \2/g' | \
+  sort | head -n 1
+}
+
+log=train.log
+LOG=`get_best_pass $log`
+LOG=(${LOG})
+evaluate_pass="model_output/pass-${LOG[1]}"
+
+echo 'evaluating from pass '$evaluate_pass
+
+model_list=./model.list
+touch $model_list
+echo $evaluate_pass > $model_list
+net_conf=trainer_config.py
+paddle train --config=$net_conf \
+             --model_list=$model_list \
+             --job=test \
+             --use_gpu=false \
+             --trainer_count=4 \
+             --config_args=is_test=1 \
+             2>&1 | tee 'test.log'
+```
+
+函数`get_best_pass`依据分类错误率选出最佳模型用于测试。在本示例中,我们默认使用IMDB的测试数据集作为验证集。与训练不同,这里需要指定`--job=test`和模型路径`--model_list=$model_list`。如果运行成功,日志将保存在`demo/sentiment/test.log`中。例如,在我们的测试中,最好的模型是`model_output/pass-00002`,分类错误率是0.115645,如下:
+
+```
+Pass=0 samples=24999 AvgCost=0.280471 Eval: classification_error_evaluator=0.115645
+```
+
+## 预测
+
+`predict.py`脚本提供了一个预测接口。使用它之前请先安装PaddlePaddle的Python API。预测IMDB中未标记评论的一个实例如下:
+
+```
+cd demo/sentiment
+./predict.sh
+```
+predict.sh:
+
+```
+# Note: the default model is pass-00002, you should make sure the model path
+# exists or change the model path.
+model=model_output/pass-00002/
+config=trainer_config.py
+label=data/pre-imdb/labels.list
+cat ./data/aclImdb/test/pos/10007_10.txt | python predict.py \
+    --tconf=$config \
+    --model=$model \
+    --label=$label \
+    --dict=./data/pre-imdb/dict.txt \
+    --batch_size=1
+```
+
+* `cat ./data/aclImdb/test/pos/10007_10.txt`: 输入预测样本。
+* `predict.py`: 预测接口脚本。
+* `--tconf=$config`: 设置网络配置。
+* `--model=$model`: 设置模型路径。
+* `--label=$label`: 设置标签类别字典,这个字典是整数标签和字符串标签的一个对应。
+* `--dict=./data/pre-imdb/dict.txt`: 设置字典文件。
+* `--batch_size=1`: 设置batch size。
+
+注意应确保默认模型路径`model_output/pass-00002`存在,或将其更改为其它模型路径。
+
+本示例的预测结果:
+
+```
+Loading parameters from model_output/pass-00002/
+./data/aclImdb/test/pos/10007_10.txt: predicting label is pos
+```
+我们真诚地感谢您的关注,并欢迎您参与贡献。
+
+## 参考文献
+[1] Brendan O'Connor, Ramnath Balasubramanyan, Bryan R. Routledge, and Noah A. Smith. 2010. [From Tweets to Polls: Linking Text Sentiment to Public Opinion Time Series](http://homes.cs.washington.edu/~nasmith/papers/oconnor+balasubramanyan+routledge+smith.icwsm10.pdf). In ICWSM-2010.
+[2] Johan Bollen, Huina Mao, and Xiaojun Zeng. 2011. [Twitter mood predicts the stock market](http://arxiv.org/abs/1010.3003). Journal of Computational Science.
+[3] Alex Graves, Marcus Liwicki, Santiago Fernandez, Roman Bertolami, Horst Bunke, and Jürgen Schmidhuber. 2009. [A novel connectionist system for unconstrained handwriting recognition](http://www.cs.toronto.edu/~graves/tpami_2009.pdf). IEEE Transactions on Pattern Analysis and Machine Intelligence, 31(5):855–868.
+[4] Zachary C. Lipton. 2015. [A Critical Review of Recurrent Neural Networks for Sequence Learning](http://arxiv.org/abs/1506.00019v1). arXiv:1506.00019.
+[5] Jie Zhou and Wei Xu. 2015. [End-to-end Learning of Semantic Role Labeling Using Recurrent Neural Networks](http://www.aclweb.org/anthology/P/P15/P15-1109.pdf). In ACL-IJCNLP 2015.
diff --git a/doc/tutorials/sentiment_analysis/src/bi_lstm.jpg b/doc/tutorials/sentiment_analysis/src/bi_lstm.jpg new file mode 100644 index 0000000000000000000000000000000000000000..adec1606d64d6e35ffe7e62abfa9a09309b05c84 Binary files /dev/null and b/doc/tutorials/sentiment_analysis/src/bi_lstm.jpg differ diff --git a/doc/tutorials/sentiment_analysis/src/lstm.png b/doc/tutorials/sentiment_analysis/src/lstm.png new file mode 100644 index 0000000000000000000000000000000000000000..aaf1fc690da2ffb8418cde5ed81848ddb5263030 Binary files /dev/null and b/doc/tutorials/sentiment_analysis/src/lstm.png differ diff --git a/doc/tutorials/sentiment_analysis/src/stacked_lstm.jpg b/doc/tutorials/sentiment_analysis/src/stacked_lstm.jpg new file mode 100644 index 0000000000000000000000000000000000000000..4239055050966e0095e188a8c81d860711bce29d Binary files /dev/null and b/doc/tutorials/sentiment_analysis/src/stacked_lstm.jpg differ diff --git a/doc_cn/CMakeLists.txt b/doc_cn/CMakeLists.txt deleted file mode 100644 index 314b34525ca1d328f4e3b9814ee26deed39d89fd..0000000000000000000000000000000000000000 --- a/doc_cn/CMakeLists.txt +++ /dev/null @@ -1,31 +0,0 @@ -if(NOT DEFINED SPHINX_THEME) - set(SPHINX_THEME default) -endif() - -if(NOT DEFINED SPHINX_THEME_DIR) - set(SPHINX_THEME_DIR) -endif() - -# configured documentation tools and intermediate build results -set(BINARY_BUILD_DIR "${CMAKE_CURRENT_BINARY_DIR}/_build") - -# Sphinx cache with pickled ReST documents -set(SPHINX_CACHE_DIR "${CMAKE_CURRENT_BINARY_DIR}/_doctrees") - -# HTML output directory -set(SPHINX_HTML_DIR "${CMAKE_CURRENT_BINARY_DIR}/html") - -configure_file( - "${CMAKE_CURRENT_SOURCE_DIR}/conf.py.in" - "${BINARY_BUILD_DIR}/conf.py" - @ONLY) - -sphinx_add_target(paddle_docs_cn - html - ${BINARY_BUILD_DIR} - ${SPHINX_CACHE_DIR} - ${CMAKE_CURRENT_SOURCE_DIR} - ${SPHINX_HTML_DIR}) - -add_dependencies(paddle_docs_cn - gen_proto_py) diff --git a/doc_cn/build_and_install/cmake/index.rst b/doc_cn/build_and_install/cmake/index.rst deleted file mode 100644 index e2a12c500177ea5b075416380796ab82e1217f60..0000000000000000000000000000000000000000 --- a/doc_cn/build_and_install/cmake/index.rst +++ /dev/null @@ -1,8 +0,0 @@ -使用cmake编译PaddlePaddle -========================= - -.. 
toctree:: - - install_deps.rst - compile_options.rst - make_and_install.rst diff --git a/doc_cn/build_and_install/cmake/install_deps.rst b/doc_cn/build_and_install/cmake/install_deps.rst deleted file mode 100644 index 7fa4665a954bd41e74145c4a1b00734c3ac41d83..0000000000000000000000000000000000000000 --- a/doc_cn/build_and_install/cmake/install_deps.rst +++ /dev/null @@ -1,4 +0,0 @@ -安装编译PaddlePaddle需要的依赖 -============================== - -参见 `安装编译依赖 <../../../doc/build/build_from_source.html#install-dependencies>`_ diff --git a/doc_cn/build_and_install/cmake/make_and_install.rst b/doc_cn/build_and_install/cmake/make_and_install.rst deleted file mode 100644 index 212b9c9352b01db5215221a6c2faafe0d679d962..0000000000000000000000000000000000000000 --- a/doc_cn/build_and_install/cmake/make_and_install.rst +++ /dev/null @@ -1,4 +0,0 @@ -make和make install -================== - -参见 `make和make install <../../../doc/build/build_from_source.html#build-and-install>`_ diff --git a/doc_cn/build_and_install/install/paddle_ssh.Dockerfile b/doc_cn/build_and_install/install/paddle_ssh.Dockerfile deleted file mode 100644 index 7cb947bddf4593259cb69f525b44015836291605..0000000000000000000000000000000000000000 --- a/doc_cn/build_and_install/install/paddle_ssh.Dockerfile +++ /dev/null @@ -1,15 +0,0 @@ -FROM paddledev/paddle:cpu-latest - -MAINTAINER PaddlePaddle dev team - -RUN apt-get update -RUN apt-get install -y openssh-server -RUN mkdir /var/run/sshd -RUN echo 'root:root' | chpasswd - -RUN sed -ri 's/^PermitRootLogin\s+.*/PermitRootLogin yes/' /etc/ssh/sshd_config -RUN sed -ri 's/UsePAM yes/#UsePAM yes/g' /etc/ssh/sshd_config - -EXPOSE 22 - -CMD ["/usr/sbin/sshd", "-D"] diff --git a/doc_cn/build_and_install/install/paddle_version.txt b/doc_cn/build_and_install/install/paddle_version.txt deleted file mode 100644 index a80873303fd0d05d963482629000d76260185ef6..0000000000000000000000000000000000000000 --- a/doc_cn/build_and_install/install/paddle_version.txt +++ /dev/null @@ -1,11 +0,0 @@ -PaddlePaddle 0.8.0b1, compiled with - with_avx: ON - with_gpu: OFF - with_double: OFF - with_python: ON - with_rdma: OFF - with_glog: ON - with_gflags: ON - with_metric_learning: - with_timer: OFF - with_predict_sdk: diff --git a/doc_cn/cluster/index.rst b/doc_cn/cluster/index.rst deleted file mode 100644 index 25313a9635bbf567a1aedfac3c379802d601d283..0000000000000000000000000000000000000000 --- a/doc_cn/cluster/index.rst +++ /dev/null @@ -1,11 +0,0 @@ -集群训练 -======== - -* `集群训练 <../../doc/cluster/index.html>`_ - -.. 
toctree:: - :maxdepth: 2 - :glob: - - 集群训练(对内) - diff --git a/doc_cn/demo/index.rst b/doc_cn/demo/index.rst deleted file mode 100644 index e15e839f93d4ac0d455e49fd8b1cde8bf60a29ac..0000000000000000000000000000000000000000 --- a/doc_cn/demo/index.rst +++ /dev/null @@ -1,26 +0,0 @@ -使用示例 -======== - -图像 -'''' - -* `图像分类 <../../doc/demo/image_classification/index.html>`_ - -自然语言处理 -'''''''''''' - -* `情感分析 `_ -* `文本生成 <../../doc/demo/text_generation/index.html>`_ -* `词性标注 <../../doc/demo/semantic_role_labeling/index.html>`_ - -推荐 -'''' - -* `MovieLens数据集 <../../doc/demo/rec/ml_dataset.html>`_ -* `MovieLens评分回归 <../../doc/demo/rec/ml_regression.html>`_ - -常用模型 -'''''''' - -* `ImageNet: ResNet <../../doc/demo/imagenet_model/resnet_model.html>`_ -* `Embedding: Chinese Word <../../doc/demo/embedding_model/index.html>`_ diff --git a/doc_cn/demo/quick_start/index.md b/doc_cn/demo/quick_start/index.md deleted file mode 100644 index 4a6e07ee1ffd94cf8f781af307b53a96a78e6b93..0000000000000000000000000000000000000000 --- a/doc_cn/demo/quick_start/index.md +++ /dev/null @@ -1,543 +0,0 @@ -# PaddlePaddle快速入门教程 - -我们以文本分类问题作为背景,介绍PaddlePaddle使用流程和常用的网络基础单元的配置方法。 - -## 安装(Install) - -首先请参考安装教程安装PaddlePaddle。 - -## 使用概述(Overview) - -**文本分类问题**:对于给定的一条文本, 我们从提前给定的类别集合中选择其所属类 -别。比如通过用户对电子商务网站评论,评估产品的质量: - -- 这个显示器很棒! (好评) -- 用了两个月之后这个显示器屏幕碎了。(差评) - -每一个任务流程都可以分为如下5个基础部分。 -
![](./Pipeline.jpg)
- -1. 数据格式准备 - - 每行保存一条样本,类别Id 和文本信息用Tab间隔, 文本中的单词用空格分隔(如果不切词,则字与字之间用空格分隔),例如:```类别Id ‘\t’ 这 个 显 示 器 很 棒 !``` -2. 数据向模型传送 - - PaddlePaddle可以读取Python写的传输数据脚本,所有字符都将转换为连续整数表示的Id传给模型 -3. 网络结构(由易到难展示4种不同的网络配置) - - 逻辑回归模型 - - 词向量模型 - - 卷积模型 - - 时序模型 - - 优化算法 -4. 训练模型 -5. 预测 - -## 数据格式准备(Data Preparation) -在本问题中,我们使用[Amazon电子产品评论数据](http://jmcauley.ucsd.edu/data/amazon/), -将评论分为好评(正样本)和差评(负样本)两类。[源码](https://github.com/PaddlePaddle/Paddle)的`demo/quick_start`里提供了下载已经预处理数据的脚本(如果想从最原始的数据处理,可以使用脚本 `./demo/quick_start/data/proc_from_raw_data/get_data.sh`)。 - -```bash -cd demo/quick_start -./data/get_data.sh -``` - -## 数据向模型传送(Transfer Data to Model) - -### Python数据加载脚本(Data Provider Script) - -下面dataprovider_bow.py文件给出了完整例子,主要包括两部分: - -* initalizer: 定义文本信息、类别Id的数据类型。 -* process: yield文本信息和类别Id,和initalizer里定义顺序一致。 - -```python -from paddle.trainer.PyDataProvider2 import * - -# id of the word not in dictionary -UNK_IDX = 0 - -# initializer is called by the framework during initialization. -# It allows the user to describe the data types and setup the -# necessary data structure for later use. -# `settings` is an object. initializer need to properly fill settings.input_types. -# initializer can also store other data structures needed to be used at process(). -# In this example, dictionary is stored in settings. -# `dictionay` and `kwargs` are arguments passed from trainer_config.lr.py -def initializer(settings, dictionary, **kwargs): - # Put the word dictionary into settings - settings.word_dict = dictionary - - # setting.input_types specifies what the data types the data provider - # generates. - settings.input_types = [ - # The first input is a sparse_binary_vector, - # which means each dimension of the vector is either 0 or 1. It is the - # bag-of-words (BOW) representation of the texts. - sparse_binary_vector(len(dictionary)), - # The second input is an integer. It represents the category id of the - # sample. 2 means there are two labels in the dataset. - # (1 for positive and 0 for negative) - integer_value(2)] - -# Delaring a data provider. It has an initializer 'data_initialzer'. -# It will cache the generated data of the first pass in memory, so that -# during later pass, no on-the-fly data generation will be needed. -# `setting` is the same object used by initializer() -# `file_name` is the name of a file listed train_list or test_list file given -# to define_py_data_sources2(). See trainer_config.lr.py. -@provider(init_hook=initializer, cache=CacheType.CACHE_PASS_IN_MEM) -def process(settings, file_name): - # Open the input data file. - with open(file_name, 'r') as f: - # Read each line. - for line in f: - # Each line contains the label and text of the comment, separated by \t. - label, comment = line.strip().split('\t') - - # Split the words into a list. - words = comment.split() - - # convert the words into a list of ids by looking them up in word_dict. - word_vector = [settings.word_dict.get(w, UNK_IDX) for w in words] - - # Return the features for the current comment. The first is a list - # of ids representing a 0-1 binary sparse vector of the text, - # the second is the integer id of the label. - yield word_vector, int(label) -``` - -### 配置中的数据加载定义(Data Provider in Configure) - -在模型配置中利用`define_py_data_sources2`加载数据: - -```python -from paddle.trainer_config_helpers import * - -file = "data/dict.txt" -word_dict = dict() -with open(dict_file, 'r') as f: - for i, line in enumerate(f): - w = line.strip().split()[0] - word_dict[w] = i -# define the data sources for the model. 
-# We need to use different process for training and prediction. -# For training, the input data includes both word IDs and labels. -# For prediction, the input data only includs word Ids. -define_py_data_sources2(train_list='data/train.list', - test_list='data/test.list', - module="dataprovider_bow", - obj="process", - args={"dictionary": word_dict}) -``` -* data/train.list,data/test.list: 指定训练、测试数据 -* module="dataprovider": 数据处理Python文件名 -* obj="process": 指定生成数据的函数 -* args={"dictionary": word_dict}: 额外的参数,这里指定词典 - -更详细数据格式和用例请参考 -PyDataProvider2。 - -## 网络结构(Network Architecture) -本节我们将专注于网络结构的介绍。 -
![](./PipelineNetwork.jpg)
- -我们将以基本的逻辑回归网络作为起点,并逐渐展示更加深入的功能。更详细的网络配置 -连接请参考Layer文档。 -所有配置在[源码](https://github.com/PaddlePaddle/Paddle)`demo/quick_start`目录,首先列举逻辑回归网络。 - -### 逻辑回归模型(Logistic Regression) - -流程如下: -
![](./NetLR.jpg)
- -- 获取利用one-hot vector表示的每个单词,维度是词典大小 - -```python -word = data_layer(name="word", size=word_dim) -``` - -- 获取该条样本类别Id,维度是类别个数。 - -```python -label = data_layer(name="label", size=label_dim) -``` - -- 利用逻辑回归模型对该向量进行分类,同时会计算分类准确率 - -```python -# Define a fully connected layer with logistic activation (also called softmax activation). -output = fc_layer(input=word, - size=label_dim, - act_type=SoftmaxActivation()) -# Define cross-entropy classification loss and error. -classification_cost(input=output, label=label) -``` - - - input: 除过data层,每个层都有一个或多个input,多个input以list方式输入 - - size: 该层神经元个数 - - act_type: 激活函数类型 - -效果总结:我们将在后面介绍训练和预测的流程的脚本。在此为方便对比不同网络结构, -我们随时总结了各个网络的复杂度和效果。 - - -
-<table>
-<tr><th>网络名称</th><th>参数数量</th><th>错误率</th></tr>
-<tr><td>逻辑回归</td><td>252 KB</td><td>8.652%</td></tr>
-</table>
-
- -### 词向量模型(Word Vector) - -embedding模型需要稍微改变数据提供的脚本,即`dataprovider_emb.py`,词向量模型、 -卷积模型、时序模型均使用该脚本。其中文本输入类型定义为整数时序类型integer_value_sequence。 - -``` -def initializer(settings, dictionary, **kwargs): - settings.word_dict = dictionary - settings.input_types = [ - # Define the type of the first input as sequence of integer. - # The value of the integers range from 0 to len(dictrionary)-1 - integer_value_sequence(len(dictionary)), - # Define the second input for label id - integer_value(2)] - -@provider(init_hook=initializer) -def process(settings, file_name): - ... - # omitted, it is same as the data provider for LR model -``` - -该模型依然是使用逻辑回归分类网络的框架, 只是将句子利用连续向量表示替换稀疏 -向量表示, 即对第3步进行替换。句子表示的计算更新为2步: -
![](./NetContinuous.jpg)
- -- 利用单词Id查找对应的该单词的连续表示向量(维度为word_dim), 输入N个单词,输出为N个word_dim维度向量 - -```python -emb = embedding_layer(input=word, size=word_dim) -``` - -- 将该句话包含的所有单词向量求平均得到句子的表示 - -```python -avg = pooling_layer(input=emb, pooling_type=AvgPooling()) -``` - -其它部分和逻辑回归网络结构一致。 -效果总结: - - -
-<table>
-<tr><th>网络名称</th><th>参数数量</th><th>错误率</th></tr>
-<tr><td>词向量模型</td><td>15 MB</td><td>8.484%</td></tr>
-</table>
-
- -### 卷积模型(Convolution) -卷积网络是一种特殊的从词向量表示到句子表示的方法, 也就是将词向量模型额步 -骤3-2进行进一步演化, 变为3个新的子步骤。 -
![](./NetConv.jpg)
- -文本卷积分为三个步骤: -1. 获取每个单词左右各k个近邻, 拼接成一个新的向量表示; -2. 对该表示进行非线性变换 (例如Sigmoid变换), 成为维度为hidden_dim的新的向量; -3. 在每个维度上取出在该句话新的向量集合上该维度的最大值作为最后的句子表示向量。 这3个子步骤可配置为: - -```python -text_conv = sequence_conv_pool(input=emb, - context_start=k, - context_len=2 * k + 1) -``` - -效果总结: - - -
-<table>
-<tr><th>网络名称</th><th>参数数量</th><th>错误率</th></tr>
-<tr><td>卷积模型</td><td>16 MB</td><td>5.628%</td></tr>
-</table>
-
-### 时序模型(Time Sequence)
-
![](./NetRNN.jpg)
- -时序模型即为RNN模型, 包括简单的RNN模型、GRU模型、LSTM模型等。 - -- GRU模型配置: - -```python -gru = simple_gru(input=emb, size=gru_size) -``` - -- LSTM模型配置: - -```python -lstm = simple_lstm(input=emb, size=lstm_size) -``` - -针对本问题,我们采用单层LSTM模型,并使用了Dropout,效果总结: - - -
-<table>
-<tr><th>网络名称</th><th>参数数量</th><th>错误率</th></tr>
-<tr><td>时序模型</td><td>16 MB</td><td>4.812%</td></tr>
-</table>
-
- -## 优化算法(Optimization Algorithm) -优化算法包括 -Momentum, RMSProp,AdaDelta,AdaGrad,ADAM,Adamax等,这里采用Adam优化方法,加了L2正则和梯度截断。 - -```python -settings(batch_size=128, - learning_rate=2e-3, - learning_method=AdamOptimizer(), - regularization=L2Regularization(8e-4), - gradient_clipping_threshold=25) -``` - -## 训练模型(Training Model) -在完成了数据和网络结构搭建之后, 我们进入到训练部分。 -
![](./PipelineTrain.jpg)
- -训练脚本:我们将训练的命令行保存在了 `train.sh`文件中。训练时所需设置的主要参数如下: - -```bash -paddle train \ ---config=trainer_config.py \ ---log_period=20 \ ---save_dir=./output \ ---num_passes=15 \ ---use_gpu=false -``` -这里没有介绍多机分布式训练,可以参考分布式训练的demo学习如何进行多机训练。 - -## 预测(Prediction) -可以使用训练好的模型评估带有label的验证集,也可以预测没有label的测试集。 -
![](./PipelineTest.jpg)
- -测试脚本如下,将会测试配置文件中test.list指定的数据。 - -```bash -paddle train \ ---use_gpu=false \ ---job=test \ ---init_model_path=./output/pass-0000x -``` - -可以参考Python API预测 -教程,或其他demo的Python预测过程。也可以通过如下方式预测。 - -预测脚本(`predict.sh`): - -```bash -model="output/pass-00003" -paddle train \ - --config=trainer_config.lstm.py \ - --use_gpu=false \ - --job=test \ - --init_model_path=$model \ - --config_args=is_predict=1 \ - --predict_output_dir=. \ - -mv rank-00000 result.txt -``` -这里以`output/pass-00003`为例进行预测,用户可以根据训练log选择test结果最好的模型来预测。与训练网络配置不同的是:无需label相关的层,指定outputs输出概率层(softmax输出), -指定batch_size=1,数据传输无需label数据,预测数据指定test_list的位置。 - -预测结果以文本的形式保存在`result.txt`中,一行为一个样本,格式如下: - -``` -预测ID;ID为0的概率 ID为1的概率 -预测ID;ID为0的概率 ID为1的概率 -``` - -``` -is_predict = get_config_arg('is_predict', bool, False) -trn = 'data/train.list' if not is_predict else None -tst = 'data/test.list' if not is_predict else 'data/pred.list' -obj = 'process' if not is_predict else 'process_pre' -batch_size = 128 if not is_predict else 1 -if is_predict: - maxid = maxid_layer(output) - outputs([maxid,output]) -else: - label = data_layer(name="label", size=2) - cls = classification_cost(input=output, label=label) - outputs(cls) -``` - -## 总体效果总结(Summary) -这些流程中的数据下载、网络配置、训练脚本在`/demo/quick_start`目录,我们在此总 -结上述网络结构在Amazon-Elec测试集(25k)上的效果: - -
-<table>
-<tr><th>网络名称</th><th>参数数量</th><th>错误率</th><th>配置文件</th></tr>
-<tr><td>逻辑回归模型</td><td>252KB</td><td>8.652%</td><td>trainer_config.lr.py</td></tr>
-<tr><td>词向量模型</td><td>15MB</td><td>8.484%</td><td>trainer_config.emb.py</td></tr>
-<tr><td>卷积模型</td><td>16MB</td><td>5.628%</td><td>trainer_config.cnn.py</td></tr>
-<tr><td>时序模型</td><td>16MB</td><td>4.812%</td><td>trainer_config.lstm.py</td></tr>
-</table>
-
- -## 附录(Appendix) -### 命令行参数(Command Line Argument) - -* \--config:网络配置 -* \--save_dir:模型存储路径 -* \--log_period:每隔多少batch打印一次日志 -* \--num_passes:训练轮次,一个pass表示过一遍所有训练样本 -* \--config_args:命令指定的参数会传入网络配置中。 -* \--init_model_path:指定初始化模型路径,可用在测试或训练时指定初始化模型。 - -默认一个pass保存一次模型,也可以通过saving_period_by_batches设置每隔多少batch保存一次模型。 -可以通过show_parameter_stats_period设置打印参数信息等。 -其他参数请参考令行参数文档。 - -### 输出日志(Log) - -``` -TrainerInternal.cpp:160] Batch=20 samples=2560 AvgCost=0.628761 CurrentCost=0.628761 Eval: classification_error_evaluator=0.304297 CurrentEval: classification_error_evaluator=0.304297 -``` -模型训练会看到这样的日志,详细的参数解释如下面表格: -
-<table>
-<tr><th>名称</th><th>解释</th></tr>
-<tr><td>Batch=20</td><td>表示过了20个batch</td></tr>
-<tr><td>samples=2560</td><td>表示过了2560个样本</td></tr>
-<tr><td>AvgCost</td><td>每个pass的第0个batch到当前batch所有样本的平均cost</td></tr>
-<tr><td>CurrentCost</td><td>当前log_period个batch所有样本的平均cost</td></tr>
-<tr><td>Eval: classification_error_evaluator</td><td>每个pass的第0个batch到当前batch所有样本的平均分类错误率</td></tr>
-<tr><td>CurrentEval: classification_error_evaluator</td><td>当前log_period个batch所有样本的平均分类错误率</td></tr>
-</table>
-
diff --git a/doc_cn/demo/sentiment_analysis/index.rst b/doc_cn/demo/sentiment_analysis/index.rst deleted file mode 100644 index 9d7972b219851d117b1ce72d8eb83eea256e2f87..0000000000000000000000000000000000000000 --- a/doc_cn/demo/sentiment_analysis/index.rst +++ /dev/null @@ -1,8 +0,0 @@ -情感分析教程 -=========================== - -.. toctree:: - :maxdepth: 3 - :glob: - - Training Locally \ No newline at end of file diff --git a/doc_cn/howto/build_docker_image.rst b/doc_cn/howto/build_docker_image.rst deleted file mode 100644 index 46ba07d9ad7c1e1843cd953fa5c5fe1dedf6cdf1..0000000000000000000000000000000000000000 --- a/doc_cn/howto/build_docker_image.rst +++ /dev/null @@ -1,35 +0,0 @@ -构建PaddlePaddle的Docker Image -============================== -PaddlePaddle的Docker Image构建源码放置在 ``${源码根目录}/paddle/scripts/docker/`` 目录下。该目录有三类文件: - -- Dockerfile:Docker Image的描述文件,包括构建步骤、各种参数和维护人员等。 - - - 一共维护了12个Dockerfile,Dockerfile.m4是它们的模板。 - - PaddlePaddle中所有的Image都基于ubuntu 14.04。 - -- build.sh:Docker Image的构建脚本,使用方式见下一小节。 -- generate.sh:通过Dockerfile.m4模板生成不同的Dockerfile。 - -使用脚本构建Docker Image ------------------------- - -进入源码目录,执行 ``docker build`` 命令,即可在本地编译出PaddlePaddle的镜像。简单的使用样例为 - -.. code-block:: bash - - cd ${源码根目录}/paddle/scripts/docker/ - docker build --build-arg LOWEST_DL_SPEED=50K \ - --build-arg WITH_GPU=ON \ - --tag paddle_gpu:latest . - -其中,``--build-arg`` 传入的配置参数包括: - -- LOWEST\_DL\_SPEED\: 在多线程下载过程中,设置下载线程的最低速度。 - - - 默认单位是Bytes,但可以传入10K、10M、或10G等这样的单位。 - - 如果小于这个速度,那么这个线程将会关闭。当所有的线程都关闭了,那么下载进程将会重启。 -- WITH\_GPU\: ON or OFF,是否开启GPU功能。注意, - - **编译** PaddlePaddle的GPU版本 **不一定** 要在具有GPU的机器上进行。 - - **运行** PaddlePaddle的GPU版本 **一定** 要在具有GPU的机器上运行。 - -注意:所有Image的构建在Docker 1.12版本测试通过, 低于1.12的版本并没有测试。原因是旧版本可能缺乏 ``--build-arg`` 参数,从而不能在运行编译命令的时候接受参数。 diff --git a/doc_cn/index.rst b/doc_cn/index.rst deleted file mode 100644 index 88a9f79fd23c97785a054af2aa9ee53f8578ef63..0000000000000000000000000000000000000000 --- a/doc_cn/index.rst +++ /dev/null @@ -1,32 +0,0 @@ -PaddlePaddle文档 -================ - -使用指南 --------- -* `介绍 `_ -* `快速入门 `_ -* `基本使用概念 `_ -* `编译与安装 `_ -* `用户接口 `_ -* `使用示例 `_ -* `模型配置 <../doc/ui/api/trainer_config_helpers/index.html>`_ -* `集群训练 `_ - -开发指南 --------- -* `新写Layer <../doc/dev/new_layer/index.html>`_ -* `如何贡献文档 `_ -* `如何构建Docker Image `_ - -算法教程 --------- - -* `Recurrent Group教程 `_ -* `单层RNN示例 <../doc/algorithm/rnn/rnn.html>`_ -* :ref:`algo_hrnn_rnn_api_compare` -* `支持双层序列作为输入的Layer `_ - -常见问题 --------- - -* `常见问题 `_ diff --git a/doc_cn/introduction/parameters.png b/doc_cn/introduction/parameters.png deleted file mode 100644 index 2ec67480951e21f0400bce1c34b3108dcd65c18c..0000000000000000000000000000000000000000 Binary files a/doc_cn/introduction/parameters.png and /dev/null differ diff --git a/doc_cn/ui/cmd/index.rst b/doc_cn/ui/cmd/index.rst deleted file mode 100644 index 31a8b8a79f4a87101bd6030eb4e779fd11d65811..0000000000000000000000000000000000000000 --- a/doc_cn/ui/cmd/index.rst +++ /dev/null @@ -1,20 +0,0 @@ -命令 -==== - -安装好PaddlePaddle后,在命令行直接敲击 ``paddle`` 或 ``paddle --help`` 会显示如下一些命令。 - -* ``train`` Start a paddle_trainer - 启动一个PaddlePaddle训练进程。 ``paddle train`` 可以通过命令行参数 ``-local=true`` 启动一个单机的训练进程;也可以和 ``paddle pserver`` 一起使用启动多机的分布式训练进程。 -* ``pserver`` Start a paddle_pserver_main - 在多机分布式训练下启动PaddlePaddle的parameter server进程。 -* ``version`` Print paddle version - 用于打印当前PaddlePaddle的版本和编译选项相关信息。常见的输出格式如下:1)第一行说明了PaddlePaddle的版本信息;2)第二行开始说明了一些主要的编译选项,具体意义可以参考 `编译参数选项文件 <../../build_and_install/cmake/compile_options.html>`_ 。 - - .. 
literalinclude:: paddle_version.txt - -* ``merge_model`` Start a paddle_merge_model - 用于将PaddlePaddle的模型参数文件和模型配置文件打包成一个文件,方便做部署分发。 -* ``dump_config`` Dump the trainer config as proto string - 用于将PaddlePaddle的模型配置文件以proto string的格式打印出来。 -* ``make_diagram`` - 使用graphviz对PaddlePaddle的模型配置文件进行绘制。 \ No newline at end of file diff --git a/doc_cn/ui/cmd/paddle_version.txt b/doc_cn/ui/cmd/paddle_version.txt deleted file mode 100644 index 33e2e4de7c24afd481eb6ca7eabed4924863d2b7..0000000000000000000000000000000000000000 --- a/doc_cn/ui/cmd/paddle_version.txt +++ /dev/null @@ -1,11 +0,0 @@ -PaddlePaddle 0.8.0b, compiled with - with_avx: ON - with_gpu: ON - with_double: OFF - with_python: ON - with_rdma: OFF - with_glog: ON - with_gflags: ON - with_metric_learning: OFF - with_timer: OFF - with_predict_sdk: OFF diff --git a/doc_cn/ui/index.rst b/doc_cn/ui/index.rst deleted file mode 100644 index ff36c9adb690f4126cf6ee332a9f0b09648270bd..0000000000000000000000000000000000000000 --- a/doc_cn/ui/index.rst +++ /dev/null @@ -1,32 +0,0 @@ -######## -用户接口 -######## - -数据提供 -======== - -.. toctree:: - :maxdepth: 1 - - data_provider/dataprovider.rst - data_provider/pydataprovider2.rst - -命令及命令行参数 -================ - -.. toctree:: - :maxdepth: 1 - - cmd/index.rst - -* `参数用例 <../../doc/ui/cmd_argument/use_case.html>`_ -* `参数分类 <../../doc/ui/cmd_argument/argument_outline.html>`_ -* `参数描述 <../../doc/ui/cmd_argument/detail_introduction.html>`_ - -预测 -======= - -.. toctree:: - :maxdepth: 1 - - predict/swig_py_paddle.rst diff --git a/paddle/api/CMakeLists.txt b/paddle/api/CMakeLists.txt index 9b2d122a09adabd766014a9d21a167eec5b2de32..6ad1d79e59b11b2c1f7aacf22d13347b3fd8e0e2 100644 --- a/paddle/api/CMakeLists.txt +++ b/paddle/api/CMakeLists.txt @@ -17,22 +17,18 @@ add_library(paddle_api STATIC ${API_SOURCES}) add_dependencies(paddle_api gen_proto_cpp) +list(LENGTH "${GFLAGS_LIBRARIES}" GFLAGS_LIBRARIES_LENGTH) -if(WITH_GFLAGS) - list(LENGTH "${GFLAGS_LIBRARIES}" GFLAGS_LIBRARIES_LENGTH) - - if(${GFLAGS_LIBRARIES_LENGTH} EQUAL 0 AND TARGET "${GFLAGS_LIBRARIES}") - # Because gflags compiled by cmake, so it is imported by cmake target, - # not a real library path. Get the real library path here. - message(STATUS "GFLAGS Libraries is ${GFLAGS_LIBRARIES}") - get_target_property(GFLAGS_LOCATION ${GFLAGS_LIBRARIES} LOCATION) - message(STATUS "GFLAGS Target location is ${GFLAGS_LOCATION}") - else() - set(GFLAGS_LOCATION ${GFLAGS_LIBRARIES}) - endif() +if(${GFLAGS_LIBRARIES_LENGTH} EQUAL 0 AND TARGET "${GFLAGS_LIBRARIES}") +# Because gflags compiled by cmake, so it is imported by cmake target, +# not a real library path. Get the real library path here. +message(STATUS "GFLAGS Libraries is ${GFLAGS_LIBRARIES}") +get_target_property(GFLAGS_LOCATION ${GFLAGS_LIBRARIES} LOCATION) +message(STATUS "GFLAGS Target location is ${GFLAGS_LOCATION}") +else() +set(GFLAGS_LOCATION ${GFLAGS_LIBRARIES}) endif() - configure_file( paddle_api_config.py.in ${PROJ_ROOT}/paddle/api/paddle_api_config.py @@ -57,7 +53,7 @@ add_custom_command(OUTPUT ${PROJ_ROOT}/paddle/dist/.timestamp paddle_trainer paddle_api paddle_cuda - ${PY_PADDLE_PYTHON_FILES} + ${PY_PADDLE_PYTHON_FILES} ) install(DIRECTORY ${PROJ_ROOT}/paddle/dist/ diff --git a/paddle/api/Trainer.cpp b/paddle/api/Trainer.cpp index 59b47d4b1c7b6d586e89624c155d7ba6f3885eb6..d83dc380beeec3747451a483f4811eb833e8c226 100644 --- a/paddle/api/Trainer.cpp +++ b/paddle/api/Trainer.cpp @@ -27,9 +27,9 @@ limitations under the License. 
*/ using paddle::real; -P_DECLARE_string(config); -P_DECLARE_string(init_model_path); -P_DECLARE_int32(start_pass); +DECLARE_string(config); +DECLARE_string(init_model_path); +DECLARE_int32(start_pass); struct TrainerPrivate : public paddle::Trainer { bool _trainOneBatch(size_t batchSize); diff --git a/paddle/api/paddle_api_config.py.in b/paddle/api/paddle_api_config.py.in index a2352250c31efa7ee3c4c8338d95dce5a5b9a511..23542b952b7699d66cf64b47d0354e9078ae06d9 100644 --- a/paddle/api/paddle_api_config.py.in +++ b/paddle/api/paddle_api_config.py.in @@ -8,9 +8,7 @@ CMAKE_DL_LIBS="@CMAKE_DL_LIBS@" WITH_PYTHON="@WITH_PYTHON@" PYTHON_LIBRARIES="@PYTHON_LIBRARIES@" -WITH_GLOG="@WITH_GLOG@" LIBGLOG_LIBRARY="@LIBGLOG_LIBRARY@" -WITH_GFLAGS="@WITH_GFLAGS@" GFLAGS_LIBRARIES="@GFLAGS_LIBRARIES@" GFLAGS_LOCATION="@GFLAGS_LOCATION@" CBLAS_LIBRARIES="@CBLAS_LIBS@" diff --git a/paddle/api/paddle_ld_flags.py b/paddle/api/paddle_ld_flags.py index 85cc54700faceb5a514cebe665a2da5ed2f7aa3c..51d7dfee58b786512201577872559ae510051ba9 100644 --- a/paddle/api/paddle_ld_flags.py +++ b/paddle/api/paddle_ld_flags.py @@ -47,10 +47,8 @@ try: self.with_python = PaddleLDFlag.cmake_bool(WITH_PYTHON) self.python_libs = PYTHON_LIBRARIES - self.with_glog = PaddleLDFlag.cmake_bool(WITH_GLOG) self.glog_libs = LIBGLOG_LIBRARY - self.with_gflags = PaddleLDFlag.cmake_bool(WITH_GFLAGS) self.with_coverage = PaddleLDFlag.cmake_bool(WITH_COVERALLS) self.gflags_libs = GFLAGS_LIBRARIES self.gflags_location = GFLAGS_LOCATION @@ -88,6 +86,8 @@ try: "-lpaddle_cuda", "-lpaddle_api", self.normalize_flag(self.protolib), + self.normalize_flag(self.glog_libs), + self.normalize_flag(self.gflags_libs), self.normalize_flag(self.zlib), self.normalize_flag(self.thread), self.normalize_flag(self.dl_libs), @@ -96,10 +96,6 @@ try: if self.with_python: libs.append(self.normalize_flag(self.python_libs)) - if self.with_glog: - libs.append(self.normalize_flag(self.glog_libs)) - if self.with_gflags: - libs.append(self.normalize_flag(self.gflags_libs)) if self.with_gpu: libs.append(self.normalize_flag(self.curt)) if self.with_coverage: diff --git a/paddle/cuda/src/hl_cuda_cudnn.cc b/paddle/cuda/src/hl_cuda_cudnn.cc index 7111224d599f0d67395254a95d7f63110a6a87c4..8cddf10d40c6277c6bb29a4fe11e5845a2770213 100644 --- a/paddle/cuda/src/hl_cuda_cudnn.cc +++ b/paddle/cuda/src/hl_cuda_cudnn.cc @@ -21,10 +21,10 @@ limitations under the License. */ #include "paddle/utils/CommandLineParser.h" #include "paddle/utils/Logging.h" -P_DEFINE_int32(cudnn_conv_workspace_limit_in_mb, - 4096, - "Specify cuDNN max workspace limit, in units MB, " - "4096MB=4GB by default."); +DEFINE_int32(cudnn_conv_workspace_limit_in_mb, + 4096, + "Specify cuDNN max workspace limit, in units MB, " + "4096MB=4GB by default."); namespace dynload { diff --git a/paddle/cuda/src/hl_cuda_device.cc b/paddle/cuda/src/hl_cuda_device.cc index b0bba73594d0f7d4aba02745d78da68f0baa3f8a..a71eecba2736234dafaf6b67e5efac5358a30871 100644 --- a/paddle/cuda/src/hl_cuda_device.cc +++ b/paddle/cuda/src/hl_cuda_device.cc @@ -12,6 +12,9 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ +// clang-format off +// Because clang-format 4.X and clang-format 3.8+ format +// following lines in different. So disable clang-format. #include "hl_cuda.h" #include #include @@ -19,10 +22,12 @@ limitations under the License. 
*/ #include #include #include +#include "hl_cuda.h" #include "hl_cuda.ph" #include "hl_dso_loader.h" #include "hl_thread.ph" #include "paddle/utils/Logging.h" +// clang-format on namespace dynload { diff --git a/paddle/cuda/src/hl_dso_loader.cc b/paddle/cuda/src/hl_dso_loader.cc index f509b8924319dcaa48adaf1de32fec03e45d61c5..54c7620fc081f681d9d33bcd711008fa5029df05 100644 --- a/paddle/cuda/src/hl_dso_loader.cc +++ b/paddle/cuda/src/hl_dso_loader.cc @@ -16,21 +16,21 @@ limitations under the License. */ #include "paddle/utils/CommandLineParser.h" #include "paddle/utils/Logging.h" -P_DEFINE_string(cudnn_dir, - "", - "Specify path for loading libcudnn.so. For instance, " - "/usr/local/cudnn/lib. If empty [default], dlopen " - "will search cudnn from LD_LIBRARY_PATH"); - -P_DEFINE_string(cuda_dir, - "", - "Specify path for loading cuda library, such as libcublas, " - "libcurand. For instance, /usr/local/cuda/lib64. (Note: " - "libcudart can not be specified by cuda_dir, since some " - "build-in function in cudart already ran before main entry). " - "If default, dlopen will search cuda from LD_LIBRARY_PATH"); - -P_DEFINE_string(warpctc_dir, "", "Specify path for loading libwarpctc.so."); +DEFINE_string(cudnn_dir, + "", + "Specify path for loading libcudnn.so. For instance, " + "/usr/local/cudnn/lib. If empty [default], dlopen " + "will search cudnn from LD_LIBRARY_PATH"); + +DEFINE_string(cuda_dir, + "", + "Specify path for loading cuda library, such as libcublas, " + "libcurand. For instance, /usr/local/cuda/lib64. (Note: " + "libcudart can not be specified by cuda_dir, since some " + "build-in function in cudart already ran before main entry). " + "If default, dlopen will search cuda from LD_LIBRARY_PATH"); + +DEFINE_string(warpctc_dir, "", "Specify path for loading libwarpctc.so."); static inline std::string join(const std::string& part1, const std::string& part2) { diff --git a/paddle/gserver/dataproviders/ProtoDataProvider.cpp b/paddle/gserver/dataproviders/ProtoDataProvider.cpp index d16ecca2d977478e7e7f8819f3b5a5ea48e69b07..c6f5cab1915b7f41d505c37a7fef762a392bad7f 100644 --- a/paddle/gserver/dataproviders/ProtoDataProvider.cpp +++ b/paddle/gserver/dataproviders/ProtoDataProvider.cpp @@ -22,9 +22,9 @@ limitations under the License. */ #include "DataProviderGroup.h" #include "paddle/utils/Logging.h" -P_DEFINE_double(memory_threshold_on_load_data, - 1.0, - "stop loading data when memory is not sufficient"); +DEFINE_double(memory_threshold_on_load_data, + 1.0, + "stop loading data when memory is not sufficient"); namespace paddle { diff --git a/paddle/gserver/evaluators/Evaluator.cpp b/paddle/gserver/evaluators/Evaluator.cpp index 7556d21e01e0314d3ee17fa37642081174ec41f3..2f9928191170aa6cf25417362cb360b5e2865b69 100644 --- a/paddle/gserver/evaluators/Evaluator.cpp +++ b/paddle/gserver/evaluators/Evaluator.cpp @@ -17,7 +17,7 @@ limitations under the License. */ #include "paddle/gserver/gradientmachines/NeuralNetwork.h" -P_DECLARE_int32(trainer_id); +DECLARE_int32(trainer_id); namespace paddle { diff --git a/paddle/gserver/gradientmachines/MultiGradientMachine.cpp b/paddle/gserver/gradientmachines/MultiGradientMachine.cpp index a7324f55451e696176a040b470c2d3bdf9eaa392..88c098b3559d8d2918309aa48329af067f79bdd5 100644 --- a/paddle/gserver/gradientmachines/MultiGradientMachine.cpp +++ b/paddle/gserver/gradientmachines/MultiGradientMachine.cpp @@ -21,11 +21,11 @@ limitations under the License. 
*/ #include "NeuralNetwork.h" #include "ParallelNeuralNetwork.h" -P_DEFINE_bool(allow_only_one_model_on_one_gpu, - true, - "If true, do not allow multiple models on one GPU device"); +DEFINE_bool(allow_only_one_model_on_one_gpu, + true, + "If true, do not allow multiple models on one GPU device"); #ifdef PADDLE_METRIC_LEARNING -P_DECLARE_bool(external); +DECLARE_bool(external); #endif namespace paddle { diff --git a/paddle/gserver/gradientmachines/RecurrentGradientMachine.cpp b/paddle/gserver/gradientmachines/RecurrentGradientMachine.cpp index ee1c92bdf531d9e5cc4bbd63c4f6d91b32b8cee9..8f68b3d66bd263b8df34801878efee3e2de2622d 100644 --- a/paddle/gserver/gradientmachines/RecurrentGradientMachine.cpp +++ b/paddle/gserver/gradientmachines/RecurrentGradientMachine.cpp @@ -24,7 +24,7 @@ limitations under the License. */ #include "paddle/utils/Stat.h" #include "paddle/utils/Util.h" -P_DEFINE_string(diy_beam_search_prob_so, "", "the diy beam search cost so"); +DEFINE_string(diy_beam_search_prob_so, "", "the diy beam search cost so"); static const char* DIY_CALC_PROB_SYMBOL_NAME = "calc_prob"; static const char* DIY_START_CALC_PROB_SYMBOL_NAME = "start_calc_prob"; diff --git a/paddle/gserver/layers/DataLayer.cpp b/paddle/gserver/layers/DataLayer.cpp index 66f0606a38cf597c2697ef1d9e9419ea0e94ef5a..3551df4e172f0237685127b0b3869554d9c5f97d 100644 --- a/paddle/gserver/layers/DataLayer.cpp +++ b/paddle/gserver/layers/DataLayer.cpp @@ -54,7 +54,7 @@ void DataLayer::copyDataToOutput(Argument& output) { output.setFrameWidth(config_.width()); } else { output.setFrameHeight(data_.getFrameHeight()); - output.setFrameHeight(data_.getFrameHeight()); + output.setFrameWidth(data_.getFrameWidth()); } output.cpuSequenceDims = data_.cpuSequenceDims; output.sequenceStartPositions = data_.sequenceStartPositions; diff --git a/paddle/gserver/layers/Layer.cpp b/paddle/gserver/layers/Layer.cpp index c9e121047b5fa1297cfca7c268205594f079e1e3..c47943f81c01589eada4b825d54be5c69314b6fa 100644 --- a/paddle/gserver/layers/Layer.cpp +++ b/paddle/gserver/layers/Layer.cpp @@ -33,7 +33,7 @@ limitations under the License. */ #include "TransLayer.h" #include "ValidationLayer.h" -P_DEFINE_bool(log_error_clipping, false, "enable log error clipping or not"); +DEFINE_bool(log_error_clipping, false, "enable log error clipping or not"); namespace paddle { diff --git a/paddle/gserver/layers/LstmLayer.cpp b/paddle/gserver/layers/LstmLayer.cpp index 452091eff42083537f37d89b8f8464851f2e36db..2543d1b49a801943819e05bc52e53eaeafae1edf 100644 --- a/paddle/gserver/layers/LstmLayer.cpp +++ b/paddle/gserver/layers/LstmLayer.cpp @@ -17,7 +17,7 @@ limitations under the License. */ #include "paddle/math/Matrix.h" #include "paddle/utils/Stat.h" -P_DECLARE_bool(prev_batch_state); +DECLARE_bool(prev_batch_state); namespace paddle { diff --git a/paddle/gserver/layers/RecurrentLayer.cpp b/paddle/gserver/layers/RecurrentLayer.cpp index 9f3bf76a2dcf42b0ede0f21a241e83de39c5944b..85812c9d660e07e915012a7337e621c10a6597ca 100644 --- a/paddle/gserver/layers/RecurrentLayer.cpp +++ b/paddle/gserver/layers/RecurrentLayer.cpp @@ -17,7 +17,7 @@ limitations under the License. 
*/ #include "paddle/utils/CommandLineParser.h" #include "paddle/utils/Stat.h" -P_DEFINE_bool(rnn_use_batch, false, "Using the batch method for calculation."); +DEFINE_bool(rnn_use_batch, false, "Using the batch method for calculation."); namespace paddle { diff --git a/paddle/gserver/layers/ValidationLayer.h b/paddle/gserver/layers/ValidationLayer.h index 471055429d34bee591cf7e66cd28221a8ebd83ed..4c1de7b3b7d6975c2693eb065f7d3e19cc51a95c 100644 --- a/paddle/gserver/layers/ValidationLayer.h +++ b/paddle/gserver/layers/ValidationLayer.h @@ -18,7 +18,7 @@ limitations under the License. */ #include "Layer.h" #include "paddle/gserver/evaluators/Evaluator.h" -P_DECLARE_int32(trainer_id); +DECLARE_int32(trainer_id); namespace paddle { diff --git a/paddle/gserver/tests/LayerGradUtil.cpp b/paddle/gserver/tests/LayerGradUtil.cpp index dffc24936faa2d855ae2eb762efbcc1c0f545943..1d5e7de1ba624d98c953efe1cdd2318548c4e914 100644 --- a/paddle/gserver/tests/LayerGradUtil.cpp +++ b/paddle/gserver/tests/LayerGradUtil.cpp @@ -14,7 +14,7 @@ limitations under the License. */ #include "LayerGradUtil.h" -P_DECLARE_bool(thread_local_rand_use_global_seed); +DECLARE_bool(thread_local_rand_use_global_seed); namespace paddle { real getCostSum(LayerPtr& testLayer, MatrixPtr weights) { diff --git a/paddle/gserver/tests/TestUtil.cpp b/paddle/gserver/tests/TestUtil.cpp index e656da5b8f7c0f9ebbc094c0e1548423ea060f50..e07c60861a4a6567fd1e28559b9806cb623a3bdf 100644 --- a/paddle/gserver/tests/TestUtil.cpp +++ b/paddle/gserver/tests/TestUtil.cpp @@ -17,7 +17,7 @@ limitations under the License. */ #include "paddle/math/SparseMatrix.h" #include "paddle/utils/CommandLineParser.h" -P_DEFINE_int32(fixed_seq_length, 0, "Produce some sequence of fixed length"); +DEFINE_int32(fixed_seq_length, 0, "Produce some sequence of fixed length"); namespace paddle { diff --git a/paddle/gserver/tests/test_ActivationGrad.cpp b/paddle/gserver/tests/test_ActivationGrad.cpp index 20a6126d0b69f71eecc439854c8f97f94ec53de5..7d7e68da5c5a9dbcba024002a988f26f7613b724 100644 --- a/paddle/gserver/tests/test_ActivationGrad.cpp +++ b/paddle/gserver/tests/test_ActivationGrad.cpp @@ -25,8 +25,8 @@ limitations under the License. */ using namespace paddle; // NOLINT using namespace std; // NOLINT -P_DECLARE_bool(use_gpu); -P_DECLARE_bool(thread_local_rand_use_global_seed); +DECLARE_bool(use_gpu); +DECLARE_bool(thread_local_rand_use_global_seed); void testActivation(const string& act) { LOG(INFO) << "test activation: " << act; diff --git a/paddle/gserver/tests/test_BatchNorm.cpp b/paddle/gserver/tests/test_BatchNorm.cpp index 3bd4e321b7d073055ea8e9d97020379276de8cdf..7f5fcb670b70aed9f0a04180d344556a0390122f 100644 --- a/paddle/gserver/tests/test_BatchNorm.cpp +++ b/paddle/gserver/tests/test_BatchNorm.cpp @@ -27,11 +27,11 @@ limitations under the License. 
*/ using namespace paddle; // NOLINT using namespace std; // NOLINT -P_DECLARE_bool(use_gpu); -P_DECLARE_int32(gpu_id); -P_DECLARE_double(checkgrad_eps); -P_DECLARE_bool(thread_local_rand_use_global_seed); -P_DECLARE_bool(prev_batch_state); +DECLARE_bool(use_gpu); +DECLARE_int32(gpu_id); +DECLARE_double(checkgrad_eps); +DECLARE_bool(thread_local_rand_use_global_seed); +DECLARE_bool(prev_batch_state); // Test that the batchNormLayer can be followed by a ConvLayer TEST(Layer, batchNorm) { diff --git a/paddle/gserver/tests/test_ConvTrans.cpp b/paddle/gserver/tests/test_ConvTrans.cpp index 83100e3bec7e61c7d7751754ace760729e9adb27..99202c2d5702a9569c3a9a92897a8a0e38b8e2a6 100644 --- a/paddle/gserver/tests/test_ConvTrans.cpp +++ b/paddle/gserver/tests/test_ConvTrans.cpp @@ -28,11 +28,11 @@ limitations under the License. */ using namespace paddle; // NOLINT using namespace std; // NOLINT -P_DECLARE_bool(use_gpu); -P_DECLARE_int32(gpu_id); -P_DECLARE_double(checkgrad_eps); -P_DECLARE_bool(thread_local_rand_use_global_seed); -P_DECLARE_bool(prev_batch_state); +DECLARE_bool(use_gpu); +DECLARE_int32(gpu_id); +DECLARE_double(checkgrad_eps); +DECLARE_bool(thread_local_rand_use_global_seed); +DECLARE_bool(prev_batch_state); // Test that the convTrans forward is the same as conv backward TEST(Layer, convTransLayerFwd) { diff --git a/paddle/gserver/tests/test_ConvUnify.cpp b/paddle/gserver/tests/test_ConvUnify.cpp index 02763406a34da16da52fa0247c6c469c2418a914..2ab18f886848d198b9063c7559790497ce131efe 100644 --- a/paddle/gserver/tests/test_ConvUnify.cpp +++ b/paddle/gserver/tests/test_ConvUnify.cpp @@ -28,11 +28,11 @@ limitations under the License. */ using namespace paddle; // NOLINT using namespace std; // NOLINT -P_DECLARE_bool(use_gpu); -P_DECLARE_int32(gpu_id); -P_DECLARE_double(checkgrad_eps); -P_DECLARE_bool(thread_local_rand_use_global_seed); -P_DECLARE_bool(prev_batch_state); +DECLARE_bool(use_gpu); +DECLARE_int32(gpu_id); +DECLARE_double(checkgrad_eps); +DECLARE_bool(thread_local_rand_use_global_seed); +DECLARE_bool(prev_batch_state); // Do one forward pass of convTrans layer and check to see if its output // matches the given result diff --git a/paddle/gserver/tests/test_Evaluator.cpp b/paddle/gserver/tests/test_Evaluator.cpp index 7a930aebcf4ae7ab163c497d4d9545fdcf4f8eb5..e07066dad84aa6326c2447fc5ee80fa496735fbf 100644 --- a/paddle/gserver/tests/test_Evaluator.cpp +++ b/paddle/gserver/tests/test_Evaluator.cpp @@ -21,9 +21,9 @@ limitations under the License. */ using namespace paddle; // NOLINT using namespace std; // NOLINT -P_DECLARE_bool(use_gpu); -P_DECLARE_int32(gpu_id); -P_DECLARE_bool(thread_local_rand_use_global_seed); +DECLARE_bool(use_gpu); +DECLARE_int32(gpu_id); +DECLARE_bool(thread_local_rand_use_global_seed); enum InputType { INPUT_DATA, // dense vector diff --git a/paddle/gserver/tests/test_LayerGrad.cpp b/paddle/gserver/tests/test_LayerGrad.cpp index 9f8b197df554a67ebcdd3cff2c5d7d91bfc1932d..8a8d094ed357a6565dd9827c4bb10b76db6a146a 100644 --- a/paddle/gserver/tests/test_LayerGrad.cpp +++ b/paddle/gserver/tests/test_LayerGrad.cpp @@ -26,11 +26,11 @@ limitations under the License. 
*/ using namespace paddle; // NOLINT using namespace std; // NOLINT -P_DECLARE_bool(use_gpu); -P_DECLARE_int32(gpu_id); -P_DECLARE_double(checkgrad_eps); -P_DECLARE_bool(thread_local_rand_use_global_seed); -P_DECLARE_bool(prev_batch_state); +DECLARE_bool(use_gpu); +DECLARE_int32(gpu_id); +DECLARE_double(checkgrad_eps); +DECLARE_bool(thread_local_rand_use_global_seed); +DECLARE_bool(prev_batch_state); TEST(Operator, dot_mul) { TestConfig config; diff --git a/paddle/gserver/tests/test_NetworkCompare.cpp b/paddle/gserver/tests/test_NetworkCompare.cpp index baa55aa0252cb63db7c4aa92cf9b6933199273fa..fc60228f816e0cea30ef764c59a8c7875ed4a0e8 100644 --- a/paddle/gserver/tests/test_NetworkCompare.cpp +++ b/paddle/gserver/tests/test_NetworkCompare.cpp @@ -25,10 +25,10 @@ limitations under the License. */ using namespace paddle; // NOLINT using namespace std; // NOLINT -P_DECLARE_int32(gpu_id); -P_DECLARE_double(checkgrad_eps); -P_DEFINE_bool(use_label, true, "input label or sequence label"); -P_DEFINE_bool(static_para, false, "static parameter"); +DECLARE_int32(gpu_id); +DECLARE_double(checkgrad_eps); +DEFINE_bool(use_label, true, "input label or sequence label"); +DEFINE_bool(static_para, false, "static parameter"); struct DataIn { std::vector inArgs; @@ -267,8 +267,8 @@ TEST(Compare, img_conv2) { } #endif -P_DEFINE_string(config_file_a, "", "config of one network to compare"); -P_DEFINE_string(config_file_b, "", "config of another network to compare"); +DEFINE_string(config_file_a, "", "config of one network to compare"); +DEFINE_string(config_file_b, "", "config of another network to compare"); TEST(Compare, network) { if (FLAGS_config_file_a != "" && FLAGS_config_file_b != "") { compareNetwork(FLAGS_config_file_a, FLAGS_config_file_b); diff --git a/paddle/gserver/tests/test_PyDataProvider2.cpp b/paddle/gserver/tests/test_PyDataProvider2.cpp index 436318d35634f4ba46781a125ace110551029439..5f8bc5ecd0f77efc6dcda0330f124ca6cab7f277 100644 --- a/paddle/gserver/tests/test_PyDataProvider2.cpp +++ b/paddle/gserver/tests/test_PyDataProvider2.cpp @@ -19,7 +19,7 @@ limitations under the License. */ #include "paddle/utils/PythonUtil.h" #include "paddle/utils/Util.h" -P_DEFINE_string(train_list, "unittest.list", "file list for unittest"); +DEFINE_string(train_list, "unittest.list", "file list for unittest"); namespace paddle { namespace unittest { diff --git a/paddle/gserver/tests/test_RecurrentGradientMachine.cpp b/paddle/gserver/tests/test_RecurrentGradientMachine.cpp index a351667d8b18b734022820a777c551bb11a243bf..874aabf37ccf6029bdb9b844cf144c0165154eb9 100644 --- a/paddle/gserver/tests/test_RecurrentGradientMachine.cpp +++ b/paddle/gserver/tests/test_RecurrentGradientMachine.cpp @@ -20,7 +20,7 @@ limitations under the License. */ #include #include -P_DECLARE_int32(seed); +DECLARE_int32(seed); using namespace paddle; // NOLINT using namespace std; // NOLINT diff --git a/paddle/gserver/tests/test_RecurrentLayer.cpp b/paddle/gserver/tests/test_RecurrentLayer.cpp index cd96ca7c848afd7a2aa38df3343bee102aa5e83a..f91c788863b6963df92b735dbfef2bacee1fff45 100644 --- a/paddle/gserver/tests/test_RecurrentLayer.cpp +++ b/paddle/gserver/tests/test_RecurrentLayer.cpp @@ -23,9 +23,9 @@ limitations under the License. 
*/ using namespace paddle; // NOLINT using namespace std; // NOLINT -P_DECLARE_bool(use_gpu); -P_DECLARE_bool(rnn_use_batch); -P_DECLARE_int32(fixed_seq_length); +DECLARE_bool(use_gpu); +DECLARE_bool(rnn_use_batch); +DECLARE_int32(fixed_seq_length); void checkError(const Matrix& matrix1, const Matrix& matrix2) { CHECK(matrix1.getHeight() == matrix2.getHeight()); diff --git a/paddle/gserver/tests/test_SelectiveFCLayer.cpp b/paddle/gserver/tests/test_SelectiveFCLayer.cpp index 4f3a95a535b0a47df88ac5ba7a367d25e57e5f74..ab23d00a2cb6077147f5b89664a8e2437b4cd63b 100644 --- a/paddle/gserver/tests/test_SelectiveFCLayer.cpp +++ b/paddle/gserver/tests/test_SelectiveFCLayer.cpp @@ -29,11 +29,11 @@ limitations under the License. */ using namespace paddle; // NOLINT using namespace std; // NOLINT -P_DECLARE_bool(use_gpu); -P_DECLARE_int32(num_passes); -P_DECLARE_string(config); -P_DECLARE_string(init_model_path); -P_DECLARE_string(config_args); +DECLARE_bool(use_gpu); +DECLARE_int32(num_passes); +DECLARE_string(config); +DECLARE_string(init_model_path); +DECLARE_string(config_args); size_t fcLayerWidth = 1024; diff --git a/paddle/gserver/tests/test_WarpCTCLayer.cpp b/paddle/gserver/tests/test_WarpCTCLayer.cpp index 700425412cebe8416f900fa702503db98722c0ee..0a4a814d5247410248f7418e1ef2c79a2da42507 100644 --- a/paddle/gserver/tests/test_WarpCTCLayer.cpp +++ b/paddle/gserver/tests/test_WarpCTCLayer.cpp @@ -25,7 +25,7 @@ limitations under the License. */ using namespace paddle; // NOLINT using namespace std; // NOLINT -P_DECLARE_bool(use_gpu); +DECLARE_bool(use_gpu); const real* getData(const Matrix& matrix) { if (matrix.useGpu()) { diff --git a/paddle/math/Matrix.h b/paddle/math/Matrix.h index 4342ca52a3d6677bea312531cd562895621218c5..5685cb7bcbbb6b90687790953d676e3792f36f36 100644 --- a/paddle/math/Matrix.h +++ b/paddle/math/Matrix.h @@ -408,7 +408,7 @@ public: LOG(FATAL) << "Not implemented"; } - virtual void addBias(Matrix& b, real scale, bool sharedBias) { + void addBias(Matrix& b, real scale, bool sharedBias) { if (!sharedBias) { addBias(b, scale); } else { @@ -425,7 +425,7 @@ public: LOG(FATAL) << "Not implemented"; } - virtual void collectBias(Matrix& a, real scale, bool sharedBias) { + void collectBias(Matrix& a, real scale, bool sharedBias) { if (!sharedBias) { collectBias(a, scale); } else { diff --git a/paddle/math/SparseRowMatrix.cpp b/paddle/math/SparseRowMatrix.cpp index 3091743123af03561f91dfb8b03e65087310ce64..b61c6b2d49ccead5e9cfdf595a8bebae0e5b87b5 100644 --- a/paddle/math/SparseRowMatrix.cpp +++ b/paddle/math/SparseRowMatrix.cpp @@ -24,9 +24,9 @@ limitations under the License. */ #include "paddle/utils/Thread.h" #include "paddle/utils/Util.h" -P_DEFINE_bool(allow_inefficient_sparse_update, - false, - "Whether to allow inefficient sparse update"); +DEFINE_bool(allow_inefficient_sparse_update, + false, + "Whether to allow inefficient sparse update"); namespace paddle { diff --git a/paddle/math/SparseRowMatrix.h b/paddle/math/SparseRowMatrix.h index badb4b9c1cce4d93c24aac47c8ed742c4d7d38fa..9364feb4a1462a5a9d16ca0f69213ba32ad97d21 100644 --- a/paddle/math/SparseRowMatrix.h +++ b/paddle/math/SparseRowMatrix.h @@ -20,7 +20,7 @@ limitations under the License. 
*/ #include "paddle/utils/CommandLineParser.h" #include "paddle/utils/Util.h" -P_DECLARE_bool(allow_inefficient_sparse_update); +DECLARE_bool(allow_inefficient_sparse_update); namespace paddle { diff --git a/paddle/math/Storage.cpp b/paddle/math/Storage.cpp index f9a2c12cd539ab4785847a58d3fedfc384e05232..56e5442394b04230c22d668aa734dc0fa44004c2 100644 --- a/paddle/math/Storage.cpp +++ b/paddle/math/Storage.cpp @@ -16,9 +16,9 @@ limitations under the License. */ #include "Allocator.h" #include "paddle/utils/Util.h" -P_DEFINE_int32(pool_limit_size, - 536870912, - "maximum memory size managed by a memory pool, default is 512M"); +DEFINE_int32(pool_limit_size, + 536870912, + "maximum memory size managed by a memory pool, default is 512M"); namespace paddle { diff --git a/paddle/math/tests/CMakeLists.txt b/paddle/math/tests/CMakeLists.txt index fe5177291c21c3505c3694201b36b54397150ccf..a3ea078509704f305672d0b02d272de0f6c97f51 100644 --- a/paddle/math/tests/CMakeLists.txt +++ b/paddle/math/tests/CMakeLists.txt @@ -16,12 +16,10 @@ add_simple_unittest(test_CpuGpuVector) add_simple_unittest(test_Allocator) if(WITH_GPU) - if(COMPILER_SUPPORT_CXX11) - CUDA_ADD_EXECUTABLE(test_Tensor test_Tensor.cu) - link_paddle_test(test_Tensor) - CUDA_ADD_EXECUTABLE(test_lazyAssign test_lazyAssign.cu) - link_paddle_test(test_lazyAssign) - endif() + CUDA_ADD_EXECUTABLE(test_Tensor test_Tensor.cu) + link_paddle_test(test_Tensor) + CUDA_ADD_EXECUTABLE(test_lazyAssign test_lazyAssign.cu) + link_paddle_test(test_lazyAssign) else() compile_cu_as_cpp(test_Tensor.cu) add_unittest(test_Tensor test_Tensor.cu) diff --git a/paddle/math/tests/test_TrainingAlgorithm.cpp b/paddle/math/tests/test_TrainingAlgorithm.cpp index 1bf6a0cc43ea16c955b1b1cd7ef61d2e7100726a..2c458cba9ca11e9af8a98b88a6392978c2a9be77 100644 --- a/paddle/math/tests/test_TrainingAlgorithm.cpp +++ b/paddle/math/tests/test_TrainingAlgorithm.cpp @@ -22,9 +22,9 @@ limitations under the License. */ using namespace paddle; // NOLINT #ifndef PADDLE_TYPE_DOUBLE -P_DEFINE_double(max_diff, 1e-5, "max diff allowed"); +DEFINE_double(max_diff, 1e-5, "max diff allowed"); #else -P_DEFINE_double(max_diff, 1e-13, "max diff allowed"); +DEFINE_double(max_diff, 1e-13, "max diff allowed"); #endif class SetMaxDiff { diff --git a/paddle/parameter/Argument.cpp b/paddle/parameter/Argument.cpp index e91daa371768e6f7f3846f95027cc6320926052c..65d01a15718ae2bebd4869eff0e5407524bc0e7c 100644 --- a/paddle/parameter/Argument.cpp +++ b/paddle/parameter/Argument.cpp @@ -245,6 +245,8 @@ int32_t Argument::resizeAndCopyFrom(const Argument& src, bool useGpu, hl_stream_t stream) { dataId = src.dataId; + frameWidth = src.frameWidth; + frameHeight = src.frameHeight; if (!src.sequenceStartPositions) { // non-sequence input, copy samples directly diff --git a/paddle/parameter/FirstOrderOptimizer.cpp b/paddle/parameter/FirstOrderOptimizer.cpp index 630f15c8cfbe6cca16094d0eb6677702339aa730..dbb738e98b5874f5bb33026ad585a6c3ef327d1d 100644 --- a/paddle/parameter/FirstOrderOptimizer.cpp +++ b/paddle/parameter/FirstOrderOptimizer.cpp @@ -19,7 +19,7 @@ limitations under the License. 
*/ #include -P_DEFINE_bool(log_clipping, false, "enable log clipping or not"); +DEFINE_bool(log_clipping, false, "enable log clipping or not"); namespace paddle { diff --git a/paddle/parameter/Parameter.cpp b/paddle/parameter/Parameter.cpp index 986ae1539b6ef7745e94be6101e94b40c287be94..1673fc6e533e416dfe4db557a1a8968667d1bfff 100644 --- a/paddle/parameter/Parameter.cpp +++ b/paddle/parameter/Parameter.cpp @@ -26,11 +26,11 @@ limitations under the License. */ #include "paddle/utils/CommandLineParser.h" #include "paddle/utils/Logging.h" -P_DEFINE_int32(enable_grad_share, - (100 * 1024 * 1024), - "threshold for enable gradient parameter share for batch " - "multi-cpu training"); -P_DEFINE_int32( +DEFINE_int32(enable_grad_share, + (100 * 1024 * 1024), + "threshold for enable gradient parameter share for batch " + "multi-cpu training"); +DEFINE_int32( grad_share_block_num, 64, "block number of gradient parameter share for batch multi-cpu training"); diff --git a/paddle/pserver/BaseClient.cpp b/paddle/pserver/BaseClient.cpp index a43def98c528c2cfbc65a40aa54b5d4a49961a34..b4ac7a2506921b2409baaff077cc3541f3dc8d73 100644 --- a/paddle/pserver/BaseClient.cpp +++ b/paddle/pserver/BaseClient.cpp @@ -18,7 +18,7 @@ limitations under the License. */ #include "paddle/utils/CommandLineParser.h" #include "paddle/utils/Stat.h" -P_DECLARE_string(pservers); +DECLARE_string(pservers); namespace paddle { diff --git a/paddle/pserver/LightNetwork.cpp b/paddle/pserver/LightNetwork.cpp index 329dfb0fb38e8ad377539d8af6be228595baa719..cbc105e651faa0f283b3becb10449f4e1bc78b38 100644 --- a/paddle/pserver/LightNetwork.cpp +++ b/paddle/pserver/LightNetwork.cpp @@ -31,23 +31,23 @@ limitations under the License. */ #include "paddle/utils/Util.h" /// quick ack can reduce the latency of small message -P_DEFINE_bool(small_messages, - false, - "if message size is small, recommend set it True to enable quick " - "ack and no delay"); +DEFINE_bool(small_messages, + false, + "if message size is small, recommend set it True to enable quick " + "ack and no delay"); /// reasonable sock_send_buf_size can control the traffic injected into switch /// network. Injecting too many data into traffic could cause packets loss which /// cause long latency and degrade the efficiency of communication. -P_DEFINE_int32(sock_send_buf_size, - 1024 * 1024 * 40, - "restrict sock send buff size, can reduce network congestion if " - "set carefully"); +DEFINE_int32(sock_send_buf_size, + 1024 * 1024 * 40, + "restrict sock send buff size, can reduce network congestion if " + "set carefully"); /// reasonable size can hold bursted packets and reduce packets loss -P_DEFINE_int32(sock_recv_buf_size, - 1024 * 1024 * 40, - "restrict sock recv buff size"); +DEFINE_int32(sock_recv_buf_size, + 1024 * 1024 * 40, + "restrict sock recv buff size"); namespace paddle { diff --git a/paddle/pserver/ParameterClient2.cpp b/paddle/pserver/ParameterClient2.cpp index 86fd1c5276c97f2de86b8c8eb8627721611d099c..a97859f83fe6495b298e920346c964ef2a9b146c 100644 --- a/paddle/pserver/ParameterClient2.cpp +++ b/paddle/pserver/ParameterClient2.cpp @@ -20,8 +20,8 @@ limitations under the License. 
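The socket-tuning flags keep their names and semantics; only the macro spelling changes, so they are still set the usual gflags way. A minimal standalone sketch of reading one of them after parsing (paddle's own startup path wraps this; note that older gflags puts ParseCommandLineFlags in namespace google, newer releases in namespace gflags):

#include <cstdio>
#include <gflags/gflags.h>

DECLARE_int32(sock_send_buf_size);  // defined in LightNetwork.cpp

int main(int argc, char** argv) {
  // e.g. ./pserver --sock_send_buf_size=8388608 --small_messages=true
  google::ParseCommandLineFlags(&argc, &argv, /*remove_flags=*/true);
  std::printf("send buffer: %d bytes\n", FLAGS_sock_send_buf_size);
  return 0;
}
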
*/ #include "paddle/utils/Stat.h" #include "paddle/utils/StringUtil.h" -P_DEFINE_string(pservers, "127.0.0.1", "Comma separated addresses of pservers"); -P_DEFINE_int32(parallel_thread_num, 1, "Thread number for parameter send"); +DEFINE_string(pservers, "127.0.0.1", "Comma separated addresses of pservers"); +DEFINE_int32(parallel_thread_num, 1, "Thread number for parameter send"); namespace paddle { diff --git a/paddle/pserver/ParameterClient2.h b/paddle/pserver/ParameterClient2.h index 52553949498e1938c6dbbf8004946692cd1bfe0b..eed71ccb43b0fec76a74a7f00662c32c97c26ff4 100644 --- a/paddle/pserver/ParameterClient2.h +++ b/paddle/pserver/ParameterClient2.h @@ -34,7 +34,7 @@ limitations under the License. */ #include "ProtoServer.h" #include "SparseParameterDistribution.h" -P_DECLARE_int32(parallel_thread_num); +DECLARE_int32(parallel_thread_num); namespace paddle { diff --git a/paddle/pserver/ParameterServer2.cpp b/paddle/pserver/ParameterServer2.cpp index 2cb4c93535ffe2012abb45628cc0cfc329846fd9..856fa0ad1ab30e3fc554ac96dd3bed71b1548579 100644 --- a/paddle/pserver/ParameterServer2.cpp +++ b/paddle/pserver/ParameterServer2.cpp @@ -30,11 +30,11 @@ limitations under the License. */ #include "paddle/utils/GlobalConstants.h" #include "paddle/utils/Stat.h" -P_DEFINE_int32(pserver_num_threads, 1, "number of threads for sync op exec"); -P_DEFINE_double(async_lagged_ratio_min, - 1.0, - "control config_.async_lagged_grad_discard_ratio() min value"); -P_DEFINE_double( +DEFINE_int32(pserver_num_threads, 1, "number of threads for sync op exec"); +DEFINE_double(async_lagged_ratio_min, + 1.0, + "control config_.async_lagged_grad_discard_ratio() min value"); +DEFINE_double( async_lagged_ratio_default, 1.5, "if async_lagged_grad_discard_ratio is not set in trainer_config.conf" diff --git a/paddle/pserver/ParameterServer2.h b/paddle/pserver/ParameterServer2.h index 61c139981e479cf6528dfb44ab6ae2f82b33bcc5..b0cf22e1fb158e76fcee1ce6ef1f375995803ce6 100644 --- a/paddle/pserver/ParameterServer2.h +++ b/paddle/pserver/ParameterServer2.h @@ -38,7 +38,7 @@ limitations under the License. */ #include "ProtoServer.h" -P_DECLARE_int32(port); +DECLARE_int32(port); namespace paddle { diff --git a/paddle/pserver/SparseParameterDistribution.cpp b/paddle/pserver/SparseParameterDistribution.cpp index 0068f85b52be75fd2f958ad7a1e2ae76949b1fee..6dd725db30cd6d50539d1b2b30ab9e42a081c7b3 100644 --- a/paddle/pserver/SparseParameterDistribution.cpp +++ b/paddle/pserver/SparseParameterDistribution.cpp @@ -20,26 +20,26 @@ limitations under the License. 
*/ #include "SparseParameterDistribution.h" -P_DEFINE_bool(check_sparse_distribution_in_pserver, - false, - "check whether sparse parameter exhibts balanced distribution at " - "all pservers"); -P_DEFINE_bool(show_check_sparse_distribution_log, - false, - "show logs details for sparse parameter distribution in pserver"); -P_DEFINE_int32(check_sparse_distribution_batches, - 100, - "run sparse parameter distribution check for N batches"); -P_DEFINE_double( +DEFINE_bool(check_sparse_distribution_in_pserver, + false, + "check whether sparse parameter exhibts balanced distribution at " + "all pservers"); +DEFINE_bool(show_check_sparse_distribution_log, + false, + "show logs details for sparse parameter distribution in pserver"); +DEFINE_int32(check_sparse_distribution_batches, + 100, + "run sparse parameter distribution check for N batches"); +DEFINE_double( check_sparse_distribution_ratio, 0.6, "if parameters dispatched to different pservers exhibit unbalanced " " distribution for check_sparse_distribution_ratio * " " check_sparse_distribution_batches times, crash program"); -P_DEFINE_double(check_sparse_distribution_unbalance_degree, - 2.0, - "the ratio of maximum data size and minimun data size for " - "different pserver"); +DEFINE_double(check_sparse_distribution_unbalance_degree, + 2.0, + "the ratio of maximum data size and minimun data size for " + "different pserver"); namespace paddle { diff --git a/paddle/pserver/test/SocketTest.cpp b/paddle/pserver/test/SocketTest.cpp index 6e63c4f67848bea118adde2e3ef0b0c5b64086c9..066a6c02939695e7050a7693365d7c449f70e723 100644 --- a/paddle/pserver/test/SocketTest.cpp +++ b/paddle/pserver/test/SocketTest.cpp @@ -195,9 +195,9 @@ SocketClient::SocketClient(const std::string& serverAddr, int serverPort) { channel_.reset(new SocketChannel(sockfd)); } -P_DEFINE_string(server_addr, "127.0.0.1", "Server address"); -P_DEFINE_int64(dim, 10000000, "Data size"); -P_DEFINE_int32(loop_time, 100000, "test loop time"); +DEFINE_string(server_addr, "127.0.0.1", "Server address"); +DEFINE_int64(dim, 10000000, "Data size"); +DEFINE_int32(loop_time, 100000, "test loop time"); using namespace paddle; // NOLINT diff --git a/paddle/pserver/test/test_ParameterServer2.cpp b/paddle/pserver/test/test_ParameterServer2.cpp index 4257a2308d727fc60cb5b2e7e507ec86c90cd96b..8e7231a9e1aee7b61f8dfa42f1367b79fee81a2b 100644 --- a/paddle/pserver/test/test_ParameterServer2.cpp +++ b/paddle/pserver/test/test_ParameterServer2.cpp @@ -21,9 +21,9 @@ limitations under the License. */ using namespace paddle; // NOLINT using namespace std; // NOLINT -P_DECLARE_int32(num_gradient_servers); -P_DEFINE_string(server_addr, "127.0.0.1", "assign server address"); -P_DEFINE_int32(server_cpu, 0, "assign server cpu"); +DECLARE_int32(num_gradient_servers); +DEFINE_string(server_addr, "127.0.0.1", "assign server address"); +DEFINE_int32(server_cpu, 0, "assign server cpu"); class ParameterServer2Tester : public ParameterServer2 { public: diff --git a/paddle/pserver/test/test_ProtoServer.cpp b/paddle/pserver/test/test_ProtoServer.cpp index 3880dde5e3fb8a986a8fa870cb92a58d138d43de..9f86ee80f4e5cc99ea3597b3ed37a387578f032a 100644 --- a/paddle/pserver/test/test_ProtoServer.cpp +++ b/paddle/pserver/test/test_ProtoServer.cpp @@ -21,10 +21,10 @@ limitations under the License. 
*/ #include "paddle/pserver/ProtoServer.h" #include "paddle/utils/Stat.h" -P_DEFINE_string(server_addr, "127.0.0.1", "Server address"); -P_DEFINE_int64(dim, 50000000, "Data size"); -P_DEFINE_bool(test_proto_server, true, "whether to test ProtoServer"); -P_DEFINE_bool(benchmark, false, "Do benchmark. Skip some tests"); +DEFINE_string(server_addr, "127.0.0.1", "Server address"); +DEFINE_int64(dim, 50000000, "Data size"); +DEFINE_bool(test_proto_server, true, "whether to test ProtoServer"); +DEFINE_bool(benchmark, false, "Do benchmark. Skip some tests"); using namespace paddle; // NOLINT diff --git a/paddle/scripts/submit_local.sh.in b/paddle/scripts/submit_local.sh.in index ace2c0dee972e338001a0e5a4045c32e64ff157e..283fd34a6d8a2268f3800ec69920e128ac75e7dc 100644 --- a/paddle/scripts/submit_local.sh.in +++ b/paddle/scripts/submit_local.sh.in @@ -21,8 +21,6 @@ function version(){ echo " with_double: @WITH_DOUBLE@" echo " with_python: @WITH_PYTHON@" echo " with_rdma: @WITH_RDMA@" - echo " with_glog: @WITH_GLOG@" - echo " with_gflags: @WITH_GFLAGS@" echo " with_metric_learning: @WITH_METRIC@" echo " with_timer: @WITH_TIMER@" echo " with_predict_sdk: @WITH_PREDICT_SDK@" diff --git a/paddle/trainer/MergeModel.cpp b/paddle/trainer/MergeModel.cpp index 1cf29a39b92cc26fa6706f857edc1eb2dff29a21..91d89b61a32259b8bbe70fda2579f87ec6b9af00 100644 --- a/paddle/trainer/MergeModel.cpp +++ b/paddle/trainer/MergeModel.cpp @@ -19,8 +19,8 @@ limitations under the License. */ #include "paddle/pserver/ParameterServer2.h" #include "paddle/utils/PythonUtil.h" -P_DEFINE_string(model_dir, "", "Directory for separated model files"); -P_DEFINE_string(model_file, "", "File for merged model file"); +DEFINE_string(model_dir, "", "Directory for separated model files"); +DEFINE_string(model_file, "", "File for merged model file"); using namespace paddle; // NOLINT using namespace std; // NOLINT diff --git a/paddle/trainer/RemoteParameterUpdater.cpp b/paddle/trainer/RemoteParameterUpdater.cpp index b7f7b93b8df091f7ccbe19a639295cba0554399a..974e78fa17d6564414962475f81497491bbb0482 100644 --- a/paddle/trainer/RemoteParameterUpdater.cpp +++ b/paddle/trainer/RemoteParameterUpdater.cpp @@ -17,8 +17,8 @@ limitations under the License. */ #include "paddle/utils/GlobalConstants.h" #include "paddle/utils/Stat.h" -P_DECLARE_int32(trainer_id); -P_DECLARE_string(save_dir); +DECLARE_int32(trainer_id); +DECLARE_string(save_dir); namespace paddle { diff --git a/paddle/trainer/ThreadParameterUpdater.cpp b/paddle/trainer/ThreadParameterUpdater.cpp index bee7f061fed3a01e8292137272c3288334ef70c2..9caa92a4d7557c0c8633d881820862bbbd5df87e 100644 --- a/paddle/trainer/ThreadParameterUpdater.cpp +++ b/paddle/trainer/ThreadParameterUpdater.cpp @@ -19,7 +19,7 @@ limitations under the License. */ #include "paddle/math/SparseRowMatrix.h" #include "paddle/utils/Thread.h" -P_DECLARE_int32(trainer_count); +DECLARE_int32(trainer_count); namespace paddle { diff --git a/paddle/trainer/Trainer.cpp b/paddle/trainer/Trainer.cpp index 85610ec04e3f64dc83575426922ac936a604b3a7..1eec2c432d235ef484b688db08aae8a39f878a85 100644 --- a/paddle/trainer/Trainer.cpp +++ b/paddle/trainer/Trainer.cpp @@ -38,60 +38,56 @@ limitations under the License. */ #include "paddle/gserver/gradientmachines/NeuralNetwork.h" #include "paddle/gserver/layers/ValidationLayer.h" -P_DEFINE_string(config, "", "Trainer config file"); - -P_DEFINE_int32(test_period, - 0, - "if equal 0, do test on all test data at the end of " - "each pass. 
While if equal non-zero, do test on all test " - "data every test_period batches"); -P_DEFINE_bool(test_all_data_in_one_period, - false, - "This option was deprecated, since we will always do " - "test on all test set "); - -P_DEFINE_bool(local, true, "Train in local mode or not"); - -P_DEFINE_int32(average_test_period, - 0, - "Do test on average parameter every so" - " many batches. MUST be devided by FLAGS_log_period." - " Default 0 means do not test average parameter"); - -P_DEFINE_int32(saving_period, 1, "Save parameteres every so many passes"); -P_DEFINE_int64(saving_period_by_batches, - 0, - "Save parameters every so many batches in one pass"); -P_DEFINE_string(save_dir, "", "Directory for saving model parameter"); -P_DEFINE_int32(start_pass, - 0, - "Start training from this pass. " - "Will load parameter from the previous pass"); -P_DEFINE_int32(test_pass, - -1, - "Will load parameter start from this pass to test"); -P_DEFINE_int32(test_wait, 0, "Waiting for pass parameter if not exist"); -P_DEFINE_bool(with_cost, true, "enable cost layer or not"); -P_DEFINE_bool(distribute_test, false, "test in distribute mode"); - -P_DEFINE_int32(num_passes, 100, "train for so many passes"); - -P_DEFINE_string(config_args, - "", - "arguments passed to config file." - "Format: key1=value1,key2=value2"); - -P_DEFINE_bool(save_only_one, - false, - "Save only parameters in last pass, remove previous."); - -P_DEFINE_string(feat_file, "", "File name of extracted feature."); -P_DEFINE_string(predict_output_dir, - "", - "Directory that saves the predicted results of output layers"); -P_DEFINE_string(model_list, - "", - "File that saves the model list when evaluation"); +DEFINE_string(config, "", "Trainer config file"); + +DEFINE_int32(test_period, + 0, + "if equal 0, do test on all test data at the end of " + "each pass. While if equal non-zero, do test on all test " + "data every test_period batches"); +DEFINE_bool(test_all_data_in_one_period, + false, + "This option was deprecated, since we will always do " + "test on all test set "); + +DEFINE_bool(local, true, "Train in local mode or not"); + +DEFINE_int32(average_test_period, + 0, + "Do test on average parameter every so" + " many batches. MUST be devided by FLAGS_log_period." + " Default 0 means do not test average parameter"); + +DEFINE_int32(saving_period, 1, "Save parameteres every so many passes"); +DEFINE_int64(saving_period_by_batches, + 0, + "Save parameters every so many batches in one pass"); +DEFINE_string(save_dir, "", "Directory for saving model parameter"); +DEFINE_int32(start_pass, + 0, + "Start training from this pass. " + "Will load parameter from the previous pass"); +DEFINE_int32(test_pass, -1, "Will load parameter start from this pass to test"); +DEFINE_int32(test_wait, 0, "Waiting for pass parameter if not exist"); +DEFINE_bool(with_cost, true, "enable cost layer or not"); +DEFINE_bool(distribute_test, false, "test in distribute mode"); + +DEFINE_int32(num_passes, 100, "train for so many passes"); + +DEFINE_string(config_args, + "", + "arguments passed to config file." 
+ "Format: key1=value1,key2=value2"); + +DEFINE_bool(save_only_one, + false, + "Save only parameters in last pass, remove previous."); + +DEFINE_string(feat_file, "", "File name of extracted feature."); +DEFINE_string(predict_output_dir, + "", + "Directory that saves the predicted results of output layers"); +DEFINE_string(model_list, "", "File that saves the model list when evaluation"); namespace paddle { diff --git a/paddle/trainer/Trainer.h b/paddle/trainer/Trainer.h index cabbb4acd1135f74aca0a4a36116db412c2e89f9..7cbf18ace7a5fed053653c73e62d36c388b15123 100644 --- a/paddle/trainer/Trainer.h +++ b/paddle/trainer/Trainer.h @@ -34,7 +34,7 @@ limitations under the License. */ #include "paddle/internals/metric_learning/MetricTrainer.h" #endif -P_DECLARE_int32(num_passes); +DECLARE_int32(num_passes); namespace paddle { diff --git a/paddle/trainer/TrainerBenchmark.cpp b/paddle/trainer/TrainerBenchmark.cpp index 5c3177c8083e11ebf6b6854f86dbee8299d7e3b1..173653c81688fe4606731c68ea1854268b3f4590 100644 --- a/paddle/trainer/TrainerBenchmark.cpp +++ b/paddle/trainer/TrainerBenchmark.cpp @@ -18,9 +18,9 @@ limitations under the License. */ #include "paddle/utils/Stat.h" #include "paddle/utils/Util.h" -P_DECLARE_int32(test_period); +DECLARE_int32(test_period); -P_DEFINE_bool(feed_data, false, "Wether to read data from DataProvider."); +DEFINE_bool(feed_data, false, "Wether to read data from DataProvider."); namespace paddle { diff --git a/paddle/trainer/TrainerConfigHelper.cpp b/paddle/trainer/TrainerConfigHelper.cpp index 2017a08d20d494cbce8c3beba564ed07c1d7cc73..60ac8459a12db801321da4a9d9c1d48ac8bd6d16 100644 --- a/paddle/trainer/TrainerConfigHelper.cpp +++ b/paddle/trainer/TrainerConfigHelper.cpp @@ -18,16 +18,16 @@ limitations under the License. */ #include "paddle/utils/Flags.h" #include "paddle/utils/PythonUtil.h" -P_DECLARE_string(config); -P_DECLARE_string(init_model_path); -P_DECLARE_int32(start_pass); -P_DECLARE_string(save_dir); -P_DECLARE_int32(trainer_id); -P_DECLARE_bool(local); -P_DECLARE_bool(with_cost); -P_DECLARE_bool(with_gpu); -P_DECLARE_bool(parallel_nn); -P_DECLARE_string(config_args); +DECLARE_string(config); +DECLARE_string(init_model_path); +DECLARE_int32(start_pass); +DECLARE_string(save_dir); +DECLARE_int32(trainer_id); +DECLARE_bool(local); +DECLARE_bool(with_cost); +DECLARE_bool(with_gpu); +DECLARE_bool(parallel_nn); +DECLARE_string(config_args); const char *kConfigParserModuleName = "paddle.trainer.config_parser"; const char *kConfigParserFuncName = "parse_config_and_serialize"; diff --git a/paddle/trainer/TrainerInternalConfig.cpp b/paddle/trainer/TrainerInternalConfig.cpp index a017cdec9d06a51ddf0925280f3b60cc2dc1c17a..039fcdb524527d5e8bfa829fc403b6f2fa789991 100644 --- a/paddle/trainer/TrainerInternalConfig.cpp +++ b/paddle/trainer/TrainerInternalConfig.cpp @@ -14,17 +14,17 @@ limitations under the License. */ #include "TrainerInternalConfig.h" -P_DEFINE_int32(show_parameter_stats_period, - 0, - "Whether to show parameter stats during training"); +DEFINE_int32(show_parameter_stats_period, + 0, + "Whether to show parameter stats during training"); -P_DEFINE_int32(dot_period, 1, "Print '.' every so many batches"); +DEFINE_int32(dot_period, 1, "Print '.' 
every so many batches"); -P_DEFINE_bool(use_old_updater, false, "Use the old RemoteParameterUpdater"); +DEFINE_bool(use_old_updater, false, "Use the old RemoteParameterUpdater"); -P_DECLARE_int32(num_passes); +DECLARE_int32(num_passes); -P_DECLARE_bool(local); +DECLARE_bool(local); namespace paddle { diff --git a/paddle/trainer/TrainerMain.cpp b/paddle/trainer/TrainerMain.cpp index 0a4d56b892a2a6753f83105170ad3162da59264c..947f9cadcc983d58ce31ef462e51dc42e41eaf1b 100644 --- a/paddle/trainer/TrainerMain.cpp +++ b/paddle/trainer/TrainerMain.cpp @@ -22,21 +22,20 @@ limitations under the License. */ #include "Trainer.h" #include "paddle/pserver/RDMANetwork.h" -P_DEFINE_bool(start_pserver, false, "Whether to start pserver"); -P_DECLARE_int32(gpu_id); -P_DEFINE_string(job, "train", "one of (train, test, checkgrad)"); -P_DECLARE_int32(start_pass); -P_DECLARE_string(config); -P_DECLARE_string(init_model_path); -P_DECLARE_string(rdma_tcp); +DEFINE_bool(start_pserver, false, "Whether to start pserver"); +DECLARE_int32(gpu_id); +DEFINE_string(job, "train", "one of (train, test, checkgrad)"); +DECLARE_int32(start_pass); +DECLARE_string(config); +DECLARE_string(init_model_path); +DECLARE_string(rdma_tcp); using namespace paddle; // NOLINT int main(int argc, char** argv) { -// write logs instantly (never buffer log messages) -#ifdef PADDLE_USE_GLOG + // write logs instantly (never buffer log messages) FLAGS_logbuflevel = -1; -#endif + initMain(argc, argv); initPython(argc, argv); diff --git a/paddle/trainer/tests/test_Compare.cpp b/paddle/trainer/tests/test_Compare.cpp index 63fa48540cca81f6b463b50c4af9ea3259a7301d..72fc76bea35e433eeb08ba625b4bf6afdda491fb 100644 --- a/paddle/trainer/tests/test_Compare.cpp +++ b/paddle/trainer/tests/test_Compare.cpp @@ -24,10 +24,10 @@ using namespace std; // NOLINT static const string& configFile = "trainer/tests/sample_trainer_config.conf"; -P_DECLARE_int32(gpu_id); -P_DECLARE_bool(use_gpu); -P_DECLARE_string(config); -P_DECLARE_string(config_args); +DECLARE_int32(gpu_id); +DECLARE_bool(use_gpu); +DECLARE_string(config); +DECLARE_string(config_args); struct comData { vector outArgs; diff --git a/paddle/trainer/tests/test_CompareSparse.cpp b/paddle/trainer/tests/test_CompareSparse.cpp index 3fea3a3c24303b84f78f4029b0ed8e42e419c442..a7000eb77e1bbeab4f6e38c0322f82bde7164080 100644 --- a/paddle/trainer/tests/test_CompareSparse.cpp +++ b/paddle/trainer/tests/test_CompareSparse.cpp @@ -25,22 +25,22 @@ using namespace std; // NOLINT static const string& configFile1 = "trainer/tests/sample_trainer_config_qb_rnn.conf"; -P_DECLARE_bool(use_gpu); -P_DECLARE_string(config); -P_DECLARE_int32(gpu_id); -P_DECLARE_int32(seed); -P_DECLARE_int32(num_passes); -P_DECLARE_int32(saving_period); - -P_DECLARE_int32(num_gradient_servers); -P_DECLARE_int32(port); -P_DECLARE_bool(local); -P_DECLARE_bool(use_old_updater); -P_DECLARE_bool(parallel_nn); -P_DECLARE_string(config_args); -P_DEFINE_double(max_diff_ratio, - 0.0f, - "max diff ratio allowed for parameters value"); +DECLARE_bool(use_gpu); +DECLARE_string(config); +DECLARE_int32(gpu_id); +DECLARE_int32(seed); +DECLARE_int32(num_passes); +DECLARE_int32(saving_period); + +DECLARE_int32(num_gradient_servers); +DECLARE_int32(port); +DECLARE_bool(local); +DECLARE_bool(use_old_updater); +DECLARE_bool(parallel_nn); +DECLARE_string(config_args); +DEFINE_double(max_diff_ratio, + 0.0f, + "max diff ratio allowed for parameters value"); int gNumDevices = 0; diff --git a/paddle/trainer/tests/test_CompareTwoNets.cpp 
b/paddle/trainer/tests/test_CompareTwoNets.cpp index 8a4556721dda3f73fb11b36e5fdf798df3993ce8..80c61e259e71dd31d7637072248b22a2910c532e 100644 --- a/paddle/trainer/tests/test_CompareTwoNets.cpp +++ b/paddle/trainer/tests/test_CompareTwoNets.cpp @@ -22,25 +22,25 @@ limitations under the License. */ using namespace paddle; // NOLINT using namespace std; // NOLINT -P_DECLARE_int32(gpu_id); +DECLARE_int32(gpu_id); -P_DECLARE_bool(local); -P_DECLARE_bool(use_gpu); +DECLARE_bool(local); +DECLARE_bool(use_gpu); -P_DECLARE_string(config); -P_DECLARE_string(nics); +DECLARE_string(config); +DECLARE_string(nics); -P_DEFINE_string(config_file_a, "", "config of one network to compare"); -P_DEFINE_string(config_file_b, "", "config of another network to compare"); -P_DEFINE_bool(need_high_accuracy, - false, - "whether need to run in double accuracy"); -P_DEFINE_double( +DEFINE_string(config_file_a, "", "config of one network to compare"); +DEFINE_string(config_file_b, "", "config of another network to compare"); +DEFINE_bool(need_high_accuracy, + false, + "whether need to run in double accuracy"); +DEFINE_double( max_diff_ratio, 0.0f, "max diff ratio allowed for outputs and parameters (value/gradient)"); -P_DECLARE_bool(thread_local_rand_use_global_seed); -P_DECLARE_int32(seed); +DECLARE_bool(thread_local_rand_use_global_seed); +DECLARE_int32(seed); struct ComData { vector outArgs; diff --git a/paddle/trainer/tests/test_CompareTwoOpts.cpp b/paddle/trainer/tests/test_CompareTwoOpts.cpp index 673ef289d8f5bfc0f1d6db58eb7d4e7ecba31ae3..383505f8131264844069d6f0fa13f4e0ac1f97af 100644 --- a/paddle/trainer/tests/test_CompareTwoOpts.cpp +++ b/paddle/trainer/tests/test_CompareTwoOpts.cpp @@ -22,20 +22,20 @@ limitations under the License. */ using namespace paddle; // NOLINT using namespace std; // NOLINT -P_DECLARE_int32(gpu_id); +DECLARE_int32(gpu_id); -P_DECLARE_bool(local); -P_DECLARE_bool(use_gpu); +DECLARE_bool(local); +DECLARE_bool(use_gpu); -P_DECLARE_string(config); -P_DECLARE_string(nics); +DECLARE_string(config); +DECLARE_string(nics); -P_DEFINE_string(config_file_a, "", "config of one network to compare"); -P_DEFINE_string(config_file_b, "", "config of another network to compare"); -P_DEFINE_bool(need_high_accuracy, - true, - "whether need to run in double accuracy (recommended)"); -P_DEFINE_double( +DEFINE_string(config_file_a, "", "config of one network to compare"); +DEFINE_string(config_file_b, "", "config of another network to compare"); +DEFINE_bool(need_high_accuracy, + true, + "whether need to run in double accuracy (recommended)"); +DEFINE_double( max_diff_ratio, 0.0f, "max diff ratio allowed for outputs and parameters (value/gradient)"); diff --git a/paddle/trainer/tests/test_Prediction.cpp b/paddle/trainer/tests/test_Prediction.cpp index 322121a579440fcf164c042b3265e5d2878e3732..0c79404eee1c0902c5c8e8eefd139da3da584636 100644 --- a/paddle/trainer/tests/test_Prediction.cpp +++ b/paddle/trainer/tests/test_Prediction.cpp @@ -18,11 +18,11 @@ limitations under the License. 
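Both comparison tests gate their assertions on max_diff_ratio, a relative rather than absolute tolerance. The helper below is invented for illustration (the real checks are inline in the test bodies), but it shows the arithmetic such a ratio implies:

#include <algorithm>
#include <cmath>

// true when a and b differ by at most maxDiffRatio, relative to the larger
bool withinRatio(double a, double b, double maxDiffRatio) {
  const double diff = std::fabs(a - b);
  const double base = std::max(std::fabs(a), std::fabs(b));
  return base == 0.0 ? diff == 0.0 : (diff / base) <= maxDiffRatio;
}
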
*/ #include -P_DECLARE_string(config); -P_DECLARE_string(config_args); -P_DEFINE_string(merger, - "./paddle_merge_model", - "path to paddle_merge_model binary"); +DECLARE_string(config); +DECLARE_string(config_args); +DEFINE_string(merger, + "./paddle_merge_model", + "path to paddle_merge_model binary"); using namespace paddle; // NOLINT using namespace std; // NOLINT diff --git a/paddle/trainer/tests/test_Trainer.cpp b/paddle/trainer/tests/test_Trainer.cpp index 0fede59f8d8d62edcdb2d030952d0e738452160a..371282dd6bb9a995bc6ae8b2a5bd708f831d7e33 100644 --- a/paddle/trainer/tests/test_Trainer.cpp +++ b/paddle/trainer/tests/test_Trainer.cpp @@ -28,10 +28,10 @@ static const string& configFile3 = "trainer/tests/chunking.conf"; static const string& configFile4 = "trainer/tests/sample_trainer_config_parallel.conf"; -P_DECLARE_bool(use_gpu); -P_DECLARE_string(config); -P_DECLARE_int32(gpu_id); -P_DECLARE_bool(allow_only_one_model_on_one_gpu); +DECLARE_bool(use_gpu); +DECLARE_string(config); +DECLARE_int32(gpu_id); +DECLARE_bool(allow_only_one_model_on_one_gpu); void checkGradientTest(const string& configFile, bool useGpu, diff --git a/paddle/trainer/tests/test_TrainerOnePass.cpp b/paddle/trainer/tests/test_TrainerOnePass.cpp index 0b587ecce176d5b6e8e6e6c4a54cb21fc4e25a67..ee21008aec56da289dab88f72f57a1703e392fad 100644 --- a/paddle/trainer/tests/test_TrainerOnePass.cpp +++ b/paddle/trainer/tests/test_TrainerOnePass.cpp @@ -27,12 +27,12 @@ static const string& configFile1 = "trainer/tests/sample_trainer_config.conf"; static const string& configFile2 = "trainer/tests/sample_trainer_config_parallel.conf"; -P_DECLARE_bool(use_gpu); -P_DECLARE_string(config); -P_DECLARE_int32(gpu_id); -P_DECLARE_int32(seed); -P_DECLARE_int32(num_passes); -P_DECLARE_int32(saving_period); +DECLARE_bool(use_gpu); +DECLARE_string(config); +DECLARE_int32(gpu_id); +DECLARE_int32(seed); +DECLARE_int32(num_passes); +DECLARE_int32(saving_period); class TrainerForTest : public paddle::Trainer { public: @@ -122,10 +122,10 @@ TEST(average_window_cpu, gpu4) { #endif // 3. test trainer + pserver. -P_DECLARE_int32(num_gradient_servers); -P_DECLARE_int32(port); -P_DECLARE_bool(local); -P_DECLARE_bool(use_old_updater); +DECLARE_int32(num_gradient_servers); +DECLARE_int32(port); +DECLARE_bool(local); +DECLARE_bool(use_old_updater); double checkRemoteParameterUpdater(TrainerForTest& trainer) { auto gradientMachine = trainer.getGradientMachine(); diff --git a/paddle/trainer/tests/test_recurrent_machine_generation.cpp b/paddle/trainer/tests/test_recurrent_machine_generation.cpp index 7d8dfd788fd6a336379a96ea5324b8e6f60705ea..03446b3b2f6d5ff42fbf0d735a24d88bd0429747 100644 --- a/paddle/trainer/tests/test_recurrent_machine_generation.cpp +++ b/paddle/trainer/tests/test_recurrent_machine_generation.cpp @@ -30,7 +30,7 @@ static string modelDir = "trainer/tests/rnn_gen_test_model_dir/t1"; // NOLINT static string expectFile = // NOLINT "trainer/tests/rnn_gen_test_model_dir/r1.test"; // NOLINT -P_DECLARE_string(config_args); +DECLARE_string(config_args); vector readRetFile(const string& fname) { ifstream inFile(fname); diff --git a/paddle/utils/BarrierStat.cpp b/paddle/utils/BarrierStat.cpp index 9dde155aca0ec67cca7a0fb8ba9bce4732ffbfa7..a6dbdcae3f32c894d35e8114488d4a3264c6c5f2 100644 --- a/paddle/utils/BarrierStat.cpp +++ b/paddle/utils/BarrierStat.cpp @@ -20,15 +20,15 @@ limitations under the License. 
*/ #include "paddle/utils/Flags.h" #include "paddle/utils/Stat.h" -P_DEFINE_bool(log_barrier_abstract, - true, - "if true, show abstract of barrier performance"); -P_DEFINE_int32(log_barrier_lowest_nodes, - 5, - "how many lowest node will be logged"); -P_DEFINE_bool(log_barrier_show_log, - false, // for performance tuning insight - "if true, always show barrier abstract even with little gap"); +DEFINE_bool(log_barrier_abstract, + true, + "if true, show abstract of barrier performance"); +DEFINE_int32(log_barrier_lowest_nodes, + 5, + "how many lowest node will be logged"); +DEFINE_bool(log_barrier_show_log, + false, // for performance tuning insight + "if true, always show barrier abstract even with little gap"); namespace paddle { diff --git a/paddle/utils/CommandLineParser.cpp b/paddle/utils/CommandLineParser.cpp index 51558b45a143c87be1524cabc0b8a98e8f8bc997..63f16bc54c575a0d5ae02141be3c467ee784b095 100644 --- a/paddle/utils/CommandLineParser.cpp +++ b/paddle/utils/CommandLineParser.cpp @@ -13,220 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. */ #include "CommandLineParser.h" -#ifndef PADDLE_USE_GFLAGS -#include -#include -#include -#include -#include -#include -#include -#include -#include "paddle/utils/StringUtil.h" -namespace paddle { - -static constexpr int kStatusOK = 0; -static constexpr int kStatusInvalid = 1; -static constexpr int kStatusNotFound = 2; - -/** - * \brief: Convert a string to any type value. - * - * \note: It will specialize by type T that is supported. - */ -template -bool StringToValue(const std::string& content, T* value) { - bool ok; - *value = str::toWithStatus(content, &ok); - return ok; -} - -template <> -bool StringToValue(const std::string& content, bool* value) { - std::string tmp = content; - - std::transform(tmp.begin(), tmp.end(), tmp.begin(), [](char in) -> char { - if (in <= 'Z' && in >= 'A') { - return in - ('Z' - 'z'); - } else { - return in; - } - }); // tolower. - - if (tmp == "true" || tmp == "1") { - *value = true; - return true; - } else if (tmp == "false" || tmp == "0") { - *value = false; - return true; - } else { - return false; - } -} - -template <> -bool StringToValue(const std::string& content, - std::string* value) { - *value = content; - return true; -} - -/** - * \brief Parse argument "--blah=blah". - * - * \param argument: The command line argument string, such as "--blah=blah" - * \param [out] extraInfo: The details error message for parse argument. 
- * \return: kStatusOK, kStatusInvalid, kStatusNotFound - */ -template -int ParseArgument(const std::string& argument, std::string* extraInfo) { - for (auto& command : - flags_internal::CommandLineFlagRegistry::Instance()->commands) { - std::string& name = command.name; - T* value = command.value; - - std::string prefix = "--"; - prefix += name; - prefix += "="; - std::string content; - if (str::startsWith(argument, prefix)) { - content = argument.substr(prefix.size(), argument.size() - prefix.size()); - } else { - prefix = "-"; - prefix += name; - prefix += "="; - if (str::startsWith(argument, prefix)) { - content = - argument.substr(prefix.size(), argument.size() - prefix.size()); - } - } - - if (!content.empty()) { - if (StringToValue(content, value)) { - return kStatusOK; - } else { - *extraInfo = name; - return kStatusInvalid; - } - } - } - return kStatusNotFound; -} - -/** - * @brief ParseBoolArgumentExtra - * parse '--flag_name', '-flag_name' as true; '--noflag_name', '-noflag_name' as - * false - */ -static int ParseBoolArgumentExtra(const std::string& argument, - std::string* extraInfo) { - (void)(extraInfo); // unused extraInfo, just make api same. - - //! @warning: The order and content of prefixes is DESIGNED for parsing - //! command line. The length of prefixes are 1, 2, 3, 4. The parse logic takes - //! use of this fact. DO NOT CHANGE IT without reading how to parse command - //! below. - static const std::vector> prefixes = { - {"-", true}, {"--", true}, {"-no", false}, {"--no", false}}; - - for (flags_internal::CommandLineFlagRegistry::Command& command : - flags_internal::CommandLineFlagRegistry::Instance()->commands) { - if (argument.size() > command.name.size()) { - //! Use the length of prefix is 1, 2, 3, 4. - size_t diff = argument.size() - command.name.size() - 1UL; - if (diff < prefixes.size()) { - const std::string& prefix = std::get<0>(prefixes[diff]); - if (argument == prefix + command.name) { - *command.value = std::get<1>(prefixes[diff]); - return kStatusOK; - } - } - } - } - return kStatusNotFound; -} - -/** - * \brief: Print command line arguments' usage with type T. - */ -template -static void PrintTypeUsage() { - for (auto& command : - flags_internal::CommandLineFlagRegistry::Instance()->commands) { - std::string& name = command.name; - name = "--" + name; // Program will exit, so modify name is safe. - std::string& desc = command.text; - T& defaultValue = command.defaultValue; - std::cerr << std::setw(20) << name << ": " << desc - << "[default:" << defaultValue << "]." << std::endl; - } -} - -template -static void PrintTypeUsages() { - int unused[] = {0, (PrintTypeUsage(), 0)...}; - (void)(unused); -} -/** - * \brief: Print all usage, and exit(1) - */ -static void PrintUsageAndExit(const char* argv0) { - std::cerr << "Program " << argv0 << " Flags: " << std::endl; - PrintTypeUsages(); - exit(1); -} - -/** - * \brief: Print the error flags, usage, and exit. - */ -static void PrintParseError(const std::string& name, - const char* actualInput, - const char* arg0) { - std::cerr << "Parse command flag " << name << " error! 
User input is " - << actualInput << std::endl; - PrintUsageAndExit(arg0); -} - -void ParseCommandLineFlags(int* argc, char** argv, bool withHelp) { - int unused_argc = 1; - std::string extra; - for (int i = 1; i < *argc; ++i) { - std::string arg = argv[i]; - int s = kStatusInvalid; -#define ParseArgumentWithType(type) \ - s = ParseArgument(arg, &extra); \ - if (s == kStatusOK) { \ - continue; \ - } else if (s == kStatusInvalid) { \ - PrintParseError(extra, argv[i], argv[0]); \ - } - - ParseArgumentWithType(bool); // NOLINT - ParseArgumentWithType(int32_t); - ParseArgumentWithType(double); // NOLINT - ParseArgumentWithType(int64_t); - ParseArgumentWithType(uint64_t); - ParseArgumentWithType(std::string); - -#undef ParseArgumentWithType - s = ParseBoolArgumentExtra(arg, &extra); - if (s == kStatusOK) { - continue; - } - - if (withHelp && (arg == "--help" || arg == "-h")) { - PrintUsageAndExit(argv[0]); - } - - // NOT Found for all flags. - std::swap(argv[unused_argc++], argv[i]); - } - *argc = unused_argc; -} - -} // namespace paddle -#else namespace paddle { #ifndef GFLAGS_NS #define GFLAGS_NS google @@ -243,4 +30,3 @@ void ParseCommandLineFlags(int* argc, char** argv, bool withHelp) { } } // namespace paddle -#endif diff --git a/paddle/utils/CommandLineParser.h b/paddle/utils/CommandLineParser.h index b4449c6f095f101847c029e02fb0cb087f12f754..4e89f90bb910cee1adc7fb8dace81ff58435351f 100644 --- a/paddle/utils/CommandLineParser.h +++ b/paddle/utils/CommandLineParser.h @@ -13,167 +13,10 @@ See the License for the specific language governing permissions and limitations under the License. */ #pragma once -#ifndef PADDLE_USE_GFLAGS -#include -#include -#include -#include "DisableCopy.h" -namespace paddle { - -namespace flags_internal { - -/** - * Command line flag registry for special type T. It will store all command - * arguments settings. such as name, default value. - */ -template -struct CommandLineFlagRegistry { - /** - * The factory method of CommandLineFlagRegistry - * - * \return: The singleton instance of CommandLineFlagRegistry. - */ - static CommandLineFlagRegistry* Instance() { - static CommandLineFlagRegistry instance_; - return &instance_; - } - - struct Command { - /// name of argument. - std::string name; - /// address of actual variable. such as FLAGS_xxx. - T* value; - /// usage text. - std::string text; - /// default value of this command. - T defaultValue; - }; - - /// the command line arguments of type T. - std::vector commands; - - DISABLE_COPY(CommandLineFlagRegistry); - -private: - inline CommandLineFlagRegistry() {} -}; - -/** - *Helper class to register command line flag. - */ -template -struct CommandLineFlagRegister { - /** - * \brief: Register a command line argument - * - * \param [in] name: The command line name. - * \param [inout] val: The command line argument instance, FLAGS_xxx. - * \param [in] desc: The command line helper message. - */ - CommandLineFlagRegister(const std::string& name, - T* val, - const std::string desc) { - CommandLineFlagRegistry::Instance()->commands.push_back( - {name, val, desc, *val}); - } -}; - -/** - * \brief: Define a command line arguments. - * - * \param type: The variable type, such as int, double, etc. - * \param name: The variable name. The command line argument is '--name', the - *variable - *is 'FLAGS_name' - * \param default_value: The default value of command line argument. - * \param text: The description in command line argument. 
- */ -#define PADDLE_DEFINE_variable(type, name, default_value, text) \ - type FLAGS_##name = default_value; \ - namespace paddle_flags_internal { \ - paddle::flags_internal::CommandLineFlagRegister \ - flags_internal_var_##name(#name, &FLAGS_##name, text); \ - } // namespace paddle_flags_internal - -/** - * Declare a variable to use. - */ -#define PADDLE_DECLARE_variable(type, name) extern type FLAGS_##name; - -// DEFINE macro for each types. -#define P_DEFINE_int32(name, default_value, text) \ - PADDLE_DEFINE_variable(int32_t, name, default_value, text) - -#define P_DEFINE_bool(name, default_value, text) \ - PADDLE_DEFINE_variable(bool, name, default_value, text) - -#define P_DEFINE_string(name, default_value, text) \ - PADDLE_DEFINE_variable(std::string, name, default_value, text) - -#define P_DEFINE_double(name, default_value, text) \ - PADDLE_DEFINE_variable(double, name, default_value, text) - -#define P_DEFINE_int64(name, default_value, text) \ - PADDLE_DEFINE_variable(int64_t, name, default_value, text) - -#define P_DEFINE_uint64(name, default_value, text) \ - PADDLE_DEFINE_variable(uint64_t, name, default_value, text) - -// Declare macro for each types. -#define P_DECLARE_int32(name) PADDLE_DECLARE_variable(int32_t, name) -#define P_DECLARE_bool(name) PADDLE_DECLARE_variable(bool, name) -#define P_DECLARE_string(name) PADDLE_DECLARE_variable(std::string, name) -#define P_DECLARE_double(name) PADDLE_DECLARE_variable(double, name) -#define P_DECLARE_int64(name) PADDLE_DECLARE_variable(int64_t, name) -#define P_DECLARE_uint64(name) PADDLE_DECLARE_variable(uint64_t, name) -} // namespace flags_internal - -/** - * \brief Parse command line flags. If parse error, just failed and exit 1. - * - * \param [inout] argc: The command argument count. This method will modify - *argc, and left unused arguments. - * \param [inout] argv: The command argument values. This method will modify - *argv, and left unused arguments. - * \param [in] withHelp: True will parse '-h' and '--help' to print usage. - * - * \note: The Command line flags format basically as follow: - * - * * If the type of flag is not bool, then the follow format of command line - * will be parsed: - * * --flag_name=value - * * -flag_name=value - * - * * If the flag is bool, then: - * * --flag_name=value, -flag_name=value will be parsed. - * * if value.tolower() == "true"| "1" will be treated as true. - * * else if value.tolower() == "false" | "0" will be treated as false. - * * --flag_name will be parsed as true. - * * --noflag_name will be parsed as false. - */ -void ParseCommandLineFlags(int* argc, char** argv, bool withHelp = true); - -} // namespace paddle - -#else // if use gflags. 
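With the hand-rolled registry gone, CommandLineParser keeps only its gflags branch. The surviving body sits largely outside the visible context, so the following is a plausible reconstruction under that assumption, not a verbatim copy; GFLAGS_NS is the namespace shim the CMake changes inject (google for older gflags, gflags for newer):

#include <gflags/gflags.h>

#ifndef GFLAGS_NS
#define GFLAGS_NS google
#endif

namespace paddle {
void ParseCommandLineFlags(int* argc, char** argv, bool withHelp) {
  (void)withHelp;  // gflags implements --help itself
  GFLAGS_NS::ParseCommandLineFlags(argc, &argv, /*remove_flags=*/true);
}
}  // namespace paddle
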
#include -#define P_DEFINE_int32 DEFINE_int32 -#define P_DEFINE_bool DEFINE_bool -#define P_DEFINE_string DEFINE_string -#define P_DEFINE_double DEFINE_double -#define P_DEFINE_int64 DEFINE_int64 -#define P_DEFINE_uint64 DEFINE_uint64 -#define P_DECLARE_int32 DECLARE_int32 -#define P_DECLARE_bool DECLARE_bool -#define P_DECLARE_string DECLARE_string -#define P_DECLARE_double DECLARE_double -#define P_DECLARE_int64 DECLARE_int64 -#define P_DECLARE_uint64 DECLARE_uint64 namespace paddle { void ParseCommandLineFlags(int* argc, char** argv, bool withHelp = true); } // namespace paddle - -#endif diff --git a/paddle/utils/CustomStackTrace.cpp b/paddle/utils/CustomStackTrace.cpp index 083f5c509a26cd06d6fc6cea2a7587c7ef57d4e6..66b38218a7c7ec146f366ded516ebe22d012e47f 100644 --- a/paddle/utils/CustomStackTrace.cpp +++ b/paddle/utils/CustomStackTrace.cpp @@ -16,7 +16,7 @@ limitations under the License. */ #include #include "CommandLineParser.h" -P_DEFINE_bool( +DEFINE_bool( layer_stack_error_only_current_thread, true, "Dump current thread or whole process layer stack when signal error " diff --git a/paddle/utils/Flags.cpp b/paddle/utils/Flags.cpp index 1c9e602f45a818824a34aca23ef8f52a5e14cd17..59d6cbdc513660b87cb013d8aa92c5c8f9289ecb 100644 --- a/paddle/utils/Flags.cpp +++ b/paddle/utils/Flags.cpp @@ -15,65 +15,61 @@ limitations under the License. */ #include "Flags.h" #ifdef PADDLE_ONLY_CPU -P_DEFINE_bool(use_gpu, false, "Only support CPU training"); +DEFINE_bool(use_gpu, false, "Only support CPU training"); #else -P_DEFINE_bool(use_gpu, true, "Whether to use GPU for training"); +DEFINE_bool(use_gpu, true, "Whether to use GPU for training"); #endif -P_DEFINE_bool( - parallel_nn, - false, - "Whether to use multi-threads to calculate one neural network." - "If it was set false, use gpu_id specify which gpu core to use" - "(the device property in the trainer config file will be ingored)." - "If it was set true, the gpu core is specified by the trainer" - " config file(gpu_id will be ignored)."); -P_DEFINE_int32(trainer_count, 1, "Defined how many trainers to train"); -P_DEFINE_int32(gpu_id, 0, "Which gpu core to use"); -P_DEFINE_int32(port, 20134, "Listening port for pserver"); -P_DEFINE_int32(data_server_port, 21134, "Listening port for dserver"); -P_DEFINE_int32(ports_num, - 1, - "The ports number for parameter send," - " increment based on default port number"); -P_DEFINE_int32(ports_num_for_sparse, - 0, - "The ports number for parameter send," - " increment based on default (port + ports_num)"); -P_DEFINE_string(nics, "xgbe0,xgbe1", "network device name for pservers"); -P_DEFINE_string(rdma_tcp, "tcp", "use rdma or tcp rdma transport protocol"); -P_DEFINE_int32( - trainer_id, - 0, - "For distributed training, each trainer must be given an unique id" - " ranging from 0 to num_trainers-1. Trainer 0 is the master" - " trainer"); -P_DEFINE_int32(num_gradient_servers, 1, "number of gradient servers"); -P_DEFINE_string(comment, "", "A string for commenting this training task"); -P_DEFINE_string(load_missing_parameter_strategy, - "fail", - "which operation to take on load model fails. 
support " - "fail/rand/zero only."); -P_DEFINE_int32(log_period, 100, "Log progress every so many batches"); -P_DEFINE_int32(log_period_server, - 500, - "Log progress every so many batches at pserver end"); -P_DEFINE_double(checkgrad_eps, 1e-5, "parameter change size for checkgrad"); -P_DEFINE_int32(enable_parallel_vector, - 0, - "threshold for enable parallel vector"); -P_DEFINE_bool(loadsave_parameters_in_pserver, - false, - "load and save parameters in pserver. " - "only work while parameter set sparse_remote_update."); -P_DEFINE_int32(beam_size, - 1, - "Beam size used in generating most probable output sequences."); +DEFINE_bool(parallel_nn, + false, + "Whether to use multi-threads to calculate one neural network." + "If it was set false, use gpu_id specify which gpu core to use" + "(the device property in the trainer config file will be ingored)." + "If it was set true, the gpu core is specified by the trainer" + " config file(gpu_id will be ignored)."); +DEFINE_int32(trainer_count, 1, "Defined how many trainers to train"); +DEFINE_int32(gpu_id, 0, "Which gpu core to use"); +DEFINE_int32(port, 20134, "Listening port for pserver"); +DEFINE_int32(data_server_port, 21134, "Listening port for dserver"); +DEFINE_int32(ports_num, + 1, + "The ports number for parameter send," + " increment based on default port number"); +DEFINE_int32(ports_num_for_sparse, + 0, + "The ports number for parameter send," + " increment based on default (port + ports_num)"); +DEFINE_string(nics, "xgbe0,xgbe1", "network device name for pservers"); +DEFINE_string(rdma_tcp, "tcp", "use rdma or tcp rdma transport protocol"); +DEFINE_int32(trainer_id, + 0, + "For distributed training, each trainer must be given an unique id" + " ranging from 0 to num_trainers-1. Trainer 0 is the master" + " trainer"); +DEFINE_int32(num_gradient_servers, 1, "number of gradient servers"); +DEFINE_string(comment, "", "A string for commenting this training task"); +DEFINE_string(load_missing_parameter_strategy, + "fail", + "which operation to take on load model fails. support " + "fail/rand/zero only."); +DEFINE_int32(log_period, 100, "Log progress every so many batches"); +DEFINE_int32(log_period_server, + 500, + "Log progress every so many batches at pserver end"); +DEFINE_double(checkgrad_eps, 1e-5, "parameter change size for checkgrad"); +DEFINE_int32(enable_parallel_vector, 0, "threshold for enable parallel vector"); +DEFINE_bool(loadsave_parameters_in_pserver, + false, + "load and save parameters in pserver. " + "only work while parameter set sparse_remote_update."); +DEFINE_int32(beam_size, + 1, + "Beam size used in generating most probable output sequences."); -P_DEFINE_bool(show_layer_stat, false, "show the statistics of each layer"); -P_DEFINE_string(predict_file, "", "File name for saving predict result"); -P_DEFINE_bool(prev_batch_state, false, "batch is continue with next batch"); -P_DEFINE_string(init_model_path, - "", - "Path of the initial model parameters." - "If it was set, start_pass will be ignored."); +DEFINE_bool(show_layer_stat, false, "show the statistics of each layer"); +DEFINE_string(predict_file, "", "File name for saving predict result"); +DEFINE_bool(prev_batch_state, false, "batch is continue with next batch"); +DEFINE_string(init_model_path, + "", + "Path of the initial model parameters." 
+ "If it was set, start_pass will be ignored."); diff --git a/paddle/utils/Flags.h b/paddle/utils/Flags.h index 922533d63e7f0c28a1dcec6b4d9f453f1794abb5..2ebbcb24eb061531d0807756528d7bf16e6aa124 100644 --- a/paddle/utils/Flags.h +++ b/paddle/utils/Flags.h @@ -16,28 +16,28 @@ limitations under the License. */ #include "CommandLineParser.h" -P_DECLARE_bool(parallel_nn); -P_DECLARE_int32(async_count); -P_DECLARE_int32(port); -P_DECLARE_int32(data_server_port); -P_DECLARE_bool(use_gpu); -P_DECLARE_int32(gpu_id); -P_DECLARE_int32(trainer_count); -P_DECLARE_int32(ports_num); -P_DECLARE_int32(ports_num_for_sparse); -P_DECLARE_string(nics); -P_DECLARE_string(rdma_tcp); -P_DECLARE_int32(trainer_id); -P_DECLARE_int32(num_gradient_servers); -P_DECLARE_string(comment); -P_DECLARE_string(load_missing_parameter_strategy); -P_DECLARE_int32(log_period); -P_DECLARE_int32(log_period_server); -P_DECLARE_double(checkgrad_eps); -P_DECLARE_int32(enable_parallel_vector); -P_DECLARE_bool(loadsave_parameters_in_pserver); -P_DECLARE_int32(beam_size); -P_DECLARE_bool(show_layer_stat); -P_DECLARE_string(predict_file); -P_DECLARE_bool(prev_batch_state); -P_DECLARE_string(init_model_path); +DECLARE_bool(parallel_nn); +DECLARE_int32(async_count); +DECLARE_int32(port); +DECLARE_int32(data_server_port); +DECLARE_bool(use_gpu); +DECLARE_int32(gpu_id); +DECLARE_int32(trainer_count); +DECLARE_int32(ports_num); +DECLARE_int32(ports_num_for_sparse); +DECLARE_string(nics); +DECLARE_string(rdma_tcp); +DECLARE_int32(trainer_id); +DECLARE_int32(num_gradient_servers); +DECLARE_string(comment); +DECLARE_string(load_missing_parameter_strategy); +DECLARE_int32(log_period); +DECLARE_int32(log_period_server); +DECLARE_double(checkgrad_eps); +DECLARE_int32(enable_parallel_vector); +DECLARE_bool(loadsave_parameters_in_pserver); +DECLARE_int32(beam_size); +DECLARE_bool(show_layer_stat); +DECLARE_string(predict_file); +DECLARE_bool(prev_batch_state); +DECLARE_string(init_model_path); diff --git a/paddle/utils/Logging.cpp b/paddle/utils/Logging.cpp index 20f32466a56ac4c67c16dfbea229bc78f9f4f6d9..5a1c6ecb2219f7983609c27f3215c7fc1e9e9ef2 100644 --- a/paddle/utils/Logging.cpp +++ b/paddle/utils/Logging.cpp @@ -18,175 +18,9 @@ limitations under the License. */ */ #include "Logging.h" -#ifndef PADDLE_USE_GLOG -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include namespace paddle { -namespace internal { - -std::string join(const std::string& part1, const std::string& part2) { - const char sep = '/'; - if (!part2.empty() && part2.front() == sep) { - return part2; - } - std::string ret; - ret.reserve(part1.size() + part2.size() + 1); - ret = part1; - if (!ret.empty() && ret.back() != sep) { - ret += sep; - } - ret += part2; - return ret; -} - -static inline bool env2bool(const char* envName, bool defaultValue = false) { - char* envValue = getenv(envName); - if (envValue == nullptr) { - return defaultValue; - } else { - return memchr("tTyY1\0", envValue[0], 6) != nullptr; - } -} - -static inline int env2int(const char* envName, int defaultValue = 0) { - char* envValue = getenv(envName); - if (envValue == nullptr) { - return defaultValue; - } else { - int retValue = defaultValue; - try { - retValue = std::stoi(envValue); - } catch (...) 
{ - // pass - } - return retValue; - } -} - -static inline int env2index(const char* envName, - const std::vector& options, - int defaultValue) { - char* envValue = getenv(envName); - if (envValue == nullptr) { - return defaultValue; - } else { - for (size_t i = 0; i < options.size(); ++i) { - if (options[i] == envValue) { - return static_cast(i); - } - } - return defaultValue; - } -} - -static bool gLogToStderr = env2bool("PLOG_LOGTOSTDERR", true); -static const std::vector gLevelName = { - "INFO", "WARNING", "ERROR", "FATAL"}; -static int gMinLogLevel = - env2int("PLOG_MINLOGLEVEL", env2index("PLOG_MINLOGLEVEL", gLevelName, 0)); - -static std::vector> gLogFds; -static std::vector gLogFileFds; -static bool gLogInited = false; -static void freeLogFileFds() { - for (auto fd : gLogFileFds) { - close(fd); - } -} - -static void initializeLogFds(char* argv0) { - gLogFds.resize(NUM_SEVERITIES); - - for (int i = gMinLogLevel; i < NUM_SEVERITIES && gLogToStderr; - ++i) { // Add stderr - std::vector& fds = gLogFds[i]; - fds.push_back(STDERR_FILENO); - } - - char* logDir = getenv("PLOG_LOGDIR"); - - for (int i = gMinLogLevel; i < NUM_SEVERITIES && logDir != nullptr; ++i) { - std::string filename = - join(logDir, std::string(argv0) + "." + gLevelName[i]); - int fd = open(filename.c_str(), O_CREAT | O_WRONLY, 0644); - if (fd == -1) { - fprintf(stderr, "Open log file error!"); - exit(1); - } - gLogFileFds.push_back(fd); - - std::vector& curFds = gLogFds[i]; - curFds.insert(curFds.end(), gLogFileFds.begin(), gLogFileFds.end()); - } - - atexit(freeLogFileFds); - gLogInited = true; -} - -static void (*gFailureFunctionPtr)() ATTR_NORETURN = abort; - -LogMessage::LogMessage(const char* fname, int line, int severity) - : fname_(fname), line_(line), severity_(severity) {} - -LogMessage::~LogMessage() { this->generateLogMessage(); } - -void LogMessage::generateLogMessage() { - if (!gLogInited) { - fprintf(stderr, - "%c %s:%d] %s\n", - "IWEF"[severity_], - fname_, - line_, - str().c_str()); - } else { - for (auto& fd : gLogFds[this->severity_]) { - dprintf(fd, - "%c %s:%d] %s\n", - "IWEF"[severity_], - fname_, - line_, - str().c_str()); - } - } -} - -LogMessageFatal::LogMessageFatal(const char* file, int line) - : LogMessage(file, line, FATAL) {} - -LogMessageFatal::~LogMessageFatal() { - generateLogMessage(); - gFailureFunctionPtr(); -} -} // namespace internal - -void initializeLogging(int argc, char** argv) { - internal::initializeLogFds(argv[0]); -} - -namespace logging { -void setMinLogLevel(int level) { paddle::internal::gMinLogLevel = level; } - -void installFailureFunction(void (*callback)() ATTR_NORETURN) { - paddle::internal::gFailureFunctionPtr = callback; -} - -} // namespace logging - -} // namespace paddle - -#else -namespace paddle { void initializeLogging(int argc, char** argv) { (void)(argc); if (!getenv("GLOG_logtostderr")) { @@ -197,13 +31,16 @@ void initializeLogging(int argc, char** argv) { } namespace logging { + void setMinLogLevel(int level) { FLAGS_minloglevel = level; } + void installFailureFunction(void (*callback)()) { google::InstallFailureFunction(callback); } + void installFailureWriter(void (*callback)(const char*, int)) { google::InstallFailureWriter(callback); } + } // namespace logging } // namespace paddle -#endif diff --git a/paddle/utils/Logging.h b/paddle/utils/Logging.h index 4379289f6d1b4b5a1006bd723c2177a686ed89f6..d9e551f0891fa0808b8699aea94a0d2ab4f81cb3 100644 --- a/paddle/utils/Logging.h +++ b/paddle/utils/Logging.h @@ -22,175 +22,21 @@ limitations under the License. 
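Logging.cpp shrinks the same way: the env2bool/LogMessage machinery is deleted and only the glog path stays. The visible context cuts off inside initializeLogging, so the body below is reconstructed under the assumption that it only restores the old log-to-stderr default and then hands off to glog; TrainerMain.cpp earlier in the patch complements this by setting FLAGS_logbuflevel = -1 unconditionally now that glog is always present:

#include <cstdlib>
#include <glog/logging.h>

namespace paddle {
void initializeLogging(int argc, char** argv) {
  (void)argc;
  if (!std::getenv("GLOG_logtostderr")) {
    google::LogToStderr();  // keep the historical default of stderr output
  }
  google::InitGoogleLogging(argv[0]);
}
}  // namespace paddle
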
diff --git a/paddle/utils/Logging.h b/paddle/utils/Logging.h
index 4379289f6d1b4b5a1006bd723c2177a686ed89f6..d9e551f0891fa0808b8699aea94a0d2ab4f81cb3 100644
--- a/paddle/utils/Logging.h
+++ b/paddle/utils/Logging.h
@@ -22,175 +22,21 @@ limitations under the License. */
 #include <sstream>
 #include <memory>
-#ifndef PADDLE_USE_GLOG
-#include "CompilerMacros.h"
-
-//! TODO(yuyang18): Move this utility macro into some global header.
-#define PP_CAT(a, b) PP_CAT_I(a, b)
-#define PP_CAT_I(a, b) PP_CAT_II(~, a##b)
-#define PP_CAT_II(p, res) res
-
-/**
- * Generate Unique Variable Name, Usefully in macro.
- * @SEE
- * http://stackoverflow.com/questions/1082192/how-to-generate-random-variable-names-in-c-using-macros
- */
-#define UNIQUE_NAME(base) PP_CAT(base, __LINE__)
-
+#include <glog/logging.h>
 
 namespace paddle {
-//! Log levels.
-const int INFO = 0;
-const int WARNING = 1;
-const int ERROR = 2;
-const int FATAL = 3;
-const int NUM_SEVERITIES = 4;
-
-namespace internal {
-
-class LogMessage : public std::basic_ostringstream<char> {
-public:
-  LogMessage(const char* fname, int line, int severity);
-  ~LogMessage();
-
-protected:
-  /**
-   * @brief Print log message to stderr, files, etc.
-   */
-  void generateLogMessage();
-
-private:
-  const char* fname_;
-  int line_;
-  int severity_;
-};
-
-// LogMessageFatal ensures the process will exit in failure after
-// logging this message.
-class LogMessageFatal : public LogMessage {
-public:
-  LogMessageFatal(const char* file, int line) __attribute__((cold));
-  ~LogMessageFatal() __attribute__((noreturn));
-};
-
-#define _P_LOG_INFO \
-  ::paddle::internal::LogMessage(__FILE__, __LINE__, paddle::INFO)
-#define _P_LOG_WARNING \
-  ::paddle::internal::LogMessage(__FILE__, __LINE__, paddle::WARNING)
-#define _P_LOG_ERROR \
-  ::paddle::internal::LogMessage(__FILE__, __LINE__, paddle::ERROR)
-#define _P_LOG_FATAL ::paddle::internal::LogMessageFatal(__FILE__, __LINE__)
-
-#define P_LOG(severity) _P_LOG_##severity
-
-#define P_LOG_FIRST_N(severity, n)                                       \
-  static int UNIQUE_NAME(LOG_OCCURRENCES) = 0;                           \
-  if (UNIQUE_NAME(LOG_OCCURRENCES) <= n) ++UNIQUE_NAME(LOG_OCCURRENCES); \
-  if (UNIQUE_NAME(LOG_OCCURRENCES) <= n) P_LOG(severity)
-
-#define P_LOG_IF_EVERY_N(severity, condition, n)                              \
-  static int UNIQUE_NAME(LOG_OCCURRENCES) = 0;                                \
-  if (condition && ((UNIQUE_NAME(LOG_OCCURRENCES) =                           \
-                         (UNIQUE_NAME(LOG_OCCURRENCES) + 1) % n) == (1 % n))) \
-    P_LOG(severity)
-
-#define P_LOG_EVERY_N(severity, n) P_LOG_IF_EVERY_N(severity, true, n)
-
-// TODO(jeff): Define a proper implementation of VLOG_IS_ON
-#define P_VLOG_IS_ON(lvl) ((lvl) <= 0)
-
-#define P_LOG_IF(severity, condition) \
-  if (condition) P_LOG(severity)
-
-#define P_VLOG(lvl) P_LOG_IF(INFO, P_VLOG_IS_ON(lvl))
-
-#define P_VLOG_IF(lvl, cond) P_LOG_IF(INFO, P_VLOG_IS_ON(lvl) && cond)
-
-#define P_VLOG_EVERY_N(lvl, n) P_LOG_IF_EVERY_N(INFO, P_VLOG_IS_ON(lvl), n)
-
-#define PREDICT_FALSE(x) (__builtin_expect(x, 0))
-#define PREDICT_TRUE(x) (__builtin_expect(!!(x), 1))
-
-// CHECK dies with a fatal error if condition is not true. It is *not*
-// controlled by NDEBUG, so the check will be executed regardless of
-// compilation mode. Therefore, it is safe to do things like:
-//    CHECK(fp->Write(x) == 4)
-#define P_CHECK(condition)           \
-  if (PREDICT_FALSE(!(condition)))   \
-    P_LOG(FATAL) << "Check failed: " #condition " "
-
-#define P_CHECK_EQ(val1, val2) P_CHECK((val1) == (val2))
-#define P_CHECK_NE(val1, val2) P_CHECK((val1) != (val2))
-#define P_CHECK_LE(val1, val2) P_CHECK((val1) <= (val2))
-#define P_CHECK_LT(val1, val2) P_CHECK((val1) < (val2))
-#define P_CHECK_GE(val1, val2) P_CHECK((val1) >= (val2))
-#define P_CHECK_GT(val1, val2) P_CHECK((val1) > (val2))
-#define P_CHECK_NOTNULL(val) P_CHECK((val) != NULL)
-
-//! GLOG compatible APIs
-//! NOTE: only implement Paddle actually used APIs.
-#define LOG(x) P_LOG(x)
-#define VLOG(x) P_VLOG(x)
-#define DLOG(x) P_VLOG(5)
-#define CHECK(x) P_CHECK(x)
-#define PCHECK(x) P_CHECK(x)
-#define CHECK_EQ(val1, val2) P_CHECK((val1) == (val2))
-#define CHECK_NE(val1, val2) P_CHECK((val1) != (val2))
-#define CHECK_LE(val1, val2) P_CHECK((val1) <= (val2))
-#define CHECK_LT(val1, val2) P_CHECK((val1) < (val2))
-#define CHECK_GE(val1, val2) P_CHECK((val1) >= (val2))
-#define CHECK_GT(val1, val2) P_CHECK((val1) > (val2))
-#define CHECK_NOTNULL(val) P_CHECK((val) != NULL)
-#define VLOG_IS_ON(x) P_VLOG_IS_ON(x)
-#define LOG_FIRST_N(severity, n) P_LOG_FIRST_N(severity, n)
-#define LOG_IF(severity, condition) P_LOG_IF(severity, condition)
-#define VLOG_EVERY_N(lvl, n) P_VLOG_EVERY_N(lvl, n)
-#define VLOG_IF(lvl, cond) P_VLOG_IF(lvl, cond)
-#define LOG_EVERY_N(severity, n) P_LOG_EVERY_N(severity, n)
-}  // namespace internal
-
-/**
- * @brief initialize logging
- * @note: Current implement of logging is lack of:
- *          PrintCallStack when fatal.
- *          VLOG_IS_ON
- *        But it is portable to multi-platform, and simple enough to modify.
- */
 void initializeLogging(int argc, char** argv);
-namespace logging {
-/**
- * @brief Set Min Log Level. if Log.level < minLogLevel, then will not print log
- *        to stream
- * @param level. Any integer is OK, but only 0 <= x <= NUM_SEVERITIES is useful.
- */
-void setMinLogLevel(int level);
-
-/**
- * @brief Install Log(Fatal) failure function. Default is abort();
- * @param callback: The failure function.
- */
-void installFailureFunction(void (*callback)() ATTR_NORETURN);
-/**
- * @brief installFailureWriter
- * @note: not implemented currently.
- */
-inline void installFailureWriter(void (*callback)(const char*, int)) {
-  (void)(callback);  // unused callback.
-}
-}  // namespace logging
-}  // namespace paddle
-#else
-#include <glog/logging.h>
-namespace paddle {
-void initializeLogging(int argc, char** argv);
 namespace logging {
+
 void setMinLogLevel(int level);
+
 void installFailureFunction(void (*callback)());
+
 void installFailureWriter(void (*callback)(const char*, int));
-}  // namespace logging
-}
-#endif  // PADDLE_USE_GLOG
+
+}  // namespace logging
+}  // namespace paddle
 
 #ifndef NDEBUG
 #define DEBUG_LEVEL 5
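For callers, the header now simply re-exports glog, so the P_-prefixed macro family disappears and the three remaining hooks forward to glog. A minimal usage sketch (illustrative, not part of the patch; the capture-less lambda converts to the plain function pointer installFailureWriter expects):

    #include <cstdio>

    #include "paddle/utils/Logging.h"

    void initExample(int argc, char** argv) {
      paddle::initializeLogging(argc, argv);
      paddle::logging::setMinLogLevel(0);  // forwards to glog's FLAGS_minloglevel
      paddle::logging::installFailureWriter([](const char* data, int sz) {
        fwrite(data, 1, sz, stderr);  // invoked when LOG(FATAL)/CHECK fails
      });
      LOG(INFO) << "glog macro, no P_ prefix";
      CHECK_EQ(2 + 2, 4) << "CHECK_* is glog's as well";
    }
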
*/ #include "CommandLineParser.h" #include "Util.h" -P_DEFINE_bool(thread_local_rand_use_global_seed, - false, - "Whether to use global seed in thread local rand."); +DEFINE_bool(thread_local_rand_use_global_seed, + false, + "Whether to use global seed in thread local rand."); namespace paddle { diff --git a/paddle/utils/Util.cpp b/paddle/utils/Util.cpp index 26ff385c84564d1188120464f94f418d88c20f19..7c0d66c488f5064641c53ea7995a75c330a3e49d 100644 --- a/paddle/utils/Util.cpp +++ b/paddle/utils/Util.cpp @@ -33,7 +33,7 @@ limitations under the License. */ #include "ThreadLocal.h" #include "Version.h" -P_DEFINE_int32(seed, 1, "random number seed. 0 for srand(time)"); +DEFINE_int32(seed, 1, "random number seed. 0 for srand(time)"); #ifdef WITH_GOOGLE_PERFTOOLS /* @@ -52,10 +52,8 @@ P_DEFINE_int32(seed, 1, "random number seed. 0 for srand(time)"); #include -P_DEFINE_int32(profile_signal, 12, "signal for switch google profiler"); -P_DEFINE_string(profile_data_file, - "gperf.prof", - "file for storing profile data"); +DEFINE_int32(profile_signal, 12, "signal for switch google profiler"); +DEFINE_string(profile_data_file, "gperf.prof", "file for storing profile data"); static void profilerSwitch(int signalNumber) { bool static started = false; diff --git a/paddle/utils/Version.cpp b/paddle/utils/Version.cpp index a9e351b69fcbac661142d0a3322d7bf3a4293cd9..731c30842118bce59ce45297d9c8f47fa0a69d69 100644 --- a/paddle/utils/Version.cpp +++ b/paddle/utils/Version.cpp @@ -18,13 +18,8 @@ limitations under the License. */ #include #include "Flags.h" #include "Util.h" -//! TODO(yuyang18) in gflags, version has another define. Use another flag -//! instead. -#ifndef PADDLE_USE_GFLAGS -P_DEFINE_bool(version, false, "print version"); -#else -P_DECLARE_bool(version); -#endif + +DECLARE_bool(version); namespace paddle { namespace version { diff --git a/paddle/utils/tests/CMakeLists.txt b/paddle/utils/tests/CMakeLists.txt index 298ede5cd6451c9b03219dff72f6e81c374f8ef1..26fafbd1ab3f2967b765b8bcb973fb745c0e6422 100644 --- a/paddle/utils/tests/CMakeLists.txt +++ b/paddle/utils/tests/CMakeLists.txt @@ -1,5 +1,3 @@ -add_simple_unittest(test_CommandLineParser) -add_simple_unittest(test_Logging) add_simple_unittest(test_Thread) add_simple_unittest(test_StringUtils) add_simple_unittest(test_CustomStackTrace) diff --git a/paddle/utils/tests/test_CommandLineParser.cpp b/paddle/utils/tests/test_CommandLineParser.cpp deleted file mode 100644 index ed2b3068d5dda710de728cfad14a98aeaf847954..0000000000000000000000000000000000000000 --- a/paddle/utils/tests/test_CommandLineParser.cpp +++ /dev/null @@ -1,114 +0,0 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. */ - -#ifndef PADDLE_USE_GFLAGS -//! Test Command Line Parser for paddle internal implement. 
diff --git a/paddle/utils/tests/CMakeLists.txt b/paddle/utils/tests/CMakeLists.txt
index 298ede5cd6451c9b03219dff72f6e81c374f8ef1..26fafbd1ab3f2967b765b8bcb973fb745c0e6422 100644
--- a/paddle/utils/tests/CMakeLists.txt
+++ b/paddle/utils/tests/CMakeLists.txt
@@ -1,5 +1,3 @@
-add_simple_unittest(test_CommandLineParser)
-add_simple_unittest(test_Logging)
 add_simple_unittest(test_Thread)
 add_simple_unittest(test_StringUtils)
 add_simple_unittest(test_CustomStackTrace)
diff --git a/paddle/utils/tests/test_CommandLineParser.cpp b/paddle/utils/tests/test_CommandLineParser.cpp
deleted file mode 100644
index ed2b3068d5dda710de728cfad14a98aeaf847954..0000000000000000000000000000000000000000
--- a/paddle/utils/tests/test_CommandLineParser.cpp
+++ /dev/null
@@ -1,114 +0,0 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License. */
-
-#ifndef PADDLE_USE_GFLAGS
-//! Test Command Line Parser for paddle internal implement.
-
-#include <paddle/utils/CommandLineParser.h>
-#include <gtest/gtest.h>
-
-P_DEFINE_int32(i1, 1, "test int flag 1");
-P_DEFINE_int32(i2, 2, "test int flag 2");
-
-P_DEFINE_string(str1, "1", "test str flag 1");
-P_DEFINE_string(str2, "2", "test str flag 2");
-
-P_DEFINE_bool(b1, true, "test bool flag 1");
-P_DEFINE_bool(b2, false, "test bool flag 2");
-
-P_DEFINE_double(d1, 0.1, "test double flag 1");
-P_DEFINE_double(d2, -42.3, "test double flag 2");
-
-P_DEFINE_int64(l1, 1, "test int64 flag 1");
-P_DEFINE_int64(l2, 2, "test int64 flag 2");
-
-P_DEFINE_uint64(ul1, 32, "test uint64 flag 1");
-P_DEFINE_uint64(ul2, 33, "test uint64 flag 2");
-
-constexpr double EPSILON = 1e-5;
-
-#define cc(x) const_cast<char*>((x))
-
-TEST(CommandLineParser, defaultValue) {
-  char* argv[] = {cc("test_program"), cc("--unused_flag=134")};
-  int argc = sizeof(argv) / sizeof(char*);
-
-  paddle::ParseCommandLineFlags(&argc, argv);
-
-  // Check Default Value
-  ASSERT_EQ(argc, 2);
-  ASSERT_EQ(FLAGS_i1, 1);
-  ASSERT_EQ(FLAGS_i2, 2);
-  ASSERT_EQ(FLAGS_str1, "1");
-  ASSERT_EQ(FLAGS_str2, "2");
-  ASSERT_EQ(FLAGS_b1, true);
-  ASSERT_EQ(FLAGS_b2, false);
-  ASSERT_NEAR(FLAGS_d1, 0.1, EPSILON);
-  ASSERT_NEAR(FLAGS_d2, -42.3, EPSILON);
-  ASSERT_EQ(FLAGS_i1, 1);
-  ASSERT_EQ(FLAGS_i2, 2);
-  ASSERT_EQ(FLAGS_ul1, 32UL);
-  ASSERT_EQ(FLAGS_ul2, 33UL);
-}
-
-TEST(CommandLineParser, normal) {
-  char* argv[] = {cc("test_program"),
-                  cc("--i2=32"),
-                  cc("--str1=abc"),
-                  cc("--b2=1"),
-                  cc("-b1=False"),
-                  cc("--d2=.34"),
-                  cc("--d1=0"),
-                  cc("--l1=-12345678901234"),
-                  cc("-ul2=3212")};
-  int argc = sizeof(argv) / sizeof(char*);
-  paddle::ParseCommandLineFlags(&argc, argv);
-  ASSERT_EQ(argc, 1);
-  ASSERT_EQ(FLAGS_i2, 32);
-  ASSERT_EQ(FLAGS_str1, "abc");
-  ASSERT_EQ(FLAGS_b2, true);
-  ASSERT_EQ(FLAGS_b1, false);
-  ASSERT_NEAR(FLAGS_d2, 0.34, EPSILON);
-  ASSERT_NEAR(FLAGS_d1, 0.0, EPSILON);
-  ASSERT_EQ(FLAGS_l1, -12345678901234);
-  ASSERT_EQ(FLAGS_ul2, 3212UL);
-}
-
-TEST(CommandLineParser, printHelp) {
-  char* argv[] = {cc("test_program"), cc("--help")};
-  int argc = sizeof(argv) / sizeof(char*);
-
-  // Will Print Usage
-  ASSERT_DEATH(paddle::ParseCommandLineFlags(&argc, argv), ".*test_program.*");
-}
-
-TEST(CommandLineParser, parseError) {
-  char* argv[] = {cc("test_program"), cc("--i1=abc")};
-
-  int argc = sizeof(argv) / sizeof(char*);
-  ASSERT_DEATH(
-      paddle::ParseCommandLineFlags(&argc, argv),
-      "Parse command flag i1 error! User input is --i1=abc.*test_program.*");
-}
-
-int main(int argc, char** argv) {
-  testing::InitGoogleTest(&argc, argv);
-  return RUN_ALL_TESTS();
-}
-
-#else
-
-int main(int argc, char** argv) { return 0; }
-
-#endif
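The deleted file tested the in-house paddle::ParseCommandLineFlags. Equivalent coverage for the gflags path would hinge on the same observable behavior: recognized flags are consumed from argv and the FLAGS_* variables are updated. A rough sketch reusing one flag name from the removed test (hypothetical replacement, not part of the patch):

    #include <gflags/gflags.h>
    #include <gtest/gtest.h>

    DEFINE_int32(i2, 2, "test int flag 2");

    TEST(GflagsMigration, overrideOnCommandLine) {
      char arg0[] = "test_program";
      char arg1[] = "--i2=32";
      char* args[] = {arg0, arg1, nullptr};
      char** argv = args;
      int argc = 2;
      gflags::ParseCommandLineFlags(&argc, &argv, /*remove_flags=*/true);
      EXPECT_EQ(1, argc);  // the recognized flag was consumed
      EXPECT_EQ(32, FLAGS_i2);
    }
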
diff --git a/paddle/utils/tests/test_CustomStackTrace.cpp b/paddle/utils/tests/test_CustomStackTrace.cpp
index 292ed4619d8bb0c3f7069cbbea7e729d91dd126c..2ce199837601755ac018889c07c223ad34c4a45b 100644
--- a/paddle/utils/tests/test_CustomStackTrace.cpp
+++ b/paddle/utils/tests/test_CustomStackTrace.cpp
@@ -20,7 +20,7 @@ limitations under the License. */
 #include "paddle/utils/Locks.h"
 #include "paddle/utils/Util.h"
 
-P_DEFINE_int32(test_thread_num, 10, "testing thread number");
+DEFINE_int32(test_thread_num, 10, "testing thread number");
 
 void testNormalImpl(
     const std::function<...>&,
diff --git a/paddle/utils/tests/test_Logging.cpp b/paddle/utils/tests/test_Logging.cpp
deleted file mode 100644
index fbfffcc65aeb9f5ebbe97dcb54ec44bd63f8d4bd..0000000000000000000000000000000000000000
--- a/paddle/utils/tests/test_Logging.cpp
+++ /dev/null
@@ -1,162 +0,0 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License. */
-
-/*
- * Basically from tensorflow/core/platform/default/logging.cc
- * Used in embedded system where there is no glogs.
- */
-
-#include <gtest/gtest.h>
-#include <dirent.h>
-#include <sys/stat.h>
-#include <fstream>
-#include "paddle/utils/Logging.h"
-#include "paddle/utils/Util.h"
-#ifndef PADDLE_USE_GLOG
-TEST(Logging, BasicalLog) {
-  auto pinfo = [] {
-    P_LOG(INFO) << "INFO";
-    exit(1);
-  };
-  ASSERT_DEATH(pinfo(), "I .*test_Logging.cpp:[0-9]+] INFO");
-
-  auto pwarn = [] {
-    P_LOG(WARNING) << "WARN";
-    exit(1);
-  };
-  ASSERT_DEATH(pwarn(), "W .*test_Logging.cpp:[0-9]+] WARN");
-
-  auto perr = [] {
-    P_LOG(ERROR) << "ERROR";
-    exit(1);
-  };
-  ASSERT_DEATH(perr(), "E .*test_Logging.cpp:[0-9]+] ERROR");
-
-  auto pfatal = [] { P_LOG(FATAL) << "FATAL"; };
-  ASSERT_DEATH(pfatal(), "F .*test_Logging.cpp:[0-9]+] FATAL");
-}
-
-TEST(Logging, Check) {
-  int a = 1;
-  int b = 2;
-  P_CHECK(a != b);
-
-  auto pcheckDown = [&] { P_CHECK(a == b); };
-  ASSERT_DEATH(pcheckDown(),
-               "F .*test_Logging.cpp:[0-9]+] Check failed: a == b ");
-
-  P_CHECK_LE(a, b);
-  P_CHECK_LT(a, b);
-  double t = 1.2;
-  P_CHECK_LE(a, t);
-  double* ptr = nullptr;
-
-  auto pcheckDown2 = [&] { P_CHECK_NOTNULL(ptr); };
-  ASSERT_DEATH(pcheckDown2(), "F");
-}
-
-#define cc(x) const_cast<char*>(x)
-
-TEST(Logging, LogToStderr) {
-  auto logToStderrCallback = [] {
-    setenv("PLOG_LOGTOSTDERR", "0", true);
-    char* argv[] = {cc("test")};
-    paddle::initializeLogging(1, argv);
-    P_LOG(INFO) << "This output will not print to std error";
-    exit(1);
-  };
-
-  ASSERT_DEATH(logToStderrCallback(), "");
-}
-
-constexpr char kLogDirName[] = "./test_log_dir";
-const std::vector<std::string> kLevels = {"INFO", "WARNING", "ERROR", "FATAL"};
-
-TEST(Logging, LogToDir) {
-  ASSERT_EQ(0, mkdir(kLogDirName, 0777));
-  auto logToDirCallback = [] {
-    setenv("PLOG_LOGTOSTDERR", "0", true);
-    setenv("PLOG_LOGDIR", kLogDirName, true);
-    char* argv[] = {cc("test")};
-    paddle::initializeLogging(1, argv);
-
-    P_LOG(INFO) << "INFO";
-    P_LOG(WARNING) << "WARNING";
-    P_LOG(ERROR) << "ERROR";
-    P_LOG(FATAL) << "FATAL";
-  };
-  ASSERT_DEATH(logToDirCallback(), "");
-
-  // There 4 file in logdir
-  auto dir = opendir(kLogDirName);
-  size_t fileCount = 0;
-  std::vector<std::string> filenames;
-  for (auto dirContent = readdir(dir); dirContent != nullptr;
-       dirContent = readdir(dir)) {
-    std::string filename(dirContent->d_name);
-    if (filename == "." || filename == "..") {
-      continue;
-    } else {
-      ++fileCount;
-      for (size_t i = 0; i < kLevels.size(); ++i) {
-        const std::string& curLevel = kLevels[i];
-        if (filename.size() > curLevel.length()) {
-          size_t diff = filename.size() - curLevel.length();
-          size_t j = 0;
-          for (; j < curLevel.length(); ++j) {
-            if (filename[j + diff] != curLevel[j]) {
-              // File Suffix Not Same, then break.
-              break;
-            }
-          }
-          if (j == curLevel.length()) {  // Same suffix.
-            std::ifstream fin;
-            auto fn = paddle::path::join(kLogDirName, filename);
-            fin.open(fn);
-            filenames.push_back(fn);
-            ASSERT_TRUE(fin.is_open());
-            size_t lineCounter = 0;
-            for (std::string line; std::getline(fin, line); ++lineCounter) {
-              // Do Nothing, Just calc lineCounter.
-            }
-
-            // For example.
-            // The info channel will have all log which level >= INFO
-            // So the info file's lineCounter should == 4.
-            ASSERT_EQ(kLevels.size() - i, lineCounter);
-            fin.close();
-          }
-        }
-      }
-    }
-  }
-  closedir(dir);
-  ASSERT_EQ(4UL, fileCount);  // 4 levels.
-  // Clean Unittest.
-  for (std::string& fn : filenames) {
-    ASSERT_EQ(remove(fn.c_str()), 0);
-  }
-  ASSERT_EQ(rmdir(kLogDirName), 0);
-}
-
-int main(int argc, char** argv) {
-  testing::InitGoogleTest(&argc, argv);
-  return RUN_ALL_TESTS();
-}
-
-#else
-
-int main(int, char**) { return 0; }
-
-#endif
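The removed file pinned down death-on-FATAL and the CHECK macros for the in-house logger; glog provides the same guarantees, and they could be asserted with gtest death tests. A minimal sketch of such replacement coverage (an assumption about how one would re-test this, not part of the patch):

    #include <glog/logging.h>
    #include <gtest/gtest.h>

    TEST(GlogMigration, fatalAndCheckAbort) {
      // glog writes the message to stderr before aborting, which is what
      // ASSERT_DEATH matches against.
      ASSERT_DEATH({ LOG(FATAL) << "boom"; }, "boom");
      ASSERT_DEATH({ CHECK_EQ(1, 2) << "mismatch"; }, "Check failed");
    }
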
diff --git a/paddle/utils/tests/test_SpinLock.cpp b/paddle/utils/tests/test_SpinLock.cpp
index 22f8584ef559d78bb1ba01356b3361accf3093c4..8351e7e3acd1afe1c6507ffced32f27ce065e5ce 100644
--- a/paddle/utils/tests/test_SpinLock.cpp
+++ b/paddle/utils/tests/test_SpinLock.cpp
@@ -19,7 +19,7 @@ limitations under the License. */
 #include "paddle/utils/Logging.h"
 #include "paddle/utils/Util.h"
 
-P_DEFINE_int32(test_thread_num, 100, "testing thread number");
+DEFINE_int32(test_thread_num, 100, "testing thread number");
 
 void testNormalImpl(
     size_t thread_num,
     const std::function<...>&
diff --git a/paddle/utils/tests/test_ThreadBarrier.cpp b/paddle/utils/tests/test_ThreadBarrier.cpp
index 4a8af5b97e3977961bce40a9aa9ad691113e342b..60c2214ffd1066ed4f7b95cd63dfe6a24fe66d67 100644
--- a/paddle/utils/tests/test_ThreadBarrier.cpp
+++ b/paddle/utils/tests/test_ThreadBarrier.cpp
@@ -20,7 +20,7 @@ limitations under the License. */
 #include "paddle/utils/Logging.h"
 #include "paddle/utils/Util.h"
 
-P_DEFINE_int32(test_thread_num, 100, "testing thread number");
+DEFINE_int32(test_thread_num, 100, "testing thread number");
 
 void testNormalImpl(
     size_t thread_num,
     const std::function<...>&
diff --git a/proto/ModelConfig.proto b/proto/ModelConfig.proto
index b34e1ebdedab104f7c16dbf9e1a264f3665115ce..552af71e76e5adf27f35bb5ad6fd8a69c71df0f1 100644
--- a/proto/ModelConfig.proto
+++ b/proto/ModelConfig.proto
@@ -245,7 +245,7 @@ message ImageConfig {
 
   // The size of input feature map.
   required uint32 img_size = 8;
-  required uint32 img_size_y = 9;
+  optional uint32 img_size_y = 9;
 }
 
 message LayerInputConfig {
diff --git a/python/paddle/trainer_config_helpers/data_sources.py b/python/paddle/trainer_config_helpers/data_sources.py
index 3741bfe79523b81a564ae536419eae5755ebd146..e7a67baa772c8f97e32e51e2b44ba803e42f61ad 100644
--- a/python/paddle/trainer_config_helpers/data_sources.py
+++ b/python/paddle/trainer_config_helpers/data_sources.py
@@ -171,7 +171,7 @@ def define_py_data_sources2(train_list, test_list, module, obj, args=None):
                              obj="process",
                              args={"dictionary": dict_name})
 
-    The related data provider can refer to :ref:`api_pydataprovider2_en_sequential_model` .
+    The related data provider can refer to :ref:`api_pydataprovider2_sequential_model` .
 
     :param train_list: Train list name.
     :type train_list: basestring
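With img_size_y relaxed from required to optional in ImageConfig above, readers of the message can no longer assume the field is present. A defensive-access sketch using the standard protoc-generated C++ accessors; the paddle namespace and the helper function are assumptions for illustration:

    #include "ModelConfig.pb.h"

    uint32_t featureMapHeight(const paddle::ImageConfig& conf) {
      // Fall back to a square feature map when only img_size is set.
      return conf.has_img_size_y() ? conf.img_size_y() : conf.img_size();
    }
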
diff --git a/third_party/gflags.BUILD b/third_party/gflags.BUILD
new file mode 100644
index 0000000000000000000000000000000000000000..85e8bd0bd74942102e5e9a9f817dc49383a745e7
--- /dev/null
+++ b/third_party/gflags.BUILD
@@ -0,0 +1,12 @@
+# Bazel (http://bazel.io/) BUILD file for gflags.
+#
+# See INSTALL.md for instructions for adding gflags to a Bazel workspace.
+
+licenses(["notice"])
+
+exports_files(["src/gflags_completions.sh", "COPYING.txt"])
+
+load(":bazel/gflags.bzl", "gflags_sources", "gflags_library")
+(hdrs, srcs) = gflags_sources(namespace=["google", "gflags"])
+gflags_library(hdrs=hdrs, srcs=srcs, threads=0)
+gflags_library(hdrs=hdrs, srcs=srcs, threads=1)
diff --git a/third_party/gflags_test/BUILD b/third_party/gflags_test/BUILD
new file mode 100644
index 0000000000000000000000000000000000000000..b50615203ba17c74a4c7611b685f3d3210389bbf
--- /dev/null
+++ b/third_party/gflags_test/BUILD
@@ -0,0 +1,10 @@
+licenses(["notice"])  # Apache 2.0
+
+cc_test(
+    name="gflags_test",
+    srcs=["gflags_test.cc"],
+    copts=["-Iexternal/gtest/include"],
+    deps=[
+        "@gtest//:gtest",
+        "@gflags//:gflags",
+    ], )
diff --git a/third_party/gflags_test/gflags_test.cc b/third_party/gflags_test/gflags_test.cc
new file mode 100644
index 0000000000000000000000000000000000000000..53286e7e5be062cf66b37d07047b173ea831e6c4
--- /dev/null
+++ b/third_party/gflags_test/gflags_test.cc
@@ -0,0 +1,33 @@
+#include <iostream>
+#include <string>
+
+#include "gflags/gflags.h"
+#include "gtest/gtest.h"
+
+DEFINE_bool(verbose, false, "Display program name before message");
+DEFINE_string(message, "Hello world!", "Message to print");
+
+static bool IsNonEmptyMessage(const char *flagname, const std::string &value) {
+  return value[0] != '\0';
+}
+DEFINE_validator(message, &IsNonEmptyMessage);
+
+namespace third_party {
+namespace gflags_test {
+
+TEST(GflagsTest, ParseAndPrint) {
+  gflags::SetUsageMessage("some usage message");
+  gflags::SetVersionString("1.0.0");
+  int argc = 1;
+  char program_name[] = "gflags_test";
+  char **argv = new char *[2];
+  argv[0] = program_name;
+  argv[1] = NULL;
+  gflags::ParseCommandLineFlags(&argc, reinterpret_cast<char ***>(&argv), true);
+  EXPECT_EQ("gflags_test", std::string(gflags::ProgramInvocationShortName()));
+  EXPECT_EQ("Hello world!", FLAGS_message);
+  gflags::ShutDownCommandLineFlags();
+}
+
+}  // namespace gflags_test
+}  // namespace third_party
"src/mock-log.h", + "src/stacktrace.h", + "src/symbolize.h", + "src/utilities.h", + "src/base/commandlineflags.h", + "src/base/googleinit.h", + "src/base/mutex.h", + "src/glog/log_severity.h", + ]) + +genrule( + name="config_h", + srcs=["src/config.h.cmake.in"], + outs=["config.h"], + cmd="awk '{ gsub(/^#cmakedefine/, \"//cmakedefine\"); print; }' $(<) > $(@)", +) + +genrule( + name="logging_h", + srcs=["src/glog/logging.h.in"], + outs=["glog/logging.h"], + cmd="$(location :gen_sh) < $(<) > $(@)", + tools=[":gen_sh"]) + +genrule( + name="raw_logging_h", + srcs=["src/glog/raw_logging.h.in"], + outs=["glog/raw_logging.h"], + cmd="$(location :gen_sh) < $(<) > $(@)", + tools=[":gen_sh"]) + +genrule( + name="stl_logging_h", + srcs=["src/glog/stl_logging.h.in"], + outs=["glog/stl_logging.h"], + cmd="$(location :gen_sh) < $(<) > $(@)", + tools=[":gen_sh"]) + +genrule( + name="vlog_is_on_h", + srcs=["src/glog/vlog_is_on.h.in"], + outs=["glog/vlog_is_on.h"], + cmd="$(location :gen_sh) < $(<) > $(@)", + tools=[":gen_sh"]) + +genrule( + name="gen_sh", + outs=["gen.sh"], + cmd=""" +cat > $@ <<"EOF" +#! /bin/sh +sed -e 's/@ac_cv_have_unistd_h@/1/g' \ + -e 's/@ac_cv_have_stdint_h@/1/g' \ + -e 's/@ac_cv_have_systypes_h@/1/g' \ + -e 's/@ac_cv_have_libgflags_h@/1/g' \ + -e 's/@ac_cv_have_uint16_t@/1/g' \ + -e 's/@ac_cv_have___builtin_expect@/1/g' \ + -e 's/@ac_cv_have_.*@/0/g' \ + -e 's/@ac_google_start_namespace@/namespace google {/g' \ + -e 's/@ac_google_end_namespace@/}/g' \ + -e 's/@ac_google_namespace@/google/g' \ + -e 's/@ac_cv___attribute___noinline@/__attribute__((noinline))/g' \ + -e 's/@ac_cv___attribute___noreturn@/__attribute__((noreturn))/g' \ + -e 's/@ac_cv___attribute___printf_4_5@/__attribute__((__format__ (__printf__, 4, 5)))/g' +EOF""") diff --git a/third_party/glog_test/BUILD b/third_party/glog_test/BUILD new file mode 100644 index 0000000000000000000000000000000000000000..56d08e95f8e8f063829ae68586fa9ef53306fef6 --- /dev/null +++ b/third_party/glog_test/BUILD @@ -0,0 +1,10 @@ +licenses(["notice"]) # Apache 2.0 + +cc_test( + name="glog_test", + srcs=["glog_test.cc"], + copts=["-Iexternal/gtest/include"], + deps=[ + "@gtest//:gtest", + "@glog//:glog", + ], ) diff --git a/third_party/glog_test/glog_test.cc b/third_party/glog_test/glog_test.cc new file mode 100644 index 0000000000000000000000000000000000000000..f1d737d625d25e8675f636075876903c42881a35 --- /dev/null +++ b/third_party/glog_test/glog_test.cc @@ -0,0 +1,7 @@ +#include +#include + +#include "glog/logging.h" +#include "gtest/gtest.h" + +TEST(GlogTest, Logging) { LOG(INFO) << "Hello world"; } diff --git a/third_party/gtest.BUILD b/third_party/gtest.BUILD index 71c74af513ca8c511dd273d089722bc7fbffd84c..9255b51d9aaa9c7ee5cbc1b2d537815c7ecbfcba 100644 --- a/third_party/gtest.BUILD +++ b/third_party/gtest.BUILD @@ -1,5 +1,5 @@ cc_library( - name="main", + name="gtest", srcs=glob( ["src/*.cc"], exclude=["src/gtest-all.cc"]), hdrs=glob(["include/**/*.h", "src/*.h"]), diff --git a/third_party/protobuf_test/BUILD b/third_party/protobuf_test/BUILD index 95a687a35629315398c7046ca7dc8d526892e227..67d4293c70eef081f6bb95de9774613a19ba91dd 100644 --- a/third_party/protobuf_test/BUILD +++ b/third_party/protobuf_test/BUILD @@ -19,6 +19,6 @@ cc_test( srcs=["example_lib_test.cc"], copts=["-Iexternal/gtest/include"], deps=[ - "@gtest//:main", + "@gtest//:gtest", ":example_lib", ], )