Commit 7598846c authored by W wangyanfei01

Merge remote branch 'origin/develop' into fix_data_sources

......@@ -25,8 +25,8 @@ find_package(ZLIB REQUIRED)
find_package(NumPy REQUIRED)
find_package(Threads REQUIRED)
find_package(AVX QUIET)
find_package(Glog)
find_package(Gflags QUIET)
find_package(Glog REQUIRED)
find_package(Gflags REQUIRED)
find_package(GTest)
find_package(Sphinx)
find_package(Doxygen)
......@@ -40,8 +40,6 @@ option(WITH_AVX "Compile PaddlePaddle with avx intrinsics" ${AVX_FOUND})
option(WITH_PYTHON "Compile PaddlePaddle with python interpreter" ON)
option(WITH_STYLE_CHECK "Style Check for PaddlePaddle" ${PYTHONINTERP_FOUND})
option(WITH_RDMA "Compile PaddlePaddle with rdma support" OFF)
option(WITH_GLOG "Compile PaddlePaddle use glog, otherwise use a log implement internally" ${LIBGLOG_FOUND})
option(WITH_GFLAGS "Compile PaddlePaddle use gflags, otherwise use a flag implement internally" ${GFLAGS_FOUND})
option(WITH_TIMER "Compile PaddlePaddle use timer" OFF)
option(WITH_PROFILER "Compile PaddlePaddle use gpu profiler" OFF)
option(WITH_TESTING "Compile and run unittest for PaddlePaddle" ${GTEST_FOUND})
......@@ -136,16 +134,12 @@ else(WITH_RDMA)
add_definitions(-DPADDLE_DISABLE_RDMA)
endif(WITH_RDMA)
if(WITH_GLOG)
add_definitions(-DPADDLE_USE_GLOG)
include_directories(${LIBGLOG_INCLUDE_DIR})
endif()
# glog
include_directories(${LIBGLOG_INCLUDE_DIR})
if(WITH_GFLAGS)
add_definitions(-DPADDLE_USE_GFLAGS)
add_definitions(-DGFLAGS_NS=${GFLAGS_NAMESPACE})
include_directories(${GFLAGS_INCLUDE_DIRS})
endif()
#gflags
add_definitions(-DGFLAGS_NS=${GFLAGS_NAMESPACE})
include_directories(${GFLAGS_INCLUDE_DIRS})
if(WITH_TESTING)
enable_testing()
......@@ -169,5 +163,4 @@ add_subdirectory(paddle)
add_subdirectory(python)
if(WITH_DOC)
add_subdirectory(doc)
add_subdirectory(doc_cn)
endif()
./doc/howto/contribute_to_paddle_en.md
\ No newline at end of file
......@@ -3,7 +3,7 @@ http_archive(
name="protobuf",
url="http://github.com/google/protobuf/archive/v3.1.0.tar.gz",
sha256="0a0ae63cbffc274efb573bdde9a253e3f32e458c41261df51c5dbc5ad541e8f7",
strip_prefix="protobuf-3.1.0", )
strip_prefix="protobuf-3.1.0")
# External dependency to gtest 1.7.0. This method comes from
# https://www.bazel.io/versions/master/docs/tutorial/cpp.html.
......@@ -12,4 +12,20 @@ new_http_archive(
url="https://github.com/google/googletest/archive/release-1.7.0.zip",
sha256="b58cb7547a28b2c718d1e38aee18a3659c9e3ff52440297e965f5edffe34b6d0",
build_file="third_party/gtest.BUILD",
strip_prefix="googletest-release-1.7.0", )
strip_prefix="googletest-release-1.7.0")
# External dependency to gflags. This method comes from
# https://github.com/gflags/example/blob/master/WORKSPACE.
new_git_repository(
name="gflags",
tag="v2.2.0",
remote="https://github.com/gflags/gflags.git",
build_file="third_party/gflags.BUILD")
# External dependency to glog. This method comes from
# https://github.com/reyoung/bazel_playground/blob/master/WORKSPACE
new_git_repository(
name="glog",
remote="https://github.com/google/glog.git",
commit="b6a5e0524c28178985f0d228e9eaa43808dbec3c",
build_file="third_party/glog.BUILD")
......@@ -72,6 +72,7 @@ function( Sphinx_add_target target_name builder conf cache source destination )
${source}
${destination}
COMMENT "Generating sphinx documentation: ${builder}"
COMMAND ln -s ${destination}/index_*.html ${destination}/index.html
)
set_property(
......
......@@ -14,13 +14,9 @@ if(WITH_STYLE_CHECK)
find_package(PythonInterp REQUIRED)
endif()
if(WITH_GLOG)
find_package(Glog REQUIRED)
endif()
find_package(Glog REQUIRED)
if(WITH_GFLAGS)
find_package(Gflags REQUIRED)
endif()
find_package(Gflags REQUIRED)
if(WITH_TESTING)
find_package(GTest REQUIRED)
......
......@@ -65,7 +65,7 @@ endmacro()
# link_paddle_exe
# add paddle library for a paddle executable, such as trainer, pserver.
#
# It will handle WITH_PYTHON/WITH_GLOG etc.
# It will handle WITH_PYTHON etc.
function(link_paddle_exe TARGET_NAME)
if(WITH_RDMA)
generate_rdma_links()
......@@ -108,6 +108,8 @@ function(link_paddle_exe TARGET_NAME)
paddle_cuda
${METRIC_LIBS}
${PROTOBUF_LIBRARY}
${LIBGLOG_LIBRARY}
${GFLAGS_LIBRARIES}
${CMAKE_THREAD_LIBS_INIT}
${CBLAS_LIBS}
${ZLIB_LIBRARIES}
......@@ -125,16 +127,6 @@ function(link_paddle_exe TARGET_NAME)
${PYTHON_LIBRARIES})
endif()
if(WITH_GLOG)
target_link_libraries(${TARGET_NAME}
${LIBGLOG_LIBRARY})
endif()
if(WITH_GFLAGS)
target_link_libraries(${TARGET_NAME}
${GFLAGS_LIBRARIES})
endif()
if(WITH_GPU)
if(NOT WITH_DSO OR WITH_METRIC)
target_link_libraries(${TARGET_NAME}
......@@ -206,5 +198,5 @@ function(create_resources res_file output)
# Convert hex data for C compatibility
string(REGEX REPLACE "([0-9a-f][0-9a-f])" "0x\\1," filedata ${filedata})
# Append data to output file
file(APPEND ${output} "const unsigned char ${filename}[] = {${filedata}};\nconst unsigned ${filename}_size = sizeof(${filename});\n")
file(APPEND ${output} "const unsigned char ${filename}[] = {${filedata}0};\nconst unsigned ${filename}_size = sizeof(${filename});\n")
endfunction()
......@@ -43,13 +43,13 @@ def extract_dict_features(pair_file, feature_file):
mark[verb_index] = 1
ctx_0 = sentence_list[verb_index]
if verb_index < len(labels_list) - 2:
if verb_index < len(labels_list) - 1:
mark[verb_index + 1] = 1
ctx_p1 = sentence_list[verb_index + 1]
else:
ctx_p1 = 'eos'
if verb_index < len(labels_list) - 3:
if verb_index < len(labels_list) - 2:
mark[verb_index + 2] = 1
ctx_p2 = sentence_list[verb_index + 2]
else:
......
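For intuition, the corrected bounds above (`verb_index < len(labels_list) - 1` guards index `verb_index + 1`, and likewise for `+ 2`) match a context-window lookup like this hypothetical helper, which is not part of the patch:

```python
def ctx_word(sentence_list, idx, offset, eos='eos'):
    # Return the word at idx + offset, or 'eos' once the window
    # runs past the end of the sentence.
    j = idx + offset
    return sentence_list[j] if j < len(sentence_list) else eos

# e.g. ctx_p1 = ctx_word(sentence_list, verb_index, 1)
#      ctx_p2 = ctx_word(sentence_list, verb_index, 2)
```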
......@@ -7,25 +7,50 @@ if(NOT DEFINED SPHINX_THEME_DIR)
endif()
# configured documentation tools and intermediate build results
set(BINARY_BUILD_DIR "${CMAKE_CURRENT_BINARY_DIR}/_build")
set(BINARY_BUILD_DIR_EN "${CMAKE_CURRENT_BINARY_DIR}/en/_build")
# Sphinx cache with pickled ReST documents
set(SPHINX_CACHE_DIR "${CMAKE_CURRENT_BINARY_DIR}/_doctrees")
set(SPHINX_CACHE_DIR_EN "${CMAKE_CURRENT_BINARY_DIR}/en/_doctrees")
# HTML output directory
set(SPHINX_HTML_DIR "${CMAKE_CURRENT_BINARY_DIR}/html")
# HTML output directory
set(SPHINX_HTML_DIR_EN "${CMAKE_CURRENT_BINARY_DIR}/en/html")
configure_file(
"${CMAKE_CURRENT_SOURCE_DIR}/conf.py.in"
"${BINARY_BUILD_DIR}/conf.py"
"${CMAKE_CURRENT_SOURCE_DIR}/conf.py.en.in"
"${BINARY_BUILD_DIR_EN}/conf.py"
@ONLY)
sphinx_add_target(paddle_docs
html
${BINARY_BUILD_DIR}
${SPHINX_CACHE_DIR}
${BINARY_BUILD_DIR_EN}
${SPHINX_CACHE_DIR_EN}
${CMAKE_CURRENT_SOURCE_DIR}
${SPHINX_HTML_DIR})
${SPHINX_HTML_DIR_EN})
add_dependencies(paddle_docs
gen_proto_py)
# configured documentation tools and intermediate build results
set(BINARY_BUILD_DIR_CN "${CMAKE_CURRENT_BINARY_DIR}/cn/_build")
# Sphinx cache with pickled ReST documents
set(SPHINX_CACHE_DIR_CN "${CMAKE_CURRENT_BINARY_DIR}/cn/_doctrees")
# HTML output directory
set(SPHINX_HTML_DIR_CN "${CMAKE_CURRENT_BINARY_DIR}/cn/html")
configure_file(
"${CMAKE_CURRENT_SOURCE_DIR}/conf.py.cn.in"
"${BINARY_BUILD_DIR_CN}/conf.py"
@ONLY)
sphinx_add_target(paddle_docs_cn
html
${BINARY_BUILD_DIR_CN}
${SPHINX_CACHE_DIR_CN}
${CMAKE_CURRENT_SOURCE_DIR}
${SPHINX_HTML_DIR_CN})
add_dependencies(paddle_docs_cn
gen_proto_py)
About PaddlePaddle
==================
PaddlePaddle is a parallel distributed deep learning platform originally developed jointly by Baidu scientists and engineers. It combines ease of use, efficiency, flexibility and scalability, and is already widely used by many product lines inside Baidu.
PaddlePaddle is now open source but still far from complete; we hope to keep improving, extending and evolving it on this basis.
We also hope that developers will actively provide feedback and contribute source code, building an active open-source community.
Acknowledgements
----------------
Here we would especially like to thank [all contributors](https://github.com/PaddlePaddle/Paddle/graphs/contributors) to PaddlePaddle.
......@@ -15,23 +15,23 @@ Use cases of MNIST
MNIST is a digit classification dataset containing 70,000 grayscale images. The sample data ``mnist_train.txt`` looks like this:
.. literalinclude:: mnist_train.txt
.. literalinclude:: src/mnist_train.txt
Each line of data represents one image and is split by ``;`` into two parts: the first part is the image's label, a digit in 0-9; the second part is the 28*28 grayscale pixel values of the image. The corresponding ``train.list`` is simply the name of this data file:
.. literalinclude:: train.list
.. literalinclude:: src/train.list
Using the dataprovider
++++++++++++++++++++++
.. literalinclude:: mnist_provider.dict.py
.. literalinclude:: src/mnist_provider.dict.py
- First, import PaddlePaddle's PyDataProvider2 package.
- Next, define a Python `Decorator <http://www.learnpython.org/en/Decorators>`_ `@provider`_ . It marks the data input function on the following line as a PyDataProvider2 and sets its input_types attribute.
- `input_types`_: specifies what kind of data this PyDataProvider2 returns. Based on the names of the ``data_layer`` in the network configuration, this example explicitly declares that a 28*28-dimensional dense float vector and a 10-class integer label in [0-9] are returned.
.. literalinclude:: mnist_config.py
.. literalinclude:: src/mnist_config.py
:lines: 9-10
- Note: if the user does not explicitly specify the mapping of the returned data, PaddlePaddle infers the mapping from the declaration order of the layers. That inferred mapping may be wrong, so explicitly specifying input_types is recommended; see the sketch below.
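For illustration only, a provider that makes the mapping explicit could look like the following sketch (``pixels`` and ``label`` are assumed ``data_layer`` names, not taken from the example config):

.. code-block:: python

    from paddle.trainer.PyDataProvider2 import provider, dense_vector, integer_value

    @provider(input_types={'pixels': dense_vector(28 * 28),
                           'label': integer_value(10)})
    def process(settings, filename):
        with open(filename) as f:
            for line in f:
                label, pixels = line.split(';')
                yield {'pixels': [float(v) for v in pixels.split()],
                       'label': int(label)}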
......@@ -53,7 +53,7 @@ Using the dataprovider
In the network configuration, a single line of code is enough to invoke this PyDataProvider2, e.g.,
.. literalinclude:: mnist_config.py
.. literalinclude:: src/mnist_config.py
:lines: 1-7
The training data is ``train.list``, there is no test data, and the PyDataProvider2 invoked is the ``process`` function in the ``mnist_provider`` module.
......@@ -80,7 +80,7 @@ Using the dataprovider
This example uses English sentiment classification data, i.e., a piece of English text is classified as positive or negative sentiment (denoted by 0 and 1). The sample data ``sentimental_train.txt`` looks like this:
.. literalinclude:: sentimental_train.txt
.. literalinclude:: src/sentimental_train.txt
Using the dataprovider
++++++++++++++++++++++
......@@ -90,7 +90,7 @@ dataprovider的使用
- Here ``input_types`` has the same effect as configuring it in `@provider`_ . The input feature in this example is a sequence of word IDs, so it is set with the ``integer_value_sequence`` type.
- ``dictionary`` is stored in the settings object for use in the ``process`` function. dictionary is a dict object passed in from the network configuration, i.e., a dictionary mapping word strings to word IDs.
.. literalinclude:: sentimental_provider.py
.. literalinclude:: src/sentimental_provider.py
Invocation in the network configuration
+++++++++++++++++++++++++++++++++++++++
......@@ -100,7 +100,7 @@ Using the dataprovider
* The configuration needs to read an external dictionary.
* Pass the dictionary in as an argument when declaring the DataProvider.
.. literalinclude:: sentimental_config.py
.. literalinclude:: src/sentimental_config.py
:emphasize-lines: 12-14
Reference
......
.. _api_pydataprovider2_en:
.. _api_pydataprovider2:
PyDataProvider2
===============
......@@ -24,18 +24,18 @@ of 28 x 28 pixels.
A small part of the original data is shown below as an example:
.. literalinclude:: ../../../doc_cn/ui/data_provider/mnist_train.txt
.. literalinclude:: src/mnist_train.txt
Each line of the data contains two parts, separated by :code:`;`. The first part is the
label of an image. The second part contains the 28x28 pixel float values.
Just write the path of the above data into train.list. It looks like this:
.. literalinclude:: ../../../doc_cn/ui/data_provider/train.list
.. literalinclude:: src/train.list
The corresponding dataprovider is shown as below:
.. literalinclude:: ../../../doc_cn/ui/data_provider/mnist_provider.py
.. literalinclude:: src/mnist_provider.dict.py
The first line imports the PyDataProvider2 package.
The main function is the process function, which has two parameters.
......@@ -74,7 +74,7 @@ sample by using keywords :code:`yield`.
Only a few lines of code need to be added to the training configuration file;
you can take this as an example.
.. literalinclude:: ../../../doc_cn/ui/data_provider/mnist_config.py
.. literalinclude:: src/mnist_config.py
Here we specify the training data by :code:`train.list`; no testing data is specified.
The method that actually provides data is :code:`process`.
......@@ -83,7 +83,7 @@ Users can also use another style to provide data, which defines the
:code:`data_layer`'s name explicitly when `yield` ing. For example,
the :code:`dataprovider` is shown below.
.. literalinclude:: ../../../doc_cn/ui/data_provider/mnist_provider.dict.py
.. literalinclude:: src/mnist_provider.dict.py
:linenos:
If the user didn't give the :code:`data_layer`'s name, PaddlePaddle will use
......@@ -104,7 +104,7 @@ And PaddlePaddle will do all of the rest things\:
Is this cool?
.. _api_pydataprovider2_en_sequential_model:
.. _api_pydataprovider2_sequential_model:
DataProvider for the sequential model
-------------------------------------
......@@ -121,11 +121,11 @@ negative sentiment (marked by 0 and 1 respectively).
As an example, a small part of the original data can be found at the path below:
.. literalinclude:: ../../../doc_cn/ui/data_provider/sentimental_train.txt
.. literalinclude:: src/sentimental_train.txt
The corresponding data provider can be found in the path below:
.. literalinclude:: ../../../doc_cn/ui/data_provider/sentimental_provider.py
.. literalinclude:: src/sentimental_provider.py
This data provider for the sequential model is a little more complex than that
for the MNIST dataset.
......@@ -143,7 +143,7 @@ initialized. The :code:`on_init` function has the following parameters:
To pass these parameters into the DataProvider, the following lines should be added
to the trainer configuration file.
.. literalinclude:: ../../../doc_cn/ui/data_provider/sentimental_config.py
.. literalinclude:: src/sentimental_config.py
The definition is basically the same as the MNIST example, except:
* Load dictionary in this configuration
......
API
===
DataProvider API
----------------
.. toctree::
:maxdepth: 1
data_provider/dataprovider_cn.rst
data_provider/pydataprovider2_cn.rst
.. _api_trainer_config:
Model Config API
----------------
.. toctree::
:maxdepth: 1
trainer_config_helpers/optimizers.rst
trainer_config_helpers/data_sources.rst
trainer_config_helpers/layers.rst
trainer_config_helpers/activations.rst
trainer_config_helpers/poolings.rst
trainer_config_helpers/networks.rst
trainer_config_helpers/evaluators.rst
trainer_config_helpers/attrs.rst
Applications API
----------------
.. toctree::
:maxdepth: 1
predict/swig_py_paddle_cn.rst
......@@ -7,7 +7,7 @@ DataProvider API
.. toctree::
:maxdepth: 1
data_provider/index_en.rst
data_provider/dataprovider_en.rst
data_provider/pydataprovider2_en.rst
.. _api_trainer_config:
......
......@@ -34,7 +34,7 @@ PaddlePaddle wraps the commonly used prediction interfaces with swig; building produces
Below is a snippet of prediction code that uses the mnist model for handwriting recognition. The complete code can be found in ``src_root/doc/ui/predict/predict_sample.py``. The mnist model can be trained with the demo under the ``src_root\demo\mnist`` directory.
.. literalinclude:: ../../../doc/ui/predict/predict_sample.py
.. literalinclude:: src/predict_sample.py
:language: python
:lines: 15-18,121-136
......
......@@ -13,7 +13,7 @@ Here is a sample python script that shows the typical prediction process for the
MNIST classification problem. A complete sample code could be found at
:code:`src_root/doc/ui/predict/predict_sample.py`.
.. literalinclude:: ./predict_sample.py
.. literalinclude:: src/predict_sample.py
:language: python
:lines: 15-18,90-100,101-104
......@@ -23,7 +23,7 @@ python's :code:`help()` function. Let's walk through the above python script:
* At the beginning, use :code:`swig_paddle.initPaddle()` to initialize
PaddlePaddle with command line arguments, for more about command line arguments
see :ref:`cmd_detail_introduction_en` .
see :ref:`cmd_detail_introduction` .
* Parse the configuration file that is used in training with :code:`parse_config()`.
Because the data to predict usually has no label, and the output of prediction is
normally the output layer rather than the cost layer, you should modify
......@@ -36,7 +36,7 @@ python's :code:`help()` function. Let's walk through the above python script:
- Note: As swig_paddle can only accept C++ matrices, we offer a utility
class DataProviderConverter that can accept the same input data as
PyDataProvider2; for more information please refer to the documentation
of :ref:`api_pydataprovider2_en` .
of :ref:`api_pydataprovider2` .
* Do the prediction with :code:`forwardTest()`, which takes the converted
input data and outputs the activations of the output layer.
......
......@@ -62,7 +62,7 @@ source_suffix = ['.rst', '.md', '.Rmd']
source_encoding = 'utf-8'
# The master toctree document.
master_doc = 'index'
master_doc = 'index_cn'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
......@@ -79,7 +79,7 @@ language = 'zh_CN'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
exclude_patterns = ['_build', '**/*_en*', '*_en*']
# The reST default role (used for this markup: `text`) to use for all
# documents.
......
......@@ -63,7 +63,7 @@ source_suffix = ['.rst', '.md', '.Rmd']
source_encoding = 'utf-8'
# The master toctree document.
master_doc = 'index'
master_doc = 'index_en'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
......@@ -80,7 +80,7 @@ language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
exclude_patterns = ['_build', '**/*_cn*', '*_cn*']
# The reST default role (used for this markup: `text`) to use for all
# documents.
......
####################
PaddlePaddle Frequently Asked Questions
FAQ
####################
.. contents::
......@@ -33,10 +33,9 @@ PyDataProvider loads data asynchronously and selects data at random directly in memory;
this memory pool effectively determines the granularity of the shuffle. Therefore, if the pool is made smaller while the data must stay random,
it is best to shuffle the data file before each read. Possible code is
.. literalinclude:: reduce_min_pool_size.py
.. literalinclude:: src/reduce_min_pool_size.py
This greatly reduces memory usage and may also speed up training; for details see the documentation `here
<../ui/data_provider/pydataprovider2.html#provider>`_ .
This greatly reduces memory usage and may also speed up training; for details see the documentation `here <../ui/data_provider/pydataprovider2.html#provider>`_ .
Memory for neuron activations
+++++++++++++++++++++++++++++
......@@ -76,7 +75,7 @@ PaddlePaddle supports a great many optimizers; different optimizers require
When using :code:`pydataprovider`, you can reduce the cache pool size and enable in-memory caching, which greatly speeds up data loading.
Shrinking the :code:`DataProvider` cache pool works on the same principle as the earlier trick of reducing memory usage by shrinking the cache pool.
.. literalinclude:: reduce_min_pool_size.py
.. literalinclude:: src/reduce_min_pool_size.py
In addition, the :code:`@provider` interface has a :code:`cache` parameter that controls the caching method. Setting it to :code:`CacheType.CACHE_PASS_IN_MEM` caches the data generated during the first :code:`pass` (one pass means going through all the training data once) in memory; in later passes, data is no longer read from the :code:`python` side but directly from the in-memory cache. This also greatly reduces data loading time.
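A minimal sketch of enabling this cache (assuming a single integer input; this is not the example file linked above):

.. code-block:: python

    from paddle.trainer.PyDataProvider2 import provider, integer_value, CacheType

    @provider(input_types=[integer_value(2)],
              cache=CacheType.CACHE_PASS_IN_MEM)
    def process(settings, file_name):
        # Python is only consulted during the first pass; later passes
        # read the cached samples directly from memory.
        with open(file_name) as f:
            for line in f:
                yield [int(line)]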
......@@ -90,11 +89,11 @@ PaddlePaddle supports sparse training, which requires the training features to be :code:`spa
Use the two words before and the two words after a word to predict that middle word. The DataProvider for this task is\:
.. literalinclude:: word2vec_dataprovider.py
.. literalinclude:: src/word2vec_dataprovider.py
The configuration for this task is\:
.. literalinclude:: word2vec_config.py
.. literalinclude:: src/word2vec_config.py
For more about sparse training, please refer to the `sparse training documentation <TBD>`_
......@@ -158,7 +157,7 @@ PaddlePaddle uses the name :code:`name` as a parameter's ID; parameters with the same name
Here :code:`hidden_a` and :code:`hidden_b` use the same parameter and bias, and the two inputs of the softmax layer also use the same parameter :code:`softmax_param`.
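As a sketch of the mechanism (hypothetical layer names and sizes; sharing works by giving :code:`ParamAttr` the same :code:`name`):

.. code-block:: python

    from paddle.trainer_config_helpers import *

    x1 = data_layer(name='x1', size=256)
    x2 = data_layer(name='x2', size=256)
    # Both layers refer to the parameters named 'fc_w'/'fc_b',
    # so they share one weight matrix and one bias vector.
    hidden_a = fc_layer(input=x1, size=128,
                        param_attr=ParamAttr(name='fc_w'),
                        bias_attr=ParamAttr(name='fc_b'))
    hidden_b = fc_layer(input=x2, size=128,
                        param_attr=ParamAttr(name='fc_w'),
                        bias_attr=ParamAttr(name='fc_b'))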
7. *-cp27mu-linux_x86_64.whl is not a supported wheel on this platform.
-----------------------------------------------------------------------
---------------------------------------------------------------------------
The main cause of this problem is that the system built the wheel with an up-to-date :code:`wheel` package,
while the :code:`pip` package on the system is relatively old. The fix is to update :code:`pip` and rebuild PaddlePaddle.
......@@ -220,7 +219,7 @@ PaddlePaddle uses the name :code:`name` as a parameter's ID; parameters with the same name
10. When building from source with CMake, the PythonLibs and PythonInterp versions found do not match
----------------------------------------------------------
----------------------------------------------------------------
This is a flaw in CMake's current logic for locating Python: if multiple Python versions are installed, the Python library and the Python interpreter that CMake finds may be inconsistent, causing the PaddlePaddle build to fail. The correct fix is for
the user to force a specific Python version, as follows:
......
......@@ -58,6 +58,7 @@ PaddlePaddle is a deep learning platform that originated at Baidu. This short introduction
cost = regression_cost(input= ȳ, label=y)
outputs(cost)
This short configuration demonstrates the basic usage of PaddlePaddle:
- The first part defines the data input. Normally, PaddlePaddle first obtains the data file locations from a file list and hands them to a user-defined function (such as the `process` function above) to read and preprocess into the actual input. Since the input data in this article is randomly generated and no input file needs to be read, an empty list (`empty.list`) suffices.
......
......@@ -49,8 +49,6 @@ PaddlePaddle supports some build options. To enable it, first you need to instal
<tbody>
<tr><td class="left">WITH_GPU</td><td class="left">Compile with GPU mode.</td></tr>
<tr><td class="left">WITH_DOUBLE</td><td class="left">Compile with double precision floating-point, default: single precision.</td></tr>
<tr><td class="left">WITH_GLOG</td><td class="left">Compile with glog. If not found, default: an internal log implementation.</td></tr>
<tr><td class="left">WITH_GFLAGS</td><td class="left">Compile with gflags. If not found, default: an internal flag implementation.</td></tr>
<tr><td class="left">WITH_TESTING</td><td class="left">Compile with gtest for PaddlePaddle's unit testing.</td></tr>
<tr><td class="left">WITH_DOC</td><td class="left"> Compile to generate PaddlePaddle's docs, default: disabled (OFF).</td></tr>
<tr><td class="left">WITH_SWIG_PY</td><td class="left">Compile with python predict API, default: disabled (OFF).</td></tr>
......
......@@ -6,8 +6,6 @@ WITH_AVX, whether to build PaddlePaddle binaries with AVX intrinsics, yes
WITH_PYTHON, whether to embed the Python interpreter (convenient for future embedded porting), yes
WITH_STYLE_CHECK, whether to run code style checks when building, yes
WITH_RDMA, whether to enable RDMA, no
WITH_GLOG, whether to enable GLOG (if disabled, a simplified logger is used instead, which also eases future embedded porting), depends on whether GLOG is found
WITH_GFLAGS, whether to use GFLAGS (if disabled, a simplified command-line flag parser is used instead, which also eases future embedded porting), depends on whether GFLAGS is found
WITH_TIMER, whether to enable timing (slightly slower runs and more verbose logs, but convenient for debugging and benchmarking), no
WITH_TESTING, whether to enable unit tests, depends on whether GTEST is found
WITH_DOC, whether to build the Chinese and English documentation, no
......
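For context, these options are passed to cmake on the command line; a hypothetical invocation might be:

.. code-block:: bash

    cmake .. -DWITH_GPU=OFF -DWITH_TIMER=ON -DWITH_TESTING=ON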
......@@ -111,7 +111,24 @@ map the CUDA-related driver and devices into the container; the script looks like
A simple Dockerfile with ssh looks like this:
.. literalinclude:: paddle_ssh.Dockerfile
.. code-block:: bash
FROM paddledev/paddle:cpu-latest
MAINTAINER PaddlePaddle dev team <paddle-dev@baidu.com>
RUN apt-get update
RUN apt-get install -y openssh-server
RUN mkdir /var/run/sshd
RUN echo 'root:root' | chpasswd
RUN sed -ri 's/^PermitRootLogin\s+.*/PermitRootLogin yes/' /etc/ssh/sshd_config
RUN sed -ri 's/UsePAM yes/#UsePAM yes/g' /etc/ssh/sshd_config
EXPOSE 22
CMD ["/usr/sbin/sshd", "-D"]
Build an image from this Dockerfile, then run the container. The relevant commands are\:
......
......@@ -17,7 +17,7 @@ CPU-only one and a CUDA GPU one. We do so by configuring
`dockerhub.com <https://hub.docker.com/r/paddledev/paddle/>`_
automatically runs the following commands:
.. code-block:: base
.. code-block:: bash
docker build -t paddle:cpu -f paddle/scripts/docker/Dockerfile .
docker build -t paddle:gpu -f paddle/scripts/docker/Dockerfile.gpu .
......
......@@ -9,8 +9,8 @@ PaddlePaddle provides several precompiled binaries for installation, including Docker images
.. toctree::
:maxdepth: 1
install/docker_install.rst
install/ubuntu_install.rst
docker_install_cn.rst
ubuntu_install_cn.rst
......@@ -24,4 +24,4 @@ PaddlePaddle provides several precompiled binaries for installation, including Docker images
.. toctree::
:maxdepth: 1
cmake/index.rst
cmake/build_from_source_cn.rst
\ No newline at end of file
......@@ -38,7 +38,18 @@ PaddlePaddle provides deb packages for Ubuntu 14.04.
After installation, you can check the installed paddle version with the command :code:`paddle version`:
.. literalinclude:: paddle_version.txt
.. code-block:: shell
PaddlePaddle 0.8.0b1, compiled with
with_avx: ON
with_gpu: OFF
with_double: OFF
with_python: ON
with_rdma: OFF
with_metric_learning:
with_timer: OFF
with_predict_sdk:
Possible problems
-----------------
......
GET STARTED
============
.. toctree::
:maxdepth: 2
build_and_install/index_cn.rst
basic_usage/index_cn.rst
```eval_rst
.. _cmd_detail_introduction_en:
.. _cmd_detail_introduction:
```
# Detail Description
......
```eval_rst
.. _cmd_line_index_en:
.. _cmd_line_index:
```
# How to Set Command-line Parameters
......
......@@ -47,7 +47,7 @@ DataProvider is PaddlePaddle's data provider; it converts the user's raw data
A simple training configuration file is:
.. literalinclude:: trainer_config.py
.. literalinclude:: src/trainer_config.py
:linenos:
The file begins with ``from paddle.trainer_config_helpers import *`` because the most basic protocol by which PaddlePaddle configuration files communicate with the C++ modules is protobuf. To spare users from writing complex protobuf strings directly, we define a Python interface for configuring networks; this Python code generates the protobuf package, which is what `trainer_config_helpers`_ does. Hence these functions are imported at the start of the file. The package contains all the modules needed for model configuration.
......@@ -114,7 +114,7 @@ PaddlePaddle can use ``mixed layer`` to configure very complex networks, even
For multi-machine training, PaddlePaddle adopts the classic Parameter Server architecture to synchronize the trainers on multiple nodes. The classic topology of multi-machine training is as follows\:
.. graphviz:: pserver_topology.dot
.. graphviz:: src/pserver_topology.dot
Each gray box in the figure is a machine. On each machine, first start a pserver process with the command ``paddle pserver`` and specify its port; possible arguments are\:
......
......@@ -47,6 +47,22 @@ Then you can start to develop by making a local development branch
git checkout -b MY_COOL_STUFF_BRANCH
```
## Using `pre-commit` hook
Paddle developers use the [pre-commit](http://pre-commit.com/) tool to manage git
pre-commit hooks. It helps us format source code (cpp, python) and check some
basics before each commit (e.g., only one EOL at the end of each file, and no huge
files added to git). The `pre-commit` checks are now part of the unit tests run in
Travis-CI, and a PR that does not pass the hooks cannot be merged into Paddle.
To use [pre-commit](http://pre-commit.com/), install it with
`pip install pre-commit`. Currently, Paddle uses `clang-format` to format
C/C++ sources; please make sure clang-format 3.8+ is installed.
Then just run `pre-commit install` in your Paddle clone directory. Whenever you
commit, the pre-commit hooks will check your local changes for anything that
is not suitable to commit.
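A minimal workflow might look like the following (standard `pre-commit` usage, not Paddle-specific; the exact hook set is defined by the repository's `.pre-commit-config.yaml`):

```bash
# Install the tool and register the hooks in the current clone
pip install pre-commit
pre-commit install

# Optionally run every hook against all files before pushing
pre-commit run --all-files
```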
## Commit
Commit your changes by following command lines:
......
How to Configure Deep Models
============================
.. toctree::
:maxdepth: 1
rnn/recurrent_group_cn.md
rnn/hierarchical_layer_cn.rst
rnn/hrnn_rnn_api_compare_cn.rst
rnn/hrnn_demo_cn.rst
......@@ -24,18 +24,18 @@
- The raw data in this example has 10 samples in total. Each sample consists of two parts: a label (always 2 here) and an already word-segmented sentence. This data is also used directly by the single-layer RNN network.
.. literalinclude:: ../../../paddle/gserver/tests/Sequence/tour_train_wdseg
.. literalinclude:: ../../../../paddle/gserver/tests/Sequence/tour_train_wdseg
:language: text
- The two-level (nested) sequence data has 4 samples in total. Samples are separated by blank lines, and the overall data is exactly the same as the raw data. For the two-level sequence LSTM, however, the first sample encodes two pieces of data into two vectors at once. The numbers of sentences processed simultaneously in these four samples are\ :code:`[2, 3, 2, 3]`\ .
.. literalinclude:: ../../../paddle/gserver/tests/Sequence/tour_train_wdseg.nest
.. literalinclude:: ../../../../paddle/gserver/tests/Sequence/tour_train_wdseg.nest
:language: text
Next, for the two different input data types, the corresponding DataProviders are compared below (`sequenceGen.py <https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/gserver/tests/sequenceGen.py>`_)\:
.. literalinclude:: ../../../paddle/gserver/tests/sequenceGen.py
.. literalinclude:: ../../../../paddle/gserver/tests/sequenceGen.py
:language: python
:lines: 21-39
:linenos:
......@@ -43,10 +43,11 @@
- This is the DataProvider code for an ordinary single-level time sequence, explained as follows:
* The DataProvider returns two pieces of data, words and label, i.e., line 19 in the code above.
- words is the array of vocabulary indices corresponding to each sentence in the raw data. It is of type integer_value_sequence, i.e., an integer array. words is the single-level time sequence in this data.
- label is the classification label of each sentence in the raw data; it is of type integer_value.
.. literalinclude:: ../../../paddle/gserver/tests/sequenceGen.py
.. literalinclude:: ../../../../paddle/gserver/tests/sequenceGen.py
:language: python
:lines: 42-71
:linenos:
......@@ -63,7 +64,7 @@
First, let us look at the single-layer RNN configuration. Lines 9-15 of the code (the highlighted part) are the code that uses the single-layer RNN sequence. It uses PaddlePaddle's predefined RNN processing function, in which the RNN passes through an LSTM network at each time step.
.. literalinclude:: ../../../paddle/gserver/tests/sequence_layer_group.conf
.. literalinclude:: ../../../../paddle/gserver/tests/sequence_layer_group.conf
:language: python
:lines: 38-63
:linenos:
......@@ -84,7 +85,7 @@
* At this point, \ :code:`lstm_last`\ produces the same result as \ :code:`lstm_last`\ in the single-layer RNN configuration.
.. literalinclude:: ../../../paddle/gserver/tests/sequence_nest_layer_group.conf
.. literalinclude:: ../../../../paddle/gserver/tests/sequence_nest_layer_group.conf
:language: python
:lines: 38-64
:linenos:
......@@ -106,7 +107,7 @@
- Single-layer RNN: passes through a very simple recurrent_group. At each time step, the current input y and the previous time step's output rnn_state go through a fully connected layer.
.. literalinclude:: ../../../paddle/gserver/tests/sequence_rnn.conf
.. literalinclude:: ../../../../paddle/gserver/tests/sequence_rnn.conf
:language: python
:lines: 36-48
......@@ -115,7 +116,7 @@
- The inner inner_step's recurrent_group is almost identical to the single-level sequence one, except for boot_layer=outer_mem, which uses the outer outer_mem as the initial state of the inner memory. In the outer outer_step, outer_mem is the last vector of a subsentence; i.e., the whole two-level group takes the last vector of the previous subsentence as the initial memory state for the next subsentence.
- From the input data's perspective, the sentences in the single-level and two-level sequences are the same; the two-level sequence merely partitions them further into subsequences. Therefore, in the two-level configuration, the last element of the previous subsentence must be passed as boot_layer to the next subsentence's memory, to stay consistent with the single-level configuration's behavior that "every time step uses the output of the previous time step".
.. literalinclude:: ../../../paddle/gserver/tests/sequence_nest_rnn.conf
.. literalinclude:: ../../../../paddle/gserver/tests/sequence_nest_rnn.conf
:language: python
:lines: 39-66
......@@ -151,14 +152,14 @@
* Single-layer RNN\:
.. literalinclude:: ../../../paddle/gserver/tests/sequence_rnn_multi_unequalength_inputs.py
.. literalinclude:: ../../../../paddle/gserver/tests/sequence_rnn_multi_unequalength_inputs.py
:language: python
:lines: 42-59
:linenos:
* Two-level RNN\ \:
.. literalinclude:: ../../../paddle/gserver/tests/sequence_nest_rnn_multi_unequalength_inputs.py
.. literalinclude:: ../../../../paddle/gserver/tests/sequence_nest_rnn_multi_unequalength_inputs.py
:language: python
:lines: 41-80
:linenos:
......@@ -181,11 +182,11 @@ Memory
Memory is a concept PaddlePaddle uses when implementing RNNs. An RNN, i.e., a recurrent neural network over time, usually requires dependencies between time steps: the network at the current time step depends on the output of some neuron in the network at the previous time step, as shown in the figure below.
.. graphviz:: glossary_rnn.dot
.. graphviz:: src/glossary_rnn.dot
The dashed connections in the figure above are network connections that cross time steps. When implementing RNNs, PaddlePaddle realizes such cross-time-step connections with a special neural network unit called Memory. A Memory can cache the output of some neuron at the previous time step and feed it to another neuron at the next time step. An RNN implemented with Memory is shown in the figure below.
.. graphviz:: glossary_rnn_with_memory.dot
.. graphviz:: src/glossary_rnn_with_memory.dot
In this way, PaddlePaddle can fairly easily determine which outputs should cross time steps and which should not.
......
......@@ -30,7 +30,7 @@ Then at the :code:`process` function, each :code:`yield` function will return th
yield src_ids, trg_ids, trg_ids_next
For a more detailed description of how to write a data provider, please refer to :ref:`api_pydataprovider2_en` . The full data provider file is located at :code:`demo/seqToseq/dataprovider.py`.
For a more detailed description of how to write a data provider, please refer to :ref:`api_pydataprovider2` . The full data provider file is located at :code:`demo/seqToseq/dataprovider.py`.
===============================================
Configure Recurrent Neural Network Architecture
......@@ -42,7 +42,7 @@ Simple Gated Recurrent Neural Network
A recurrent neural network processes a sequence one time step at a time. An example of the LSTM architecture is shown below.
.. image:: ../../../tutorials/sentiment_analysis/bi_lstm.jpg
.. image:: ../../../tutorials/sentiment_analysis/src/bi_lstm.jpg
:align: center
Generally speaking, a recurrent network performs the following operations from :math:`t=1` to :math:`t=T`, or in reverse from :math:`t=T` to :math:`t=1`.
......@@ -246,6 +246,6 @@ The code is listed below:
outputs(beam_gen)
Notice that this generation technique is only useful for decoder-like generation processes. If you are working on sequence tagging tasks, please refer to :ref:`semantic_role_labeling_en` for more details.
Notice that this generation technique is only useful for decoder-like generation processes. If you are working on sequence tagging tasks, please refer to :ref:`semantic_role_labeling` for more details.
The full configuration file is located at :code:`demo/seqToseq/seqToseq_net.py`.
HOW TO
=======
Usage
-------
.. toctree::
:maxdepth: 1
concepts/use_concepts_cn.rst
cluster/k8s/paddle_on_k8s_cn.md
cluster/k8s/distributed_training_on_k8s_cn.md
Development
------------
.. toctree::
:maxdepth: 1
write_docs/index_cn.rst
deep_model/index_cn.rst
Optimization
-------------
.. toctree::
:maxdepth: 1
PaddlePaddle Documentation
==========================
.. toctree::
:maxdepth: 1
getstarted/index_cn.rst
tutorials/index_cn.md
howto/index_cn.rst
api/index_cn.rst
faq/index_cn.rst
......@@ -9,3 +9,4 @@ PaddlePaddle Documentation
howto/index_en.rst
api/index_en.rst
about/index_en.rst
\ No newline at end of file
# Model Zoo - ImageNet #
[ImageNet](http://www.image-net.org/) is a well-known database for general object classification. This tutorial provides a convolutional classification network model for ImageNet.
## Introduction to ResNet
The ResNet architecture proposed in the paper [Deep Residual Learning for Image Recognition](http://arxiv.org/abs/1512.03385) won first place in the classification task of the 2015 ImageNet Large Scale Visual Recognition Challenge (ILSVRC 2015). The authors proposed a residual learning framework to ease network training, and the networks they built are substantially deeper than those used before. The figure below shows the residual connection patterns: the building block on the left is used in the 34-layer network, while the bottleneck block on the right is used in the 50-, 101- and 152-layer networks.
<center>![resnet_block](./resnet_block.jpg)</center>
<center>Figure 1. ResNet building blocks</center>
This tutorial provides three ResNet models, all converted from the original authors' models at <https://github.com/KaimingHe/deep-residual-networks>. We tested the classification error rate with PaddlePaddle on the 50,000 images of the ILSVRC validation set; the input images use the **BGR** channel order, are resized with preserved aspect ratio so that the short side is 256, and only the central square region is cropped. The error rates and model sizes are given in the table below.
<center>
<table border="2" cellspacing="0" cellpadding="6" rules="all" frame="border">
<colgroup>
<col class="left" />
<col class="left" />
<col class="left" />
</colgroup>
<thead>
<tr>
<th scope="col" class="left">ResNet</th>
<th scope="col" class="left">Top-1</th>
<th scope="col" class="left">Model Size</th>
</tr>
</thead>
<tbody>
<tr>
<td class="left">ResNet-50</td>
<td class="left">24.9%</td>
<td class="left">99M</td>
</tr>
<tr>
<td class="left">ResNet-101</td>
<td class="left">23.7%</td>
<td class="left">173M</td>
</tr>
<tr>
<td class="left">ResNet-152</td>
<td class="left">23.2%</td>
<td class="left">234M</td>
</tr>
</tbody>
</table></center>
<br>
## ResNet Models
The network configurations for 50, 101 and 152 layers are in ```demo/model_zoo/resnet/resnet.py```. You can also specify the number of layers by adding a command-line argument such as ```--config_args=layer_num=50```.
### Network Visualization
You can get a visualization of the ResNet network structure by running the commands below. The script generates a dot file and then converts it into an image; graphviz needs to be installed for the conversion.
```
cd demo/model_zoo/resnet
./net_diagram.sh
```
### Model Download
```
cd demo/model_zoo/resnet
./get_model.sh
```
Run the commands above to download all the models and the mean file. If the download succeeds, these files are saved under ```demo/model_zoo/resnet/model```.
```
mean_meta_224 resnet_101 resnet_152 resnet_50
```
* resnet_50: the 50-layer network model.
* resnet_101: the 101-layer network model.
* resnet_152: the 152-layer network model.
* mean\_meta\_224: the mean image file, of size 3 x 224 x 224 with **BGR** channel order. Alternatively you can use the three values 103.939, 116.779, 123.68 (see the sketch below).
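For illustration, subtracting these per-channel means in numpy might look like this sketch (assuming a float BGR array of shape 3 x 224 x 224):

```
import numpy as np

img = np.zeros((3, 224, 224), dtype=np.float32)   # stand-in for a BGR image
mean_bgr = np.array([103.939, 116.779, 123.68], dtype=np.float32)
img -= mean_bgr.reshape(3, 1, 1)                  # broadcast over height/width
```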
### Parameter Information
* **Convolution layer weights**
Since every convolution layer is followed by a batch normalization layer, it has no bias parameter and only a single weight; a numpy sketch of the layout follows below.
Shape: `(Co, ky, kx, Ci)`
* Co: number of channels of the output feature maps
* ky: vertical size of the filter kernel
* kx: horizontal size of the filter kernel
* Ci: number of channels of the input feature maps
2-D matrix: (Co * ky * kx, Ci), stored in row-major order.
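As a numpy sketch of this layout (example sizes only, not from a real model):

```
import numpy as np

Co, ky, kx, Ci = 64, 3, 3, 3                          # example sizes only
flat = np.zeros(Co * ky * kx * Ci, dtype=np.float32)  # stand-in for the loaded weight
w2d = flat.reshape(Co * ky * kx, Ci)                  # the stored row-major 2-D matrix
w4d = flat.reshape(Co, ky, kx, Ci)                    # the logical 4-D shape
```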
* **Fully connected layer weights**
2-D matrix: (input layer size, this layer's size), stored in row-major order.
* **[Batch Normalization](<http://arxiv.org/abs/1502.03167>) layer weights**
This layer has four parameters; in fact only .w0 and .wbias are learnable, while the other two are the moving mean and the moving variance. They are loaded into the model at test time. The table below lists the parameters of a batch normalization layer.
<center>
<table border="2" cellspacing="0" cellpadding="6" rules="all" frame="border">
<colgroup>
<col class="left" />
<col class="left" />
<col class="left" />
</colgroup>
<thead>
<tr>
<th scope="col" class="left">Parameter name</th>
<th scope="col" class="left">Size</th>
<th scope="col" class="left">Meaning</th>
</tr>
</thead>
<tbody>
<tr>
<td class="left">_res2_1_branch1_bn.w0</td>
<td class="left">256</td>
<td class="left">gamma, the scaling parameter</td>
</tr>
<tr>
<td class="left">_res2_1_branch1_bn.w1</td>
<td class="left">256</td>
<td class="left">mean of the feature maps</td>
</tr>
<tr>
<td class="left">_res2_1_branch1_bn.w2</td>
<td class="left">256</td>
<td class="left">variance of the feature maps</td>
</tr>
<tr>
<td class="left">_res2_1_branch1_bn.wbias</td>
<td class="left">256</td>
<td class="left">beta, the bias parameter</td>
</tr>
</tbody>
</table></center>
<br>
### Reading Parameters
Users can use the following Python script to read the parameter values:
```
import sys
import numpy as np
def load(file_name):
with open(file_name, 'rb') as f:
f.read(16) # skip header for float type.
return np.fromfile(f, dtype=np.float32)
if __name__=='__main__':
weight = load(sys.argv[1])
```
Or use the following shell command directly:
```
od -j 16 -f _res2_1_branch1_bn.w0
```
## Feature Extraction
We provide both C++ and Python interfaces for extracting features. The following examples use the data in `demo/model_zoo/resnet/example` and show the whole feature extraction process in detail.
### C++ Interface
First, specify the image data list in `define_py_data_sources2` in the configuration file; see the example `demo/model_zoo/resnet/resnet.py` for details.
```
train_list = 'train.list' if not is_test else None
# mean.meta is mean file of ImageNet dataset.
# mean.meta size : 3 x 224 x 224.
# If you use three mean value, set like:
# "mean_value:103.939,116.779,123.68;"
args={
'mean_meta': "model/mean_meta_224/mean.meta",
'image_size': 224, 'crop_size': 224,
'color': True,'swap_channel:': [2, 1, 0]}
define_py_data_sources2(train_list,
'example/test.list',
module="example.image_list_provider",
obj="processData",
args=args)
```
Second, specify in `resnet.py` the names of the network layers whose features are to be extracted. For example,
```
Outputs("res5_3_branch2c_conv", "res5_3_branch2c_bn")
```
Third, specify the model path and the output directory in `extract_fea_c++.sh`, then run the following commands.
```
cd demo/model_zoo/resnet
./extract_fea_c++.sh
```
If it runs successfully, the features are saved in `fea_output/rank-00000` as shown below. You can load this file with the `load_feature_c` interface in `load_feature.py`.
```
-0.115318 -0.108358 ... -0.087884;-1.27664 ... -1.11516 -2.59123;
-0.126383 -0.116248 ... -0.00534909;-1.42593 ... -1.04501 -1.40769;
```
* Each line stores the features of one sample: the first line holds the features of the image `example/dog.jpg`, and the second line those of the image `example/cat.jpg`.
* Features from different layers are separated by a semicolon `;`, in the same order as the layers specified in `Outputs()`. Here the left part is the features of the `res5_3_branch2c_conv` layer and the right part those of the `res5_3_branch2c_bn` layer; a parsing sketch follows below.
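To illustrate the format only (this is not the actual `load_feature_c` implementation), one line could be parsed like this sketch:

```
import numpy as np

def parse_feature_line(line):
    # One sample per line; layers split by ';', values by whitespace.
    parts = line.strip().rstrip(';').split(';')
    return [np.array(p.split(), dtype=np.float32) for p in parts]

conv_fea, bn_fea = parse_feature_line(
    "-0.115318 -0.108358 -0.087884;-1.27664 -1.11516 -2.59123;")
```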
### Python Interface
The example `demo/model_zoo/resnet/classify.py` shows how to extract features with Python. The following example again uses the data in `./example/test.list`. Run the following commands:
```
cd demo/model_zoo/resnet
./extract_fea_py.sh
```
extract_fea_py.sh:
```
python classify.py \
--job=extract \
--conf=resnet.py\
--use_gpu=1 \
--mean=model/mean_meta_224/mean.meta \
--model=model/resnet_50 \
--data=./example/test.list \
--output_layer="res5_3_branch2c_conv,res5_3_branch2c_bn" \
--output_dir=features
```
* \--job=extract: set the job mode to extract features.
* \--conf=resnet.py: the network configuration file.
* \--use_gpu=1: whether to use the GPU.
* \--model=model/resnet_50: the model path.
* \--data=./example/test.list: the data list.
* \--output_layer="xxx,xxx": the layers to extract features from.
* \--output_dir=features: the output directory.
If it runs successfully, you will find the features stored in `features/batch_0`, a file produced with cPickle. You can open it with the `load_feature_py` interface in `load_feature.py`, which returns a dictionary as follows:
```
{
'cat.jpg': {'res5_3_branch2c_conv': array([[-0.12638293, -0.116248 , -0.11883899, ..., -0.00895038, 0.01994277, -0.00534909]], dtype=float32), 'res5_3_branch2c_bn': array([[-1.42593431, -1.28918779, -1.32414699, ..., -1.45933616, -1.04501402, -1.40769434]], dtype=float32)},
'dog.jpg': {'res5_3_branch2c_conv': array([[-0.11531784, -0.10835785, -0.08809858, ...,0.0055237, 0.01505112, -0.08788397]], dtype=float32), 'res5_3_branch2c_bn': array([[-1.27663755, -1.18272924, -0.90937918, ..., -1.25178063, -1.11515927, -2.59122872]], dtype=float32)}
}
```
A close look shows that these feature values are consistent with the results extracted via the C++ interface above.
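A minimal way to inspect such a file might be the following sketch (assuming Python 2, which this demo targets, and that the file holds a single pickled dictionary):

```
import cPickle

with open('features/batch_0', 'rb') as f:
    feature_dict = cPickle.load(f)
print(feature_dict['cat.jpg']['res5_3_branch2c_bn'])
```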
## Prediction
`classify.py` can also be used to predict on samples. We provide an example script, `predict.sh`, which uses the 50-layer ResNet model to predict on the data in `example/test.list`.
```
cd demo/model_zoo/resnet
./predict.sh
```
predict.sh invokes `classify.py`:
```
python classify.py \
--job=predict \
--conf=resnet.py\
--multi_crop \
--model=model/resnet_50 \
--use_gpu=1 \
--data=./example/test.list
```
* \--job=predict: set the job mode to prediction.
* \--conf=resnet.py: the network configuration file.
* \--multi_crop: use 10 cropped patches and average the predicted probabilities.
* \--use_gpu=1: whether to use the GPU.
* \--model=model/resnet_50: the model path.
* \--data=./example/test.list: the data list.
If it runs successfully, you will see the following results, where 156 and 282 are the classification labels of these images.
```
Label of example/dog.jpg is: 156
Label of example/cat.jpg is: 282
```
......@@ -52,7 +52,7 @@ See ```demo/model_zoo/resnet/resnet.py```. This config contains network of 50, 1
### Network Visualization
You can get a diagram of the ResNet network by running the following commands. The script generates a dot file and then converts it to a PNG file, using the draw_dot tool installed on our server. If you cannot access the server, just install graphviz to convert the dot file.
You can get a diagram of the ResNet network by running the following commands. The script generates a dot file and then converts it to a PNG file, which requires graphviz to be installed.
```
cd demo/model_zoo/resnet
......@@ -138,7 +138,7 @@ There are four parameters in this layer. In fact, only .w0 and .wbias are the le
### Parameter Observation
Users who want to observe the parameters can use python to read:
Users who want to observe the parameters can use Python to read:
```
import sys
......@@ -209,7 +209,7 @@ If successful, features are saved in `fea_output/rank-00000` as follows. And you
### Python Interface
`demo/model_zoo/resnet/classify.py` is an example to show how to use python to extract features. Following example still uses data of `./example/test.list`. Command is as follows:
`demo/model_zoo/resnet/classify.py` is an example to show how to use Python to extract features. Following example still uses data of `./example/test.list`. Command is as follows:
```
cd demo/model_zoo/resnet
......@@ -238,8 +238,6 @@ python classify.py \
* \--output_layer="xxx,xxx": specify layers to extract features.
* \--output_dir=features: output diretcoty.
Note: since the convolution layers in these ResNet models are tailored to the cudnn implementation, which only supports GPU, CPU mode is not supported yet because of this compatibility issue; we will fix it later.
If run successfully, you will see features saved in `features/batch_0`, this file is produced with cPickle. You can use `load_feature_py` interface in `load_feature.py` to open the file, and it returns a dictionary as follows:
```
......
# TUTORIALS
There are several examples and demos here.
## Quick Start
* [Quick Start](quick_start/index_cn.rst)
## Image
* TBD
## NLP
* [Sentiment Analysis](sentiment_analysis/index_cn.md)
* [Semantic Role Labeling](semantic_role_labeling/index_cn.rst)
## Recommendation
* TBD
## Model Zoo
* TBD
# TUTORIALS
There are serveral examples and demos here.
There are several examples and demos here.
## [Quick Start](quick_start/index_en.md)
## Quick Start
* [Quick Start](quick_start/index_en.md)
## Image
......
......@@ -21,7 +21,7 @@ PaddlePaddle Quick Start Tutorial
With PaddlePaddle, every task workflow can be divided into the following five steps.
.. image:: Pipeline.jpg
.. image:: src/Pipeline_cn.jpg
:align: center
:scale: 80%
......@@ -99,7 +99,7 @@ Reading data with a Python script
In this subsection we introduce the network structure of the model.
.. image:: PipelineNetwork.jpg
.. image:: src/PipelineNetwork_cn.jpg
:align: center
:scale: 80%
......@@ -112,7 +112,7 @@ Reading data with a Python script
The specific workflow is as follows:
.. image:: NetLR.jpg
.. image:: src/NetLR_cn.jpg
:align: center
:scale: 80%
......@@ -176,7 +176,7 @@ The embedding model requires slightly changing the data provider Python script, i.e. ``dataprovide
This model still uses the framework of the logistic regression classification network; it only replaces the sparse vector representation of the sentence with a continuous vector representation, i.e., the third step is replaced. The computation of the sentence representation is updated to two steps:
.. image:: NetContinuous.jpg
.. image:: src/NetContinuous_cn.jpg
:align: center
:scale: 80%
......@@ -207,7 +207,7 @@ The embedding model requires slightly changing the data provider Python script, i.e. ``dataprovide
A convolutional network is a particular way of going from word vector representations to a sentence representation; it further evolves the word vector model into three new steps.
.. image:: NetConv.jpg
.. image:: src/NetConv_cn.jpg
:align: center
:scale: 80%
......@@ -238,7 +238,7 @@ The embedding model requires slightly changing the data provider Python script, i.e. ``dataprovide
Sequence models
---------------
.. image:: NetRNN.jpg
.. image:: src/NetRNN_cn.jpg
:align: center
:scale: 80%
......@@ -284,7 +284,7 @@ Momentum, RMSProp, AdaDelta, AdaGrad, ADAM, Adamax, etc.; here we use the Adam optimizer
Once data loading and network configuration are complete, we can train the model.
.. image:: PipelineTrain.jpg
.. image:: src/PipelineTrain_cn.jpg
:align: center
:scale: 80%
......@@ -294,7 +294,7 @@ Momentum, RMSProp, AdaDelta, AdaGrad, ADAM, Adamax, etc.; here we use the Adam optimizer
./train.sh
``train.sh``contains the basic commands for training the model. The main parameters to set for training are as follows:
``train.sh`` contains the basic commands for training the model. The main parameters to set for training are as follows:
.. code-block:: bash
......@@ -312,7 +312,7 @@ Momentum, RMSProp, AdaDelta, AdaGrad, ADAM, Adamax, etc.; here we use the Adam optimizer
Once the model is trained, we can run prediction.
.. image:: PipelineTest.jpg
.. image:: src/PipelineTest_cn.jpg
:align: center
:scale: 80%
......
......@@ -32,7 +32,7 @@ The monitor breaks down two months after purchase.
the classifier should output "negative".
To build your text classification system, your code will need to perform five steps:
<center> ![](./Pipeline_en.jpg) </center>
<center> ![](./src/Pipeline_en.jpg) </center>
- Preprocess data into a standardized format.
- Provide data to the learning model.
......@@ -160,14 +160,14 @@ You can refer to the following link for more detailed examples and data formats:
## Network Architecture
We will describe four kinds of network architectures in this section.
<center> ![](./PipelineNetwork_en.jpg) </center>
<center> ![](./src/PipelineNetwork_en.jpg) </center>
First, you will build a logistic regression model. Later, you will also get the chance to build other more powerful network architectures.
For more detailed documentation, you could refer to the <a href = "../../api/trainer_config_helpers/layers.html">layer documentation</a>. All configuration files are in the `demo/quick_start` directory.
### Logistic Regression
The architecture is illustrated in the following picture:
<center> ![](./NetLR_en.png) </center>
<center> ![](./src/NetLR_en.png) </center>
- You need to define the data for text features. The size of the data layer is the number of words in the dictionary.
......@@ -240,7 +240,7 @@ def process(settings, file_name):
```
This model is very similar to the framework of logistic regression, but it uses word embedding vectors instead of sparse vectors to represent words.
<center> ![](./NetContinuous_en.png) </center>
<center> ![](./src/NetContinuous_en.png) </center>
- It looks up the dense word embedding vector in the dictionary (the word embedding size is `word_dim`). The input is a sequence of N words; the output is N word_dim-dimensional vectors.
......@@ -283,7 +283,7 @@ The performance is summarized in the following table:
### Convolutional Neural Network Model
Convolutional neural network converts a sequence of word embeddings into a sentence representation using temporal convolutions. You will transform the fully connected layer of the word embedding model to 3 new sub-steps.
<center> ![](./NetConv_en.png) </center>
<center> ![](./src/NetConv_en.png) </center>
Text convolution has 3 steps:
......@@ -324,7 +324,7 @@ The performance is summarized in the following table:
<br>
### Recurrent Model
<center> ![](./NetRNN_en.png) </center>
<center> ![](./src/NetRNN_en.png) </center>
You can use a recurrent neural network as the time sequence model, including the simple RNN model, the GRU model, and the LSTM model.
......@@ -378,7 +378,7 @@ settings(batch_size=128,
## Training Model
After completing data preparation and network architecture specification, you will run the training script.
<center> ![](./PipelineTrain_en.png) </center>
<center> ![](./src/PipelineTrain_en.png) </center>
Training script: our training script is in the `train.sh` file. The training arguments are listed below:
......@@ -395,7 +395,7 @@ We do not provide examples on how to train on clusters here. If you want to trai
## Inference
You can use the trained model to perform prediction on a dataset with no labels. You can also evaluate the model on a dataset with labels to obtain its test accuracy.
<center> ![](./PipelineTest_en.png) </center>
<center> ![](./src/PipelineTest_en.png) </center>
The test script is listed below. PaddlePaddle can evaluate a model on the data with labels specified in `test.list`.
......
```eval_rst
.. _demo_ml_dataset_en:
.. _demo_ml_dataset:
```
# MovieLens Dataset
......
......@@ -16,7 +16,7 @@ Data Preparation
````````````````
Download and extract dataset
''''''''''''''''''''''''''''
We use :ref:`demo_ml_dataset_en` here.
We use :ref:`demo_ml_dataset` here.
To download and unzip the dataset, simply run the following commands.
.. code-block:: bash
......@@ -264,7 +264,7 @@ In this :code:`dataprovider.py`, we should set\:
* use_seq\: Whether this :code:`dataprovider.py` is in sequence mode or not.
* process\: Returns each sample of data to :code:`paddle`.
For details of the data provider, see :ref:`api_pydataprovider2_en`.
For details of the data provider, see :ref:`api_pydataprovider2`.
Train
`````
......@@ -280,7 +280,7 @@ The run.sh is shown as follow:
It just starts a paddle training process, writes the log to `log.txt`,
then prints it on screen.
For each command line argument in :code:`run.sh`, please refer to the :ref:`cmd_line_index_en` page. A short description of these arguments follows.
For each command line argument in :code:`run.sh`, please refer to the :ref:`cmd_line_index` page. A short description of these arguments follows.
* config\: Tells paddle which file holds the neural network configuration.
* save_dir\: Tells paddle to save the model into './output'.
......
......@@ -149,7 +149,7 @@ paddle train \
After training, the models will be saved in the directory `output`. Our training curve is as follows:
<center>
![pic](./curve.jpg)
![pic](./src/curve.jpg)
</center>
### Testing
......
```eval_rst
.. _semantic_role_labeling_en:
.. _semantic_role_labeling:
```
# Semantic Role labeling Tutorial #
......@@ -45,13 +45,13 @@ Unlike Bidirectional-LSTM that used in Sentiment Analysis demo, the DB-LSTM ado
The following figure shows a temporally expanded 2-layer DB-LSTM network.
<center>
![pic](./network_arch.png)
![pic](./src/network_arch.png)
</center>
### Features
Two input features play an essential role in this pipeline: predicate (pred) and argument (argu). Two other features are also adopted: predicate context (ctx-p) and region mark (mr). A single predicate word cannot exactly convey the predicate information, especially when the same word appears more than once in a sentence; with the predicate context, this ambiguity can be largely eliminated. Similarly, we use the region mark m<sub>r</sub> = 1 to denote an argument position that lies in the predicate context region, and m<sub>r</sub> = 0 otherwise. These four simple features are all we need for our SRL system. The features of one sample with context size set to 1 are shown as follows[2]:
<center>
![pic](./feature.jpg)
![pic](./src/feature.jpg)
</center>
In this sample, the corresponding labelled sentence is:
......@@ -152,7 +152,7 @@ paddle train \
After training, the models will be saved in directory `output`. Our training curve is as following:
<center>
![pic](./curve.jpg)
![pic](./src/curve.jpg)
</center>
### Run testing
......
......@@ -109,7 +109,7 @@ dataset
In this task we use the LSTM architecture of recurrent neural networks (RNN) to train the sentiment analysis model. The LSTM model is introduced mainly to overcome the vanishing gradient problem. An LSTM network is similar to a standard recurrent neural network with a hidden layer, except that every ordinary node in the hidden layer is replaced by a memory cell. Each memory cell contains four main elements: an input gate, a neuron with a self-recurrent connection, a forget gate, and an output gate. More details can be found in the literature [4]. The biggest advantage of the LSTM architecture is that it can remember information over long time intervals without the loss of short-term memory. At each time step where a new word arrives, the history stored in the memory cell block is updated, so that words are learned iteratively as a coherent sequence unfolds.
<center>![LSTM](../../../doc/demo/sentiment_analysis/lstm.png)</center>
<center>![LSTM](src/lstm.png)</center>
<center>Figure 1. LSTM [3]</center>
Sentiment analysis is one of the most typical problems in natural language understanding. Its goal is to predict the sentiment expressed in a sequence. Usually only a few keywords, such as adjectives and adverbs, play the main role in predicting the sentiment of a sequence or paragraph, yet some review contexts are very long, as in the IMDB dataset. We use LSTM for this task because of its improved design with gating mechanisms. First, it can summarize representations from the word level up to context levels of variable length. Second, it can exploit expandable context at the sentence level, whereas most methods only exploit n-gram-level knowledge. Third, it learns paragraph representations directly rather than by combining context-level information.
......@@ -120,13 +120,13 @@ dataset
Figure 2 shows a bidirectional LSTM network, followed by a fully connected layer and a softmax layer.
<center>![BiLSTM](../../../doc/demo/sentiment_analysis/bi_lstm.jpg)</center>
<center>![BiLSTM](src/bi_lstm.jpg)</center>
<center>Figure 2. Bidirectional-LSTM </center>
#### Stacked-LSTM
Figure 3 shows a three-layer LSTM structure. At the bottom of the figure is the word embedding (word vectors obtained by processing the documents). Next come three LSTM hidden layers, of which the second is a reverse LSTM. The maximum over all time steps of the hidden LSTM layers' outputs is then extracted as the representation of the whole sequence. Finally, a fully connected feed-forward layer with softmax activation performs the classification task. See reference [5] for more.
<center>![StackedLSTM](../../../doc/demo/sentiment_analysis/stacked_lstm.jpg)</center>
<center>![StackedLSTM](src/stacked_lstm.jpg)</center>
<center>Figure 3. Stacked-LSTM for sentiment analysis </center>
**Configuration**
......
if(NOT DEFINED SPHINX_THEME)
set(SPHINX_THEME default)
endif()
if(NOT DEFINED SPHINX_THEME_DIR)
set(SPHINX_THEME_DIR)
endif()
# configured documentation tools and intermediate build results
set(BINARY_BUILD_DIR "${CMAKE_CURRENT_BINARY_DIR}/_build")
# Sphinx cache with pickled ReST documents
set(SPHINX_CACHE_DIR "${CMAKE_CURRENT_BINARY_DIR}/_doctrees")
# HTML output directory
set(SPHINX_HTML_DIR "${CMAKE_CURRENT_BINARY_DIR}/html")
configure_file(
"${CMAKE_CURRENT_SOURCE_DIR}/conf.py.in"
"${BINARY_BUILD_DIR}/conf.py"
@ONLY)
sphinx_add_target(paddle_docs_cn
html
${BINARY_BUILD_DIR}
${SPHINX_CACHE_DIR}
${CMAKE_CURRENT_SOURCE_DIR}
${SPHINX_HTML_DIR})
add_dependencies(paddle_docs_cn
gen_proto_py)
Building PaddlePaddle with cmake
================================
.. toctree::
install_deps.rst
compile_options.rst
make_and_install.rst
Install the dependencies needed to build PaddlePaddle
======================================================
See `Install dependencies <../../../doc/build/build_from_source.html#install-dependencies>`_
make and make install
=====================
See `make and make install <../../../doc/build/build_from_source.html#build-and-install>`_
FROM paddledev/paddle:cpu-latest
MAINTAINER PaddlePaddle dev team <paddle-dev@baidu.com>
RUN apt-get update
RUN apt-get install -y openssh-server
RUN mkdir /var/run/sshd
RUN echo 'root:root' | chpasswd
RUN sed -ri 's/^PermitRootLogin\s+.*/PermitRootLogin yes/' /etc/ssh/sshd_config
RUN sed -ri 's/UsePAM yes/#UsePAM yes/g' /etc/ssh/sshd_config
EXPOSE 22
CMD ["/usr/sbin/sshd", "-D"]
PaddlePaddle 0.8.0b1, compiled with
with_avx: ON
with_gpu: OFF
with_double: OFF
with_python: ON
with_rdma: OFF
with_glog: ON
with_gflags: ON
with_metric_learning:
with_timer: OFF
with_predict_sdk:
Cluster Training
================
* `Cluster training <../../doc/cluster/index.html>`_
.. toctree::
:maxdepth: 2
:glob:
Cluster training (internal) <internal/index.md>
Examples
========
Image
'''''
* `Image classification <../../doc/demo/image_classification/index.html>`_
Natural Language Processing
'''''''''''''''''''''''''''
* `Sentiment analysis <sentiment_analysis/index.html>`_
* `Text generation <../../doc/demo/text_generation/index.html>`_
* `Semantic role labeling <../../doc/demo/semantic_role_labeling/index.html>`_
Recommendation
''''''''''''''
* `MovieLens dataset <../../doc/demo/rec/ml_dataset.html>`_
* `MovieLens rating regression <../../doc/demo/rec/ml_regression.html>`_
Common Models
'''''''''''''
* `ImageNet: ResNet <../../doc/demo/imagenet_model/resnet_model.html>`_
* `Embedding: Chinese Word <../../doc/demo/embedding_model/index.html>`_
# PaddlePaddle Quick Start Tutorial
Using a text classification problem as the running example, we introduce the PaddlePaddle workflow and how to configure the commonly used basic network units.
## Installation
First, please install PaddlePaddle following the <a href = "../../build_and_install/index.html">installation tutorial</a>.
## Overview
**Text classification problem**: for a given piece of text, select the category it belongs to from a predefined set of categories,
e.g., assessing product quality from users' reviews on an e-commerce site:
- This monitor is great! (positive)
- The screen of this monitor broke after two months of use. (negative)
Each task workflow can be divided into the following 5 basic parts.
<center> ![](./Pipeline.jpg) </center>
1. Data format preparation
 - Each line holds one sample; the category Id and the text are separated by a Tab, and the words in the text are separated by spaces (if no word segmentation is done, separate the characters by spaces), e.g.: ```类别Id ‘\t’ 这 个 显 示 器 很 棒 !```
2. Feeding data to the model
 - PaddlePaddle can read a data transfer script written in Python; all characters are converted to Ids represented by consecutive integers and passed to the model
3. Network architecture (4 different network configurations, from easy to hard)
 - Logistic regression model
 - Word vector model
 - Convolution model
 - Sequence model
 - Optimization algorithm
4. Training the model
5. Prediction
## Data Preparation
For this problem we use the [Amazon electronics review data](http://jmcauley.ucsd.edu/data/amazon/)
to classify reviews into positive (positive samples) and negative (negative samples). The [source tree](https://github.com/PaddlePaddle/Paddle) provides a script under `demo/quick_start` that downloads the already preprocessed data (to process from the rawest data, use the script `./demo/quick_start/data/proc_from_raw_data/get_data.sh`).
```bash
cd demo/quick_start
./data/get_data.sh
```
## Transferring Data to the Model
### Python Data Provider Script
The dataprovider_bow.py file below gives a complete example, consisting of two main parts:
* initializer: defines the data types of the text and the category Id.
* process: yields the text and the category Id, in the same order as defined in initializer.
```python
from paddle.trainer.PyDataProvider2 import *
# id of the word not in dictionary
UNK_IDX = 0
# initializer is called by the framework during initialization.
# It allows the user to describe the data types and setup the
# necessary data structure for later use.
# `settings` is an object. initializer need to properly fill settings.input_types.
# initializer can also store other data structures needed to be used at process().
# In this example, dictionary is stored in settings.
# `dictionary` and `kwargs` are arguments passed from trainer_config.lr.py
def initializer(settings, dictionary, **kwargs):
# Put the word dictionary into settings
settings.word_dict = dictionary
# setting.input_types specifies what the data types the data provider
# generates.
settings.input_types = [
# The first input is a sparse_binary_vector,
# which means each dimension of the vector is either 0 or 1. It is the
# bag-of-words (BOW) representation of the texts.
sparse_binary_vector(len(dictionary)),
# The second input is an integer. It represents the category id of the
# sample. 2 means there are two labels in the dataset.
# (1 for positive and 0 for negative)
integer_value(2)]
# Declaring a data provider. It has an initializer 'initializer'.
# It will cache the generated data of the first pass in memory, so that
# during later pass, no on-the-fly data generation will be needed.
# `setting` is the same object used by initializer()
# `file_name` is the name of a file listed train_list or test_list file given
# to define_py_data_sources2(). See trainer_config.lr.py.
@provider(init_hook=initializer, cache=CacheType.CACHE_PASS_IN_MEM)
def process(settings, file_name):
# Open the input data file.
with open(file_name, 'r') as f:
# Read each line.
for line in f:
# Each line contains the label and text of the comment, separated by \t.
label, comment = line.strip().split('\t')
# Split the words into a list.
words = comment.split()
# convert the words into a list of ids by looking them up in word_dict.
word_vector = [settings.word_dict.get(w, UNK_IDX) for w in words]
# Return the features for the current comment. The first is a list
# of ids representing a 0-1 binary sparse vector of the text,
# the second is the integer id of the label.
yield word_vector, int(label)
```
### Data Provider in the Configuration
In the model configuration, load the data with `define_py_data_sources2`:
```python
from paddle.trainer_config_helpers import *
dict_file = "data/dict.txt"
word_dict = dict()
with open(dict_file, 'r') as f:
for i, line in enumerate(f):
w = line.strip().split()[0]
word_dict[w] = i
# define the data sources for the model.
# We need to use different process for training and prediction.
# For training, the input data includes both word IDs and labels.
# For prediction, the input data only includes word Ids.
define_py_data_sources2(train_list='data/train.list',
test_list='data/test.list',
module="dataprovider_bow",
obj="process",
args={"dictionary": word_dict})
```
* data/train.list, data/test.list: specify the training and test data
* module="dataprovider_bow": the Python file that processes the data
* obj="process": the function that generates the data
* args={"dictionary": word_dict}: extra arguments; here, the word dictionary
For more detailed data formats and use cases, please refer to <a href = "../../ui/data_provider/pydataprovider2.html">
PyDataProvider2</a>.
## Network Architecture
In this section we focus on introducing the network architectures.
<center> ![](./PipelineNetwork.jpg) </center>
We start from a basic logistic regression network and gradually introduce more advanced functionality. For more detailed network configuration
references, please see the <a href = "../../../doc/layer.html">Layer documentation</a>.
All configurations are in the `demo/quick_start` directory of the [source tree](https://github.com/PaddlePaddle/Paddle); below we first present the logistic regression network.
### Logistic Regression
The workflow is as follows:
<center> ![](./NetLR.jpg) </center>
- Obtain each word represented as a one-hot vector; its dimension is the dictionary size
```python
word = data_layer(name="word", size=word_dim)
```
- Obtain the category Id of the sample; its dimension is the number of categories.
```python
label = data_layer(name="label", size=label_dim)
```
- Classify the vector with the logistic regression model; classification accuracy is also computed
```python
# Define a fully connected layer with logistic activation (also called softmax activation).
output = fc_layer(input=word,
size=label_dim,
act_type=SoftmaxActivation())
# Define cross-entropy classification loss and error.
classification_cost(input=output, label=label)
```
- input: every layer except the data layer has one or more inputs; multiple inputs are passed as a list
- size: the number of neurons in this layer
- act_type: the type of activation function
Performance summary: the scripts for training and prediction are introduced later. To make it easier to compare the different network architectures,
we summarize each network's complexity and performance along the way.
<html>
<center>
<table border="2" cellspacing="0" cellpadding="6" rules="all" frame="border">
<thead>
<th scope="col" class="left">Network name</th>
<th scope="col" class="left">Number of parameters</th>
<th scope="col" class="left">Error rate</th>
</tr>
</thead>
<tbody>
<tr>
<td class="left">Logistic regression</td>
<td class="left">252 KB</td>
<td class="left">8.652%</td>
</tr>
</tbody>
</table></center>
</html>
<br>
### Word Vector Model
The embedding model needs a slightly different data provider script, `dataprovider_emb.py`; the word vector,
convolution and sequence models all use this script. The text input type is defined as the integer sequence type integer_value_sequence.
```python
def initializer(settings, dictionary, **kwargs):
settings.word_dict = dictionary
settings.input_types = [
    # Define the type of the first input as a sequence of integers.
    # The values of the integers range from 0 to len(dictionary)-1
integer_value_sequence(len(dictionary)),
# Define the second input for label id
integer_value(2)]
@provider(init_hook=initializer)
def process(settings, file_name):
...
    # omitted; it is the same as the data provider for the LR model
```
The model still follows the logistic regression classification framework; it only replaces the sparse
sentence representation with a continuous vector representation, i.e. it replaces step 3. Computing the sentence representation now takes 2 steps:
<center> ![](./NetContinuous.jpg) </center>
- Look up the continuous representation vector (of dimension word_dim) for each word ID; with N input words, the output is N vectors of dimension word_dim.
```python
emb = embedding_layer(input=word, size=word_dim)
```
- Average all the word vectors in the sentence to obtain the sentence representation.
```python
avg = pooling_layer(input=emb, pooling_type=AvgPooling())
```
The rest of the network is identical to the logistic regression structure.
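Putting the snippets together, a minimal sketch of the whole embedding-based classifier could look as follows; `dict_dim` (the dictionary size) is an assumed name used only for illustration, and the classification head simply repeats the logistic regression section:
```python
# A minimal sketch of the embedding + average-pooling classifier.
# dict_dim (dictionary size) is an assumed name for illustration.
word = data_layer(name="word", size=dict_dim)
label = data_layer(name="label", size=label_dim)
emb = embedding_layer(input=word, size=word_dim)  # word_dim: embedding size
avg = pooling_layer(input=emb, pooling_type=AvgPooling())
output = fc_layer(input=avg, size=label_dim, act_type=SoftmaxActivation())
classification_cost(input=output, label=label)
```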
Performance summary:
<html>
<center>
<table border="2" cellspacing="0" cellpadding="6" rules="all" frame="border">
<thead>
<tr>
<th scope="col" class="left">Network</th>
<th scope="col" class="left">Number of parameters</th>
<th scope="col" class="left">Error rate</th>
</tr>
</thead>
<tbody>
<tr>
<td class="left">Word vector model</td>
<td class="left">15 MB</td>
<td class="left">8.484%</td>
</tr>
</tbody>
</table>
</center>
</html>
<br>
### Convolution Model
A convolutional network is a particular way of going from word vector representations to a sentence
representation; it refines step 3-2 of the word vector model into 3 new sub-steps.
<center> ![](./NetConv.jpg) </center>
Text convolution has three steps:
1. For each word, take its k neighbors on each side and concatenate them into a new vector;
2. Apply a nonlinear transformation (e.g. a sigmoid) to that vector, producing a new vector of dimension hidden_dim;
3. For each dimension, take the maximum of that dimension over the sentence's new vectors as the final sentence representation. These 3 sub-steps can be configured as:
```python
text_conv = sequence_conv_pool(input=emb,
context_start=k,
context_len=2 * k + 1)
```
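Combined with the earlier layers, a fuller configuration might look like the sketch below; the `hidden_size` argument (carrying the hidden_dim of step 2) and the classification head are illustrative assumptions, not lines taken from the demo:
```python
# A minimal sketch of the convolutional text classifier; hidden_size is an
# assumed argument, and the head mirrors the logistic regression section.
emb = embedding_layer(input=word, size=word_dim)
text_conv = sequence_conv_pool(input=emb,
                               context_start=k,
                               context_len=2 * k + 1,
                               hidden_size=hidden_dim)
output = fc_layer(input=text_conv, size=label_dim, act_type=SoftmaxActivation())
classification_cost(input=output, label=label)
```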
Performance summary:
<html>
<center>
<table border="2" cellspacing="0" cellpadding="6" rules="all" frame="border">
<thead>
<tr>
<th scope="col" class="left">Network</th>
<th scope="col" class="left">Number of parameters</th>
<th scope="col" class="left">Error rate</th>
</tr>
</thead>
<tbody>
<tr>
<td class="left">Convolution model</td>
<td class="left">16 MB</td>
<td class="left">5.628%</td>
</tr>
</tbody>
</table></center>
</html>
<br>
### Sequence Models (Time Sequence)
<center> ![](./NetRNN.jpg) </center>
Sequence models are RNN models, including the simple RNN, the GRU, and the LSTM.
- GRU configuration:
```python
gru = simple_gru(input=emb, size=gru_size)
```
- LSTM configuration:
```python
lstm = simple_lstm(input=emb, size=lstm_size)
```
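As a sketch, these pieces can be assembled into a complete classifier as follows. The max-pooling step is an assumption, and the Dropout used by the demo is omitted because its exact placement is not shown in this document; `trainer_config.lstm.py` is the authoritative version:
```python
# A minimal sketch of a single-layer LSTM classifier (Dropout omitted; the
# demo applies it, but its placement is not shown here).
emb = embedding_layer(input=word, size=word_dim)
lstm = simple_lstm(input=emb, size=lstm_size)
# Pool the LSTM output sequence into a single vector (max pooling assumed).
lstm_max = pooling_layer(input=lstm, pooling_type=MaxPooling())
output = fc_layer(input=lstm_max, size=label_dim, act_type=SoftmaxActivation())
classification_cost(input=output, label=label)
```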
For this problem, we adopt a single-layer LSTM model with Dropout. Performance summary:
<html>
<center>
<table border="2" cellspacing="0" cellpadding="6" rules="all" frame="border">
<thead>
<tr>
<th scope="col" class="left">Network</th>
<th scope="col" class="left">Number of parameters</th>
<th scope="col" class="left">Error rate</th>
</tr>
</thead>
<tbody>
<tr>
<td class="left">Sequence model</td>
<td class="left">16 MB</td>
<td class="left">4.812%</td>
</tr>
</tbody>
</table></center>
</html>
<br>
## Optimization Algorithm
The <a href = "../../../doc/ui/trainer_config_helpers_api.html#module-paddle.trainer_config_helpers.optimizers">optimization algorithms</a> include
Momentum, RMSProp, AdaDelta, AdaGrad, ADAM, Adamax, and others. Here we use the Adam optimizer with L2 regularization and gradient clipping.
```python
settings(batch_size=128,
learning_rate=2e-3,
learning_method=AdamOptimizer(),
regularization=L2Regularization(8e-4),
gradient_clipping_threshold=25)
```
## Training the Model
With the data and the network structure in place, we can start training.
<center> ![](./PipelineTrain.jpg) </center>
Training script: the training command line is saved in the `train.sh` file. The main parameters to set are:
```bash
paddle train \
--config=trainer_config.py \
--log_period=20 \
--save_dir=./output \
--num_passes=15 \
--use_gpu=false
```
Distributed multi-machine training is not covered here; see the <a href = "../../cluster/index.html">distributed training</a> demo to learn how to train on multiple machines.
## Prediction
A trained model can evaluate a labeled validation set or predict on an unlabeled test set.
<center> ![](./PipelineTest.jpg) </center>
The test script below evaluates the data specified by test.list in the configuration file.
```bash
paddle train \
--use_gpu=false \
--job=test \
--init_model_path=./output/pass-0000x
```
See the <a href = "../../ui/predict/swig_py_paddle.html">Python API prediction</a>
tutorial, or the Python prediction flow in other <a href = "../../demo/index.html">demos</a>. Prediction can also be done as follows.
Prediction script (`predict.sh`):
```bash
model="output/pass-00003"
paddle train \
--config=trainer_config.lstm.py \
--use_gpu=false \
--job=test \
--init_model_path=$model \
--config_args=is_predict=1 \
--predict_output_dir=.
mv rank-00000 result.txt
```
Here `output/pass-00003` is used as an example; you can pick the model with the best test result in the training log. Compared with the training configuration: the label-related layers are removed, outputs is set to the probability layer (the softmax output),
batch_size is set to 1, no label data is passed in, and the location of the prediction data is given via test_list.
The prediction results are saved as text in `result.txt`, one sample per line, in the following format:
```
predicted ID;probability of ID 0 probability of ID 1
predicted ID;probability of ID 0 probability of ID 1
```
The corresponding configuration changes, switched by the `is_predict` flag, are:
```python
is_predict = get_config_arg('is_predict', bool, False)
trn = 'data/train.list' if not is_predict else None
tst = 'data/test.list' if not is_predict else 'data/pred.list'
obj = 'process' if not is_predict else 'process_pre'
batch_size = 128 if not is_predict else 1
if is_predict:
maxid = maxid_layer(output)
outputs([maxid,output])
else:
label = data_layer(name="label", size=2)
cls = classification_cost(input=output, label=label)
outputs(cls)
```
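For illustration, `result.txt` can be consumed with a few lines of Python. This is a hedged sketch that assumes, as in the format above, that the predicted ID and the probabilities are separated by `;` and the two probabilities by whitespace:
```python
# A minimal sketch that reads the result.txt produced by predict.sh.
# Each line: "<predicted id>;<prob of label 0> <prob of label 1>"
with open("result.txt") as f:
    for line in f:
        pred, probs = line.strip().split(";")
        prob0, prob1 = (float(p) for p in probs.split())
        print(int(pred), prob0, prob1)
```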
## Summary
The data download, network configurations, and training scripts for these pipelines are in the `demo/quick_start`
directory. Here we summarize the performance of the above architectures on the Amazon-Elec test set (25k):
<center>
<table border="2" cellspacing="0" cellpadding="6" rules="all" frame="border">
<thead>
<tr>
<th scope="col" class="left">Network</th>
<th scope="col" class="left">Number of parameters</th>
<th scope="col" class="left">Error rate</th>
<th scope="col" class="left">Configuration file</th>
</tr>
</thead>
<tbody>
<tr>
<td class="left">Logistic regression model</td>
<td class="left"> 252KB </td>
<td class="left">8.652%</td>
<td class="left">trainer_config.lr.py</td>
</tr>
<tr>
<td class="left">Word vector model</td>
<td class="left"> 15MB </td>
<td class="left"> 8.484%</td>
<td class="left">trainer_config.emb.py</td>
</tr>
<tr>
<td class="left">Convolution model</td>
<td class="left"> 16MB </td>
<td class="left"> 5.628%</td>
<td class="left">trainer_config.cnn.py</td>
</tr>
<tr>
<td class="left">Sequence model</td>
<td class="left"> 16MB </td>
<td class="left"> 4.812%</td>
<td class="left">trainer_config.lstm.py</td>
</tr>
</tbody>
</table>
</center>
<br>
## Appendix
### Command Line Arguments
* \--config: the network configuration
* \--save_dir: the directory where models are saved
* \--log_period: print a log line every so many batches
* \--num_passes: the number of training passes; one pass goes through all training samples once
* \--config_args: arguments passed into the network configuration.
* \--init_model_path: the path of the initial model, used to initialize the model for testing or training.
By default the model is saved once per pass; saving_period_by_batches instead saves the model every so many batches.
Printing of parameter statistics and similar details is controlled by show_parameter_stats_period.
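For example, these two flags can be appended to the training command like so; the values shown are illustrative:
```bash
paddle train \
--config=trainer_config.py \
--save_dir=./output \
--saving_period_by_batches=1000 \
--show_parameter_stats_period=100 \
--use_gpu=false
```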
For other arguments, see the <a href = "../../ui/index.html#command-line-argument">command line argument documentation</a>.
### Log Output
```
TrainerInternal.cpp:160] Batch=20 samples=2560 AvgCost=0.628761 CurrentCost=0.628761 Eval: classification_error_evaluator=0.304297 CurrentEval: classification_error_evaluator=0.304297
```
Logs like the line above appear during model training; the fields are explained in the table below:
<center>
<table border="2" cellspacing="0" cellpadding="6" rules="all" frame="border">
<thead>
<tr>
<th scope="col" class="left">Name</th>
<th scope="col" class="left">Meaning</th>
</tr>
</thead>
<tbody>
<tr>
<td class="left">Batch=20</td>
<td class="left"> 20 batches have been processed </td>
</tr>
<tr>
<td class="left">samples=2560</td>
<td class="left"> 2560 samples have been processed </td>
</tr>
<tr>
<td class="left">AvgCost</td>
<td class="left"> average cost over all samples from the first batch of the pass to the current batch </td>
</tr>
<tr>
<td class="left">CurrentCost</td>
<td class="left"> average cost over the samples of the latest log_period batches </td>
</tr>
<tr>
<td class="left">Eval: classification_error_evaluator</td>
<td class="left"> average classification error rate over all samples from the first batch of the pass to the current batch </td>
</tr>
<tr>
<td class="left">CurrentEval: classification_error_evaluator</td>
<td class="left"> average classification error rate over the samples of the latest log_period batches </td>
</tr>
</tbody>
</table>
</center>
<br>
Sentiment Analysis Tutorial
===========================
.. toctree::
:maxdepth: 3
:glob:
Training Locally <sentiment_analysis.md>
Building PaddlePaddle Docker Images
===================================
The sources for building PaddlePaddle Docker images live in the ``${source_root}/paddle/scripts/docker/`` directory, which contains three kinds of files:
- Dockerfile: the description file of a Docker image, covering build steps, parameters, maintainers, and so on.
  - 12 Dockerfiles are maintained in total; Dockerfile.m4 is their template.
  - All PaddlePaddle images are based on Ubuntu 14.04.
- build.sh: the image build script; see the next section for its usage.
- generate.sh: generates the different Dockerfiles from the Dockerfile.m4 template.
Building the Images with the Script
-----------------------------------
Enter the source directory and run the ``docker build`` command to build a PaddlePaddle image locally. A simple example:
.. code-block:: bash
cd ${source_root}/paddle/scripts/docker/
docker build --build-arg LOWEST_DL_SPEED=50K \
--build-arg WITH_GPU=ON \
--tag paddle_gpu:latest .
The configuration arguments passed in via ``--build-arg`` are:
- LOWEST\_DL\_SPEED\: the minimum speed of each download thread in the multi-threaded download.
  - The default unit is bytes, but units such as 10K, 10M, or 10G may also be given.
  - A thread slower than this speed is closed; once all threads are closed, the download process restarts.
- WITH\_GPU\: ON or OFF, whether to enable GPU support. Note that
  - **building** the GPU version of PaddlePaddle does **not** have to be done on a machine with a GPU, while
  - **running** the GPU version of PaddlePaddle **must** be done on a machine with a GPU.
Note: building all of the images has been verified with Docker 1.12; versions below 1.12 were not tested, because older versions may lack the ``--build-arg`` option and therefore cannot accept arguments when the build command is run.
PaddlePaddle Documentation
==========================
User Guide
----------
* `Introduction <introduction/index.html>`_
* `Quick Start <demo/quick_start/index.html>`_
* `Basic Usage Concepts <concepts/use_concepts.html>`_
* `Build and Install <build_and_install/index.html>`_
* `User Interface <ui/index.html>`_
* `Examples <demo/index.html>`_
* `Model Configuration <../doc/ui/api/trainer_config_helpers/index.html>`_
* `Cluster Training <cluster/index.html>`_
Developer Guide
---------------
* `Writing New Layers <../doc/dev/new_layer/index.html>`_
* `How to Contribute Documentation <howto/how_to_write_docs/index.html>`_
* `How to Build Docker Images <howto/build_docker_image.html>`_
Algorithm Tutorials
-------------------
* `Recurrent Group Tutorial <algorithm/rnn/rnn-tutorial.html>`_
* `Single-layer RNN Example <../doc/algorithm/rnn/rnn.html>`_
* :ref:`algo_hrnn_rnn_api_compare`
* `Layers that Support Two-level Sequence Input <algorithm/rnn/hierarchical-layer.html>`_
FAQ
---
* `FAQ <faq/index.html>`_
Commands
========
After installing PaddlePaddle, typing ``paddle`` or ``paddle --help`` on the command line shows the following commands.
* ``train`` Start a paddle_trainer
  Starts a PaddlePaddle training process. ``paddle train`` can launch a single-machine training process with the command line argument ``-local=true``, or launch a distributed multi-machine training process together with ``paddle pserver`` (see the sketch after this list).
* ``pserver`` Start a paddle_pserver_main
  Starts a PaddlePaddle parameter server process for distributed multi-machine training.
* ``version`` Print paddle version
  Prints the current PaddlePaddle version and build options. Typical output looks as follows: 1) the first line states the PaddlePaddle version; 2) the lines after it list the main compile options, whose meanings are explained in the `compile options file <../../build_and_install/cmake/compile_options.html>`_.
  .. literalinclude:: paddle_version.txt
* ``merge_model`` Start a paddle_merge_model
  Packs a PaddlePaddle model parameter file and its model configuration file into one file, which is convenient for deployment and distribution.
* ``dump_config`` Dump the trainer config as proto string
  Prints a PaddlePaddle model configuration file as a proto string.
* ``make_diagram``
  Draws a PaddlePaddle model configuration file with graphviz.
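For instance, a minimal sketch of the distributed setup runs both processes on one machine; the port, network interface, and gradient-server count below are illustrative values, not requirements:
.. code-block:: bash
   # Start one parameter server, then one trainer that connects to it.
   paddle pserver --port=20134 --ports_num=1 --nics=lo --num_gradient_servers=1 &
   paddle train --local=false --pservers=127.0.0.1 --port=20134 \
   --trainer_id=0 --num_gradient_servers=1 \
   --config=trainer_config.py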
PaddlePaddle 0.8.0b, compiled with
with_avx: ON
with_gpu: ON
with_double: OFF
with_python: ON
with_rdma: OFF
with_glog: ON
with_gflags: ON
with_metric_learning: OFF
with_timer: OFF
with_predict_sdk: OFF
##############
User Interface
##############
Data Provider
=============
.. toctree::
:maxdepth: 1
data_provider/dataprovider.rst
data_provider/pydataprovider2.rst
Commands and Command-line Arguments
===================================
.. toctree::
:maxdepth: 1
cmd/index.rst
* `Argument Use Cases <../../doc/ui/cmd_argument/use_case.html>`_
* `Argument Outline <../../doc/ui/cmd_argument/argument_outline.html>`_
* `Detailed Argument Descriptions <../../doc/ui/cmd_argument/detail_introduction.html>`_
Prediction
==========
.. toctree::
:maxdepth: 1
predict/swig_py_paddle.rst
......@@ -17,22 +17,18 @@ add_library(paddle_api STATIC
${API_SOURCES})
add_dependencies(paddle_api gen_proto_cpp)
list(LENGTH "${GFLAGS_LIBRARIES}" GFLAGS_LIBRARIES_LENGTH)
if(WITH_GFLAGS)
list(LENGTH "${GFLAGS_LIBRARIES}" GFLAGS_LIBRARIES_LENGTH)
if(${GFLAGS_LIBRARIES_LENGTH} EQUAL 0 AND TARGET "${GFLAGS_LIBRARIES}")
# Because gflags is compiled by cmake, it is imported as a cmake target,
# not a real library path. Get the real library path here.
message(STATUS "GFLAGS Libraries is ${GFLAGS_LIBRARIES}")
get_target_property(GFLAGS_LOCATION ${GFLAGS_LIBRARIES} LOCATION)
message(STATUS "GFLAGS Target location is ${GFLAGS_LOCATION}")
else()
set(GFLAGS_LOCATION ${GFLAGS_LIBRARIES})
endif()
if(${GFLAGS_LIBRARIES_LENGTH} EQUAL 0 AND TARGET "${GFLAGS_LIBRARIES}")
# Because gflags is compiled by cmake, it is imported as a cmake target,
# not a real library path. Get the real library path here.
message(STATUS "GFLAGS Libraries is ${GFLAGS_LIBRARIES}")
get_target_property(GFLAGS_LOCATION ${GFLAGS_LIBRARIES} LOCATION)
message(STATUS "GFLAGS Target location is ${GFLAGS_LOCATION}")
else()
set(GFLAGS_LOCATION ${GFLAGS_LIBRARIES})
endif()
configure_file(
paddle_api_config.py.in
${PROJ_ROOT}/paddle/api/paddle_api_config.py
......
......@@ -27,9 +27,9 @@ limitations under the License. */
using paddle::real;
P_DECLARE_string(config);
P_DECLARE_string(init_model_path);
P_DECLARE_int32(start_pass);
DECLARE_string(config);
DECLARE_string(init_model_path);
DECLARE_int32(start_pass);
struct TrainerPrivate : public paddle::Trainer {
bool _trainOneBatch(size_t batchSize);
......
......@@ -8,9 +8,7 @@ CMAKE_DL_LIBS="@CMAKE_DL_LIBS@"
WITH_PYTHON="@WITH_PYTHON@"
PYTHON_LIBRARIES="@PYTHON_LIBRARIES@"
WITH_GLOG="@WITH_GLOG@"
LIBGLOG_LIBRARY="@LIBGLOG_LIBRARY@"
WITH_GFLAGS="@WITH_GFLAGS@"
GFLAGS_LIBRARIES="@GFLAGS_LIBRARIES@"
GFLAGS_LOCATION="@GFLAGS_LOCATION@"
CBLAS_LIBRARIES="@CBLAS_LIBS@"
......
......@@ -47,10 +47,8 @@ try:
self.with_python = PaddleLDFlag.cmake_bool(WITH_PYTHON)
self.python_libs = PYTHON_LIBRARIES
self.with_glog = PaddleLDFlag.cmake_bool(WITH_GLOG)
self.glog_libs = LIBGLOG_LIBRARY
self.with_gflags = PaddleLDFlag.cmake_bool(WITH_GFLAGS)
self.with_coverage = PaddleLDFlag.cmake_bool(WITH_COVERALLS)
self.gflags_libs = GFLAGS_LIBRARIES
self.gflags_location = GFLAGS_LOCATION
......@@ -88,6 +86,8 @@ try:
"-lpaddle_cuda",
"-lpaddle_api",
self.normalize_flag(self.protolib),
self.normalize_flag(self.glog_libs),
self.normalize_flag(self.gflags_libs),
self.normalize_flag(self.zlib),
self.normalize_flag(self.thread),
self.normalize_flag(self.dl_libs),
......@@ -96,10 +96,6 @@ try:
if self.with_python:
libs.append(self.normalize_flag(self.python_libs))
if self.with_glog:
libs.append(self.normalize_flag(self.glog_libs))
if self.with_gflags:
libs.append(self.normalize_flag(self.gflags_libs))
if self.with_gpu:
libs.append(self.normalize_flag(self.curt))
if self.with_coverage:
......
......@@ -21,7 +21,7 @@ limitations under the License. */
#include "paddle/utils/CommandLineParser.h"
#include "paddle/utils/Logging.h"
P_DEFINE_int32(cudnn_conv_workspace_limit_in_mb,
DEFINE_int32(cudnn_conv_workspace_limit_in_mb,
4096,
"Specify cuDNN max workspace limit, in units MB, "
"4096MB=4GB by default.");
......
......@@ -12,6 +12,9 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
// clang-format off
// Because clang-format 4.X and clang-format 3.8+ format the
// following lines differently. So disable clang-format.
#include "hl_cuda.h"
#include <cuda_profiler_api.h>
#include <string.h>
......@@ -19,10 +22,12 @@ limitations under the License. */
#include <sys/time.h>
#include <unistd.h>
#include <mutex>
#include "hl_cuda.h"
#include "hl_cuda.ph"
#include "hl_dso_loader.h"
#include "hl_thread.ph"
#include "paddle/utils/Logging.h"
// clang-format on
namespace dynload {
......
......@@ -16,13 +16,13 @@ limitations under the License. */
#include "paddle/utils/CommandLineParser.h"
#include "paddle/utils/Logging.h"
P_DEFINE_string(cudnn_dir,
DEFINE_string(cudnn_dir,
"",
"Specify path for loading libcudnn.so. For instance, "
"/usr/local/cudnn/lib. If empty [default], dlopen "
"will search cudnn from LD_LIBRARY_PATH");
P_DEFINE_string(cuda_dir,
DEFINE_string(cuda_dir,
"",
"Specify path for loading cuda library, such as libcublas, "
"libcurand. For instance, /usr/local/cuda/lib64. (Note: "
......@@ -30,7 +30,7 @@ P_DEFINE_string(cuda_dir,
"build-in function in cudart already ran before main entry). "
"If default, dlopen will search cuda from LD_LIBRARY_PATH");
P_DEFINE_string(warpctc_dir, "", "Specify path for loading libwarpctc.so.");
DEFINE_string(warpctc_dir, "", "Specify path for loading libwarpctc.so.");
static inline std::string join(const std::string& part1,
const std::string& part2) {
......
......@@ -22,7 +22,7 @@ limitations under the License. */
#include "DataProviderGroup.h"
#include "paddle/utils/Logging.h"
P_DEFINE_double(memory_threshold_on_load_data,
DEFINE_double(memory_threshold_on_load_data,
1.0,
"stop loading data when memory is not sufficient");
......
......@@ -17,7 +17,7 @@ limitations under the License. */
#include "paddle/gserver/gradientmachines/NeuralNetwork.h"
P_DECLARE_int32(trainer_id);
DECLARE_int32(trainer_id);
namespace paddle {
......
......@@ -21,11 +21,11 @@ limitations under the License. */
#include "NeuralNetwork.h"
#include "ParallelNeuralNetwork.h"
P_DEFINE_bool(allow_only_one_model_on_one_gpu,
DEFINE_bool(allow_only_one_model_on_one_gpu,
true,
"If true, do not allow multiple models on one GPU device");
#ifdef PADDLE_METRIC_LEARNING
P_DECLARE_bool(external);
DECLARE_bool(external);
#endif
namespace paddle {
......
......@@ -24,7 +24,7 @@ limitations under the License. */
#include "paddle/utils/Stat.h"
#include "paddle/utils/Util.h"
P_DEFINE_string(diy_beam_search_prob_so, "", "the diy beam search cost so");
DEFINE_string(diy_beam_search_prob_so, "", "the diy beam search cost so");
static const char* DIY_CALC_PROB_SYMBOL_NAME = "calc_prob";
static const char* DIY_START_CALC_PROB_SYMBOL_NAME = "start_calc_prob";
......
......@@ -54,7 +54,7 @@ void DataLayer::copyDataToOutput(Argument& output) {
output.setFrameWidth(config_.width());
} else {
output.setFrameHeight(data_.getFrameHeight());
output.setFrameHeight(data_.getFrameHeight());
output.setFrameWidth(data_.getFrameWidth());
}
output.cpuSequenceDims = data_.cpuSequenceDims;
output.sequenceStartPositions = data_.sequenceStartPositions;
......
......@@ -33,7 +33,7 @@ limitations under the License. */
#include "TransLayer.h"
#include "ValidationLayer.h"
P_DEFINE_bool(log_error_clipping, false, "enable log error clipping or not");
DEFINE_bool(log_error_clipping, false, "enable log error clipping or not");
namespace paddle {
......
......@@ -17,7 +17,7 @@ limitations under the License. */
#include "paddle/math/Matrix.h"
#include "paddle/utils/Stat.h"
P_DECLARE_bool(prev_batch_state);
DECLARE_bool(prev_batch_state);
namespace paddle {
......
......@@ -17,7 +17,7 @@ limitations under the License. */
#include "paddle/utils/CommandLineParser.h"
#include "paddle/utils/Stat.h"
P_DEFINE_bool(rnn_use_batch, false, "Using the batch method for calculation.");
DEFINE_bool(rnn_use_batch, false, "Using the batch method for calculation.");
namespace paddle {
......
......@@ -18,7 +18,7 @@ limitations under the License. */
#include "Layer.h"
#include "paddle/gserver/evaluators/Evaluator.h"
P_DECLARE_int32(trainer_id);
DECLARE_int32(trainer_id);
namespace paddle {
......
......@@ -14,7 +14,7 @@ limitations under the License. */
#include "LayerGradUtil.h"
P_DECLARE_bool(thread_local_rand_use_global_seed);
DECLARE_bool(thread_local_rand_use_global_seed);
namespace paddle {
real getCostSum(LayerPtr& testLayer, MatrixPtr weights) {
......
......@@ -17,7 +17,7 @@ limitations under the License. */
#include "paddle/math/SparseMatrix.h"
#include "paddle/utils/CommandLineParser.h"
P_DEFINE_int32(fixed_seq_length, 0, "Produce some sequence of fixed length");
DEFINE_int32(fixed_seq_length, 0, "Produce some sequence of fixed length");
namespace paddle {
......
......@@ -25,8 +25,8 @@ limitations under the License. */
using namespace paddle; // NOLINT
using namespace std; // NOLINT
P_DECLARE_bool(use_gpu);
P_DECLARE_bool(thread_local_rand_use_global_seed);
DECLARE_bool(use_gpu);
DECLARE_bool(thread_local_rand_use_global_seed);
void testActivation(const string& act) {
LOG(INFO) << "test activation: " << act;
......
......@@ -27,11 +27,11 @@ limitations under the License. */
using namespace paddle; // NOLINT
using namespace std; // NOLINT
P_DECLARE_bool(use_gpu);
P_DECLARE_int32(gpu_id);
P_DECLARE_double(checkgrad_eps);
P_DECLARE_bool(thread_local_rand_use_global_seed);
P_DECLARE_bool(prev_batch_state);
DECLARE_bool(use_gpu);
DECLARE_int32(gpu_id);
DECLARE_double(checkgrad_eps);
DECLARE_bool(thread_local_rand_use_global_seed);
DECLARE_bool(prev_batch_state);
// Test that the batchNormLayer can be followed by a ConvLayer
TEST(Layer, batchNorm) {
......
......@@ -28,11 +28,11 @@ limitations under the License. */
using namespace paddle; // NOLINT
using namespace std; // NOLINT
P_DECLARE_bool(use_gpu);
P_DECLARE_int32(gpu_id);
P_DECLARE_double(checkgrad_eps);
P_DECLARE_bool(thread_local_rand_use_global_seed);
P_DECLARE_bool(prev_batch_state);
DECLARE_bool(use_gpu);
DECLARE_int32(gpu_id);
DECLARE_double(checkgrad_eps);
DECLARE_bool(thread_local_rand_use_global_seed);
DECLARE_bool(prev_batch_state);
// Test that the convTrans forward is the same as conv backward
TEST(Layer, convTransLayerFwd) {
......
......@@ -28,11 +28,11 @@ limitations under the License. */
using namespace paddle; // NOLINT
using namespace std; // NOLINT
P_DECLARE_bool(use_gpu);
P_DECLARE_int32(gpu_id);
P_DECLARE_double(checkgrad_eps);
P_DECLARE_bool(thread_local_rand_use_global_seed);
P_DECLARE_bool(prev_batch_state);
DECLARE_bool(use_gpu);
DECLARE_int32(gpu_id);
DECLARE_double(checkgrad_eps);
DECLARE_bool(thread_local_rand_use_global_seed);
DECLARE_bool(prev_batch_state);
// Do one forward pass of convTrans layer and check to see if its output
// matches the given result
......
......@@ -21,9 +21,9 @@ limitations under the License. */
using namespace paddle; // NOLINT
using namespace std; // NOLINT
P_DECLARE_bool(use_gpu);
P_DECLARE_int32(gpu_id);
P_DECLARE_bool(thread_local_rand_use_global_seed);
DECLARE_bool(use_gpu);
DECLARE_int32(gpu_id);
DECLARE_bool(thread_local_rand_use_global_seed);
enum InputType {
INPUT_DATA, // dense vector
......
......@@ -26,11 +26,11 @@ limitations under the License. */
using namespace paddle; // NOLINT
using namespace std; // NOLINT
P_DECLARE_bool(use_gpu);
P_DECLARE_int32(gpu_id);
P_DECLARE_double(checkgrad_eps);
P_DECLARE_bool(thread_local_rand_use_global_seed);
P_DECLARE_bool(prev_batch_state);
DECLARE_bool(use_gpu);
DECLARE_int32(gpu_id);
DECLARE_double(checkgrad_eps);
DECLARE_bool(thread_local_rand_use_global_seed);
DECLARE_bool(prev_batch_state);
TEST(Operator, dot_mul) {
TestConfig config;
......
......@@ -25,10 +25,10 @@ limitations under the License. */
using namespace paddle; // NOLINT
using namespace std; // NOLINT
P_DECLARE_int32(gpu_id);
P_DECLARE_double(checkgrad_eps);
P_DEFINE_bool(use_label, true, "input label or sequence label");
P_DEFINE_bool(static_para, false, "static parameter");
DECLARE_int32(gpu_id);
DECLARE_double(checkgrad_eps);
DEFINE_bool(use_label, true, "input label or sequence label");
DEFINE_bool(static_para, false, "static parameter");
struct DataIn {
std::vector<Argument> inArgs;
......@@ -267,8 +267,8 @@ TEST(Compare, img_conv2) {
}
#endif
P_DEFINE_string(config_file_a, "", "config of one network to compare");
P_DEFINE_string(config_file_b, "", "config of another network to compare");
DEFINE_string(config_file_a, "", "config of one network to compare");
DEFINE_string(config_file_b, "", "config of another network to compare");
TEST(Compare, network) {
if (FLAGS_config_file_a != "" && FLAGS_config_file_b != "") {
compareNetwork(FLAGS_config_file_a, FLAGS_config_file_b);
......
......@@ -19,7 +19,7 @@ limitations under the License. */
#include "paddle/utils/PythonUtil.h"
#include "paddle/utils/Util.h"
P_DEFINE_string(train_list, "unittest.list", "file list for unittest");
DEFINE_string(train_list, "unittest.list", "file list for unittest");
namespace paddle {
namespace unittest {
......
......@@ -20,7 +20,7 @@ limitations under the License. */
#include <paddle/utils/Util.h>
#include <paddle/utils/Version.h>
P_DECLARE_int32(seed);
DECLARE_int32(seed);
using namespace paddle; // NOLINT
using namespace std; // NOLINT
......
......@@ -23,9 +23,9 @@ limitations under the License. */
using namespace paddle; // NOLINT
using namespace std; // NOLINT
P_DECLARE_bool(use_gpu);
P_DECLARE_bool(rnn_use_batch);
P_DECLARE_int32(fixed_seq_length);
DECLARE_bool(use_gpu);
DECLARE_bool(rnn_use_batch);
DECLARE_int32(fixed_seq_length);
void checkError(const Matrix& matrix1, const Matrix& matrix2) {
CHECK(matrix1.getHeight() == matrix2.getHeight());
......
......@@ -29,11 +29,11 @@ limitations under the License. */
using namespace paddle; // NOLINT
using namespace std; // NOLINT
P_DECLARE_bool(use_gpu);
P_DECLARE_int32(num_passes);
P_DECLARE_string(config);
P_DECLARE_string(init_model_path);
P_DECLARE_string(config_args);
DECLARE_bool(use_gpu);
DECLARE_int32(num_passes);
DECLARE_string(config);
DECLARE_string(init_model_path);
DECLARE_string(config_args);
size_t fcLayerWidth = 1024;
......
......@@ -25,7 +25,7 @@ limitations under the License. */
using namespace paddle; // NOLINT
using namespace std; // NOLINT
P_DECLARE_bool(use_gpu);
DECLARE_bool(use_gpu);
const real* getData(const Matrix& matrix) {
if (matrix.useGpu()) {
......
......@@ -408,7 +408,7 @@ public:
LOG(FATAL) << "Not implemented";
}
virtual void addBias(Matrix& b, real scale, bool sharedBias) {
void addBias(Matrix& b, real scale, bool sharedBias) {
if (!sharedBias) {
addBias(b, scale);
} else {
......@@ -425,7 +425,7 @@ public:
LOG(FATAL) << "Not implemented";
}
virtual void collectBias(Matrix& a, real scale, bool sharedBias) {
void collectBias(Matrix& a, real scale, bool sharedBias) {
if (!sharedBias) {
collectBias(a, scale);
} else {
......
......@@ -24,7 +24,7 @@ limitations under the License. */
#include "paddle/utils/Thread.h"
#include "paddle/utils/Util.h"
P_DEFINE_bool(allow_inefficient_sparse_update,
DEFINE_bool(allow_inefficient_sparse_update,
false,
"Whether to allow inefficient sparse update");
......
......@@ -20,7 +20,7 @@ limitations under the License. */
#include "paddle/utils/CommandLineParser.h"
#include "paddle/utils/Util.h"
P_DECLARE_bool(allow_inefficient_sparse_update);
DECLARE_bool(allow_inefficient_sparse_update);
namespace paddle {
......
......@@ -16,7 +16,7 @@ limitations under the License. */
#include "Allocator.h"
#include "paddle/utils/Util.h"
P_DEFINE_int32(pool_limit_size,
DEFINE_int32(pool_limit_size,
536870912,
"maximum memory size managed by a memory pool, default is 512M");
......
......@@ -16,12 +16,10 @@ add_simple_unittest(test_CpuGpuVector)
add_simple_unittest(test_Allocator)
if(WITH_GPU)
if(COMPILER_SUPPORT_CXX11)
CUDA_ADD_EXECUTABLE(test_Tensor test_Tensor.cu)
link_paddle_test(test_Tensor)
CUDA_ADD_EXECUTABLE(test_lazyAssign test_lazyAssign.cu)
link_paddle_test(test_lazyAssign)
endif()
else()
compile_cu_as_cpp(test_Tensor.cu)
add_unittest(test_Tensor test_Tensor.cu)
......
......@@ -22,9 +22,9 @@ limitations under the License. */
using namespace paddle; // NOLINT
#ifndef PADDLE_TYPE_DOUBLE
P_DEFINE_double(max_diff, 1e-5, "max diff allowed");
DEFINE_double(max_diff, 1e-5, "max diff allowed");
#else
P_DEFINE_double(max_diff, 1e-13, "max diff allowed");
DEFINE_double(max_diff, 1e-13, "max diff allowed");
#endif
class SetMaxDiff {
......
......@@ -245,6 +245,8 @@ int32_t Argument::resizeAndCopyFrom(const Argument& src,
bool useGpu,
hl_stream_t stream) {
dataId = src.dataId;
frameWidth = src.frameWidth;
frameHeight = src.frameHeight;
if (!src.sequenceStartPositions) {
// non-sequence input, copy samples directly
......
......@@ -19,7 +19,7 @@ limitations under the License. */
#include <cmath>
P_DEFINE_bool(log_clipping, false, "enable log clipping or not");
DEFINE_bool(log_clipping, false, "enable log clipping or not");
namespace paddle {
......
......@@ -26,11 +26,11 @@ limitations under the License. */
#include "paddle/utils/CommandLineParser.h"
#include "paddle/utils/Logging.h"
P_DEFINE_int32(enable_grad_share,
DEFINE_int32(enable_grad_share,
(100 * 1024 * 1024),
"threshold for enable gradient parameter share for batch "
"multi-cpu training");
P_DEFINE_int32(
DEFINE_int32(
grad_share_block_num,
64,
"block number of gradient parameter share for batch multi-cpu training");
......
......@@ -18,7 +18,7 @@ limitations under the License. */
#include "paddle/utils/CommandLineParser.h"
#include "paddle/utils/Stat.h"
P_DECLARE_string(pservers);
DECLARE_string(pservers);
namespace paddle {
......
......@@ -31,7 +31,7 @@ limitations under the License. */
#include "paddle/utils/Util.h"
/// quick ack can reduce the latency of small message
P_DEFINE_bool(small_messages,
DEFINE_bool(small_messages,
false,
"if message size is small, recommend set it True to enable quick "
"ack and no delay");
......@@ -39,13 +39,13 @@ P_DEFINE_bool(small_messages,
/// reasonable sock_send_buf_size can control the traffic injected into switch
/// network. Injecting too many data into traffic could cause packets loss which
/// cause long latency and degrade the efficiency of communication.
P_DEFINE_int32(sock_send_buf_size,
DEFINE_int32(sock_send_buf_size,
1024 * 1024 * 40,
"restrict sock send buff size, can reduce network congestion if "
"set carefully");
/// reasonable size can hold bursted packets and reduce packets loss
P_DEFINE_int32(sock_recv_buf_size,
DEFINE_int32(sock_recv_buf_size,
1024 * 1024 * 40,
"restrict sock recv buff size");
......
......@@ -20,8 +20,8 @@ limitations under the License. */
#include "paddle/utils/Stat.h"
#include "paddle/utils/StringUtil.h"
P_DEFINE_string(pservers, "127.0.0.1", "Comma separated addresses of pservers");
P_DEFINE_int32(parallel_thread_num, 1, "Thread number for parameter send");
DEFINE_string(pservers, "127.0.0.1", "Comma separated addresses of pservers");
DEFINE_int32(parallel_thread_num, 1, "Thread number for parameter send");
namespace paddle {
......
......@@ -34,7 +34,7 @@ limitations under the License. */
#include "ProtoServer.h"
#include "SparseParameterDistribution.h"
P_DECLARE_int32(parallel_thread_num);
DECLARE_int32(parallel_thread_num);
namespace paddle {
......
......@@ -30,11 +30,11 @@ limitations under the License. */
#include "paddle/utils/GlobalConstants.h"
#include "paddle/utils/Stat.h"
P_DEFINE_int32(pserver_num_threads, 1, "number of threads for sync op exec");
P_DEFINE_double(async_lagged_ratio_min,
DEFINE_int32(pserver_num_threads, 1, "number of threads for sync op exec");
DEFINE_double(async_lagged_ratio_min,
1.0,
"control config_.async_lagged_grad_discard_ratio() min value");
P_DEFINE_double(
DEFINE_double(
async_lagged_ratio_default,
1.5,
"if async_lagged_grad_discard_ratio is not set in trainer_config.conf"
......
......@@ -38,7 +38,7 @@ limitations under the License. */
#include "ProtoServer.h"
P_DECLARE_int32(port);
DECLARE_int32(port);
namespace paddle {
......
......@@ -20,23 +20,23 @@ limitations under the License. */
#include "SparseParameterDistribution.h"
P_DEFINE_bool(check_sparse_distribution_in_pserver,
DEFINE_bool(check_sparse_distribution_in_pserver,
false,
"check whether sparse parameter exhibts balanced distribution at "
"all pservers");
P_DEFINE_bool(show_check_sparse_distribution_log,
DEFINE_bool(show_check_sparse_distribution_log,
false,
"show logs details for sparse parameter distribution in pserver");
P_DEFINE_int32(check_sparse_distribution_batches,
DEFINE_int32(check_sparse_distribution_batches,
100,
"run sparse parameter distribution check for N batches");
P_DEFINE_double(
DEFINE_double(
check_sparse_distribution_ratio,
0.6,
"if parameters dispatched to different pservers exhibit unbalanced "
" distribution for check_sparse_distribution_ratio * "
" check_sparse_distribution_batches times, crash program");
P_DEFINE_double(check_sparse_distribution_unbalance_degree,
DEFINE_double(check_sparse_distribution_unbalance_degree,
2.0,
"the ratio of maximum data size and minimun data size for "
"different pserver");
......
......@@ -195,9 +195,9 @@ SocketClient::SocketClient(const std::string& serverAddr, int serverPort) {
channel_.reset(new SocketChannel(sockfd));
}
P_DEFINE_string(server_addr, "127.0.0.1", "Server address");
P_DEFINE_int64(dim, 10000000, "Data size");
P_DEFINE_int32(loop_time, 100000, "test loop time");
DEFINE_string(server_addr, "127.0.0.1", "Server address");
DEFINE_int64(dim, 10000000, "Data size");
DEFINE_int32(loop_time, 100000, "test loop time");
using namespace paddle; // NOLINT
......
......@@ -21,9 +21,9 @@ limitations under the License. */
using namespace paddle; // NOLINT
using namespace std; // NOLINT
P_DECLARE_int32(num_gradient_servers);
P_DEFINE_string(server_addr, "127.0.0.1", "assign server address");
P_DEFINE_int32(server_cpu, 0, "assign server cpu");
DECLARE_int32(num_gradient_servers);
DEFINE_string(server_addr, "127.0.0.1", "assign server address");
DEFINE_int32(server_cpu, 0, "assign server cpu");
class ParameterServer2Tester : public ParameterServer2 {
public:
......
......@@ -21,10 +21,10 @@ limitations under the License. */
#include "paddle/pserver/ProtoServer.h"
#include "paddle/utils/Stat.h"
P_DEFINE_string(server_addr, "127.0.0.1", "Server address");
P_DEFINE_int64(dim, 50000000, "Data size");
P_DEFINE_bool(test_proto_server, true, "whether to test ProtoServer");
P_DEFINE_bool(benchmark, false, "Do benchmark. Skip some tests");
DEFINE_string(server_addr, "127.0.0.1", "Server address");
DEFINE_int64(dim, 50000000, "Data size");
DEFINE_bool(test_proto_server, true, "whether to test ProtoServer");
DEFINE_bool(benchmark, false, "Do benchmark. Skip some tests");
using namespace paddle; // NOLINT
......
......@@ -21,8 +21,6 @@ function version(){
echo " with_double: @WITH_DOUBLE@"
echo " with_python: @WITH_PYTHON@"
echo " with_rdma: @WITH_RDMA@"
echo " with_glog: @WITH_GLOG@"
echo " with_gflags: @WITH_GFLAGS@"
echo " with_metric_learning: @WITH_METRIC@"
echo " with_timer: @WITH_TIMER@"
echo " with_predict_sdk: @WITH_PREDICT_SDK@"
......
......@@ -19,8 +19,8 @@ limitations under the License. */
#include "paddle/pserver/ParameterServer2.h"
#include "paddle/utils/PythonUtil.h"
P_DEFINE_string(model_dir, "", "Directory for separated model files");
P_DEFINE_string(model_file, "", "File for merged model file");
DEFINE_string(model_dir, "", "Directory for separated model files");
DEFINE_string(model_file, "", "File for merged model file");
using namespace paddle; // NOLINT
using namespace std; // NOLINT
......
......@@ -17,8 +17,8 @@ limitations under the License. */
#include "paddle/utils/GlobalConstants.h"
#include "paddle/utils/Stat.h"
P_DECLARE_int32(trainer_id);
P_DECLARE_string(save_dir);
DECLARE_int32(trainer_id);
DECLARE_string(save_dir);
namespace paddle {
......
......@@ -19,7 +19,7 @@ limitations under the License. */
#include "paddle/math/SparseRowMatrix.h"
#include "paddle/utils/Thread.h"
P_DECLARE_int32(trainer_count);
DECLARE_int32(trainer_count);
namespace paddle {
......
......@@ -38,60 +38,56 @@ limitations under the License. */
#include "paddle/gserver/gradientmachines/NeuralNetwork.h"
#include "paddle/gserver/layers/ValidationLayer.h"
P_DEFINE_string(config, "", "Trainer config file");
DEFINE_string(config, "", "Trainer config file");
P_DEFINE_int32(test_period,
DEFINE_int32(test_period,
0,
"if equal 0, do test on all test data at the end of "
"each pass. While if equal non-zero, do test on all test "
"data every test_period batches");
P_DEFINE_bool(test_all_data_in_one_period,
DEFINE_bool(test_all_data_in_one_period,
false,
"This option was deprecated, since we will always do "
"test on all test set ");
P_DEFINE_bool(local, true, "Train in local mode or not");
DEFINE_bool(local, true, "Train in local mode or not");
P_DEFINE_int32(average_test_period,
DEFINE_int32(average_test_period,
0,
"Do test on average parameter every so"
" many batches. MUST be devided by FLAGS_log_period."
" Default 0 means do not test average parameter");
P_DEFINE_int32(saving_period, 1, "Save parameters every so many passes");
P_DEFINE_int64(saving_period_by_batches,
DEFINE_int32(saving_period, 1, "Save parameters every so many passes");
DEFINE_int64(saving_period_by_batches,
0,
"Save parameters every so many batches in one pass");
P_DEFINE_string(save_dir, "", "Directory for saving model parameter");
P_DEFINE_int32(start_pass,
DEFINE_string(save_dir, "", "Directory for saving model parameter");
DEFINE_int32(start_pass,
0,
"Start training from this pass. "
"Will load parameter from the previous pass");
P_DEFINE_int32(test_pass,
-1,
"Will load parameter start from this pass to test");
P_DEFINE_int32(test_wait, 0, "Waiting for pass parameter if not exist");
P_DEFINE_bool(with_cost, true, "enable cost layer or not");
P_DEFINE_bool(distribute_test, false, "test in distribute mode");
DEFINE_int32(test_pass, -1, "Will load parameter start from this pass to test");
DEFINE_int32(test_wait, 0, "Waiting for pass parameter if not exist");
DEFINE_bool(with_cost, true, "enable cost layer or not");
DEFINE_bool(distribute_test, false, "test in distribute mode");
P_DEFINE_int32(num_passes, 100, "train for so many passes");
DEFINE_int32(num_passes, 100, "train for so many passes");
P_DEFINE_string(config_args,
DEFINE_string(config_args,
"",
"arguments passed to config file."
"Format: key1=value1,key2=value2");
P_DEFINE_bool(save_only_one,
DEFINE_bool(save_only_one,
false,
"Save only parameters in last pass, remove previous.");
P_DEFINE_string(feat_file, "", "File name of extracted feature.");
P_DEFINE_string(predict_output_dir,
DEFINE_string(feat_file, "", "File name of extracted feature.");
DEFINE_string(predict_output_dir,
"",
"Directory that saves the predicted results of output layers");
P_DEFINE_string(model_list,
"",
"File that saves the model list when evaluation");
DEFINE_string(model_list, "", "File that saves the model list when evaluation");
namespace paddle {
......
......@@ -34,7 +34,7 @@ limitations under the License. */
#include "paddle/internals/metric_learning/MetricTrainer.h"
#endif
P_DECLARE_int32(num_passes);
DECLARE_int32(num_passes);
namespace paddle {
......
......@@ -18,9 +18,9 @@ limitations under the License. */
#include "paddle/utils/Stat.h"
#include "paddle/utils/Util.h"
P_DECLARE_int32(test_period);
DECLARE_int32(test_period);
P_DEFINE_bool(feed_data, false, "Whether to read data from DataProvider.");
DEFINE_bool(feed_data, false, "Whether to read data from DataProvider.");
namespace paddle {
......
......@@ -18,16 +18,16 @@ limitations under the License. */
#include "paddle/utils/Flags.h"
#include "paddle/utils/PythonUtil.h"
P_DECLARE_string(config);
P_DECLARE_string(init_model_path);
P_DECLARE_int32(start_pass);
P_DECLARE_string(save_dir);
P_DECLARE_int32(trainer_id);
P_DECLARE_bool(local);
P_DECLARE_bool(with_cost);
P_DECLARE_bool(with_gpu);
P_DECLARE_bool(parallel_nn);
P_DECLARE_string(config_args);
DECLARE_string(config);
DECLARE_string(init_model_path);
DECLARE_int32(start_pass);
DECLARE_string(save_dir);
DECLARE_int32(trainer_id);
DECLARE_bool(local);
DECLARE_bool(with_cost);
DECLARE_bool(with_gpu);
DECLARE_bool(parallel_nn);
DECLARE_string(config_args);
const char *kConfigParserModuleName = "paddle.trainer.config_parser";
const char *kConfigParserFuncName = "parse_config_and_serialize";
......
......@@ -14,17 +14,17 @@ limitations under the License. */
#include "TrainerInternalConfig.h"
P_DEFINE_int32(show_parameter_stats_period,
DEFINE_int32(show_parameter_stats_period,
0,
"Whether to show parameter stats during training");
P_DEFINE_int32(dot_period, 1, "Print '.' every so many batches");
DEFINE_int32(dot_period, 1, "Print '.' every so many batches");
P_DEFINE_bool(use_old_updater, false, "Use the old RemoteParameterUpdater");
DEFINE_bool(use_old_updater, false, "Use the old RemoteParameterUpdater");
P_DECLARE_int32(num_passes);
DECLARE_int32(num_passes);
P_DECLARE_bool(local);
DECLARE_bool(local);
namespace paddle {
......
......@@ -22,21 +22,20 @@ limitations under the License. */
#include "Trainer.h"
#include "paddle/pserver/RDMANetwork.h"
P_DEFINE_bool(start_pserver, false, "Whether to start pserver");
P_DECLARE_int32(gpu_id);
P_DEFINE_string(job, "train", "one of (train, test, checkgrad)");
P_DECLARE_int32(start_pass);
P_DECLARE_string(config);
P_DECLARE_string(init_model_path);
P_DECLARE_string(rdma_tcp);
DEFINE_bool(start_pserver, false, "Whether to start pserver");
DECLARE_int32(gpu_id);
DEFINE_string(job, "train", "one of (train, test, checkgrad)");
DECLARE_int32(start_pass);
DECLARE_string(config);
DECLARE_string(init_model_path);
DECLARE_string(rdma_tcp);
using namespace paddle; // NOLINT
int main(int argc, char** argv) {
// write logs instantly (never buffer log messages)
#ifdef PADDLE_USE_GLOG
// write logs instantly (never buffer log messages)
FLAGS_logbuflevel = -1;
#endif
initMain(argc, argv);
initPython(argc, argv);
......
......@@ -24,10 +24,10 @@ using namespace std; // NOLINT
static const string& configFile = "trainer/tests/sample_trainer_config.conf";
P_DECLARE_int32(gpu_id);
P_DECLARE_bool(use_gpu);
P_DECLARE_string(config);
P_DECLARE_string(config_args);
DECLARE_int32(gpu_id);
DECLARE_bool(use_gpu);
DECLARE_string(config);
DECLARE_string(config_args);
struct comData {
vector<Argument> outArgs;
......
......@@ -25,20 +25,20 @@ using namespace std; // NOLINT
static const string& configFile1 =
"trainer/tests/sample_trainer_config_qb_rnn.conf";
P_DECLARE_bool(use_gpu);
P_DECLARE_string(config);
P_DECLARE_int32(gpu_id);
P_DECLARE_int32(seed);
P_DECLARE_int32(num_passes);
P_DECLARE_int32(saving_period);
P_DECLARE_int32(num_gradient_servers);
P_DECLARE_int32(port);
P_DECLARE_bool(local);
P_DECLARE_bool(use_old_updater);
P_DECLARE_bool(parallel_nn);
P_DECLARE_string(config_args);
P_DEFINE_double(max_diff_ratio,
DECLARE_bool(use_gpu);
DECLARE_string(config);
DECLARE_int32(gpu_id);
DECLARE_int32(seed);
DECLARE_int32(num_passes);
DECLARE_int32(saving_period);
DECLARE_int32(num_gradient_servers);
DECLARE_int32(port);
DECLARE_bool(local);
DECLARE_bool(use_old_updater);
DECLARE_bool(parallel_nn);
DECLARE_string(config_args);
DEFINE_double(max_diff_ratio,
0.0f,
"max diff ratio allowed for parameters value");
......
......@@ -22,25 +22,25 @@ limitations under the License. */
using namespace paddle; // NOLINT
using namespace std; // NOLINT
P_DECLARE_int32(gpu_id);
DECLARE_int32(gpu_id);
P_DECLARE_bool(local);
P_DECLARE_bool(use_gpu);
DECLARE_bool(local);
DECLARE_bool(use_gpu);
P_DECLARE_string(config);
P_DECLARE_string(nics);
DECLARE_string(config);
DECLARE_string(nics);
P_DEFINE_string(config_file_a, "", "config of one network to compare");
P_DEFINE_string(config_file_b, "", "config of another network to compare");
P_DEFINE_bool(need_high_accuracy,
DEFINE_string(config_file_a, "", "config of one network to compare");
DEFINE_string(config_file_b, "", "config of another network to compare");
DEFINE_bool(need_high_accuracy,
false,
"whether need to run in double accuracy");
P_DEFINE_double(
DEFINE_double(
max_diff_ratio,
0.0f,
"max diff ratio allowed for outputs and parameters (value/gradient)");
P_DECLARE_bool(thread_local_rand_use_global_seed);
P_DECLARE_int32(seed);
DECLARE_bool(thread_local_rand_use_global_seed);
DECLARE_int32(seed);
struct ComData {
vector<Argument> outArgs;
......
......@@ -22,20 +22,20 @@ limitations under the License. */
using namespace paddle; // NOLINT
using namespace std; // NOLINT
P_DECLARE_int32(gpu_id);
DECLARE_int32(gpu_id);
P_DECLARE_bool(local);
P_DECLARE_bool(use_gpu);
DECLARE_bool(local);
DECLARE_bool(use_gpu);
P_DECLARE_string(config);
P_DECLARE_string(nics);
DECLARE_string(config);
DECLARE_string(nics);
P_DEFINE_string(config_file_a, "", "config of one network to compare");
P_DEFINE_string(config_file_b, "", "config of another network to compare");
P_DEFINE_bool(need_high_accuracy,
DEFINE_string(config_file_a, "", "config of one network to compare");
DEFINE_string(config_file_b, "", "config of another network to compare");
DEFINE_bool(need_high_accuracy,
true,
"whether need to run in double accuracy (recommended)");
P_DEFINE_double(
DEFINE_double(
max_diff_ratio,
0.0f,
"max diff ratio allowed for outputs and parameters (value/gradient)");
......
......@@ -18,9 +18,9 @@ limitations under the License. */
#include <gtest/gtest.h>
P_DECLARE_string(config);
P_DECLARE_string(config_args);
P_DEFINE_string(merger,
DECLARE_string(config);
DECLARE_string(config_args);
DEFINE_string(merger,
"./paddle_merge_model",
"path to paddle_merge_model binary");
......
......@@ -28,10 +28,10 @@ static const string& configFile3 = "trainer/tests/chunking.conf";
static const string& configFile4 =
"trainer/tests/sample_trainer_config_parallel.conf";
P_DECLARE_bool(use_gpu);
P_DECLARE_string(config);
P_DECLARE_int32(gpu_id);
P_DECLARE_bool(allow_only_one_model_on_one_gpu);
DECLARE_bool(use_gpu);
DECLARE_string(config);
DECLARE_int32(gpu_id);
DECLARE_bool(allow_only_one_model_on_one_gpu);
void checkGradientTest(const string& configFile,
bool useGpu,
......
......@@ -27,12 +27,12 @@ static const string& configFile1 = "trainer/tests/sample_trainer_config.conf";
static const string& configFile2 =
"trainer/tests/sample_trainer_config_parallel.conf";
P_DECLARE_bool(use_gpu);
P_DECLARE_string(config);
P_DECLARE_int32(gpu_id);
P_DECLARE_int32(seed);
P_DECLARE_int32(num_passes);
P_DECLARE_int32(saving_period);
DECLARE_bool(use_gpu);
DECLARE_string(config);
DECLARE_int32(gpu_id);
DECLARE_int32(seed);
DECLARE_int32(num_passes);
DECLARE_int32(saving_period);
class TrainerForTest : public paddle::Trainer {
public:
......@@ -122,10 +122,10 @@ TEST(average_window_cpu, gpu4) {
#endif
// 3. test trainer + pserver.
P_DECLARE_int32(num_gradient_servers);
P_DECLARE_int32(port);
P_DECLARE_bool(local);
P_DECLARE_bool(use_old_updater);
DECLARE_int32(num_gradient_servers);
DECLARE_int32(port);
DECLARE_bool(local);
DECLARE_bool(use_old_updater);
double checkRemoteParameterUpdater(TrainerForTest& trainer) {
auto gradientMachine = trainer.getGradientMachine();
......
......@@ -30,7 +30,7 @@ static string modelDir = "trainer/tests/rnn_gen_test_model_dir/t1"; // NOLINT
static string expectFile = // NOLINT
"trainer/tests/rnn_gen_test_model_dir/r1.test"; // NOLINT
P_DECLARE_string(config_args);
DECLARE_string(config_args);
vector<float> readRetFile(const string& fname) {
ifstream inFile(fname);
......
......@@ -20,13 +20,13 @@ limitations under the License. */
#include "paddle/utils/Flags.h"
#include "paddle/utils/Stat.h"
P_DEFINE_bool(log_barrier_abstract,
DEFINE_bool(log_barrier_abstract,
true,
"if true, show abstract of barrier performance");
P_DEFINE_int32(log_barrier_lowest_nodes,
DEFINE_int32(log_barrier_lowest_nodes,
5,
"how many lowest node will be logged");
P_DEFINE_bool(log_barrier_show_log,
DEFINE_bool(log_barrier_show_log,
false, // for performance tuning insight
"if true, always show barrier abstract even with little gap");
......
......@@ -13,220 +13,7 @@ See the License for the specific language governing permissions and
limitations under the License. */
#include "CommandLineParser.h"
#ifndef PADDLE_USE_GFLAGS
#include <stdlib.h>
#include <algorithm>
#include <iomanip>
#include <iostream>
#include <string>
#include <tuple>
#include <utility>
#include <vector>
#include "paddle/utils/StringUtil.h"
namespace paddle {
static constexpr int kStatusOK = 0;
static constexpr int kStatusInvalid = 1;
static constexpr int kStatusNotFound = 2;
/**
* \brief: Convert a string to any type value.
*
* \note: It will specialize by type T that is supported.
*/
template <typename T>
bool StringToValue(const std::string& content, T* value) {
bool ok;
*value = str::toWithStatus<T>(content, &ok);
return ok;
}
template <>
bool StringToValue<bool>(const std::string& content, bool* value) {
std::string tmp = content;
std::transform(tmp.begin(), tmp.end(), tmp.begin(), [](char in) -> char {
if (in <= 'Z' && in >= 'A') {
return in - ('Z' - 'z');
} else {
return in;
}
}); // tolower.
if (tmp == "true" || tmp == "1") {
*value = true;
return true;
} else if (tmp == "false" || tmp == "0") {
*value = false;
return true;
} else {
return false;
}
}
template <>
bool StringToValue<std::string>(const std::string& content,
std::string* value) {
*value = content;
return true;
}
/**
* \brief Parse argument "--blah=blah".
*
* \param argument: The command line argument string, such as "--blah=blah"
* \param [out] extraInfo: The details error message for parse argument.
* \return: kStatusOK, kStatusInvalid, kStatusNotFound
*/
template <typename T>
int ParseArgument(const std::string& argument, std::string* extraInfo) {
for (auto& command :
flags_internal::CommandLineFlagRegistry<T>::Instance()->commands) {
std::string& name = command.name;
T* value = command.value;
std::string prefix = "--";
prefix += name;
prefix += "=";
std::string content;
if (str::startsWith(argument, prefix)) {
content = argument.substr(prefix.size(), argument.size() - prefix.size());
} else {
prefix = "-";
prefix += name;
prefix += "=";
if (str::startsWith(argument, prefix)) {
content =
argument.substr(prefix.size(), argument.size() - prefix.size());
}
}
if (!content.empty()) {
if (StringToValue(content, value)) {
return kStatusOK;
} else {
*extraInfo = name;
return kStatusInvalid;
}
}
}
return kStatusNotFound;
}
/**
* @brief ParseBoolArgumentExtra
* parse '--flag_name', '-flag_name' as true; '--noflag_name', '-noflag_name' as
* false
*/
static int ParseBoolArgumentExtra(const std::string& argument,
std::string* extraInfo) {
(void)(extraInfo); // unused extraInfo, just make api same.
//! @warning: The order and content of prefixes is DESIGNED for parsing
//! command line. The length of prefixes are 1, 2, 3, 4. The parse logic takes
//! use of this fact. DO NOT CHANGE IT without reading how to parse command
//! below.
static const std::vector<std::pair<const char*, bool>> prefixes = {
{"-", true}, {"--", true}, {"-no", false}, {"--no", false}};
for (flags_internal::CommandLineFlagRegistry<bool>::Command& command :
flags_internal::CommandLineFlagRegistry<bool>::Instance()->commands) {
if (argument.size() > command.name.size()) {
//! Use the length of prefix is 1, 2, 3, 4.
size_t diff = argument.size() - command.name.size() - 1UL;
if (diff < prefixes.size()) {
const std::string& prefix = std::get<0>(prefixes[diff]);
if (argument == prefix + command.name) {
*command.value = std::get<1>(prefixes[diff]);
return kStatusOK;
}
}
}
}
return kStatusNotFound;
}
/**
* \brief: Print command line arguments' usage with type T.
*/
template <typename T>
static void PrintTypeUsage() {
for (auto& command :
flags_internal::CommandLineFlagRegistry<T>::Instance()->commands) {
std::string& name = command.name;
name = "--" + name; // Program will exit, so modify name is safe.
std::string& desc = command.text;
T& defaultValue = command.defaultValue;
std::cerr << std::setw(20) << name << ": " << desc
<< "[default:" << defaultValue << "]." << std::endl;
}
}
template <typename... TS>
static void PrintTypeUsages() {
int unused[] = {0, (PrintTypeUsage<TS>(), 0)...};
(void)(unused);
}
/**
* \brief: Print all usage, and exit(1)
*/
static void PrintUsageAndExit(const char* argv0) {
std::cerr << "Program " << argv0 << " Flags: " << std::endl;
PrintTypeUsages<bool, int32_t, std::string, double, int64_t, uint64_t>();
exit(1);
}
/**
* \brief: Print the error flags, usage, and exit.
*/
static void PrintParseError(const std::string& name,
const char* actualInput,
const char* arg0) {
std::cerr << "Parse command flag " << name << " error! User input is "
<< actualInput << std::endl;
PrintUsageAndExit(arg0);
}
void ParseCommandLineFlags(int* argc, char** argv, bool withHelp) {
int unused_argc = 1;
std::string extra;
for (int i = 1; i < *argc; ++i) {
std::string arg = argv[i];
int s = kStatusInvalid;
#define ParseArgumentWithType(type) \
s = ParseArgument<type>(arg, &extra); \
if (s == kStatusOK) { \
continue; \
} else if (s == kStatusInvalid) { \
PrintParseError(extra, argv[i], argv[0]); \
}
ParseArgumentWithType(bool); // NOLINT
ParseArgumentWithType(int32_t);
ParseArgumentWithType(double); // NOLINT
ParseArgumentWithType(int64_t);
ParseArgumentWithType(uint64_t);
ParseArgumentWithType(std::string);
#undef ParseArgumentWithType
s = ParseBoolArgumentExtra(arg, &extra);
if (s == kStatusOK) {
continue;
}
if (withHelp && (arg == "--help" || arg == "-h")) {
PrintUsageAndExit(argv[0]);
}
// NOT Found for all flags.
std::swap(argv[unused_argc++], argv[i]);
}
*argc = unused_argc;
}
} // namespace paddle
#else
namespace paddle {
#ifndef GFLAGS_NS
#define GFLAGS_NS google
......@@ -243,4 +30,3 @@ void ParseCommandLineFlags(int* argc, char** argv, bool withHelp) {
}
} // namespace paddle
#endif
......@@ -13,167 +13,10 @@ See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
#ifndef PADDLE_USE_GFLAGS
#include <stdint.h>
#include <string>
#include <vector>
#include "DisableCopy.h"
namespace paddle {
namespace flags_internal {
/**
* Command line flag registry for special type T. It will store all command
* arguments settings. such as name, default value.
*/
template <typename T>
struct CommandLineFlagRegistry {
/**
* The factory method of CommandLineFlagRegistry
*
* \return: The singleton instance of CommandLineFlagRegistry.
*/
static CommandLineFlagRegistry* Instance() {
static CommandLineFlagRegistry instance_;
return &instance_;
}
struct Command {
/// name of argument.
std::string name;
/// address of actual variable. such as FLAGS_xxx.
T* value;
/// usage text.
std::string text;
/// default value of this command.
T defaultValue;
};
/// the command line arguments of type T.
std::vector<Command> commands;
DISABLE_COPY(CommandLineFlagRegistry);
private:
inline CommandLineFlagRegistry() {}
};
/**
*Helper class to register command line flag.
*/
template <typename T>
struct CommandLineFlagRegister {
/**
* \brief: Register a command line argument
*
* \param [in] name: The command line name.
* \param [inout] val: The command line argument instance, FLAGS_xxx.
* \param [in] desc: The command line helper message.
*/
CommandLineFlagRegister(const std::string& name,
T* val,
const std::string desc) {
CommandLineFlagRegistry<T>::Instance()->commands.push_back(
{name, val, desc, *val});
}
};
/**
* \brief: Define a command line arguments.
*
* \param type: The variable type, such as int, double, etc.
* \param name: The variable name. The command line argument is '--name', the
*variable
*is 'FLAGS_name'
* \param default_value: The default value of command line argument.
* \param text: The description in command line argument.
*/
#define PADDLE_DEFINE_variable(type, name, default_value, text) \
type FLAGS_##name = default_value; \
namespace paddle_flags_internal { \
paddle::flags_internal::CommandLineFlagRegister<type> \
flags_internal_var_##name(#name, &FLAGS_##name, text); \
} // namespace paddle_flags_internal
/**
* Declare a variable to use.
*/
#define PADDLE_DECLARE_variable(type, name) extern type FLAGS_##name;
// DEFINE macro for each types.
#define P_DEFINE_int32(name, default_value, text) \
PADDLE_DEFINE_variable(int32_t, name, default_value, text)
#define P_DEFINE_bool(name, default_value, text) \
PADDLE_DEFINE_variable(bool, name, default_value, text)
#define P_DEFINE_string(name, default_value, text) \
PADDLE_DEFINE_variable(std::string, name, default_value, text)
#define P_DEFINE_double(name, default_value, text) \
PADDLE_DEFINE_variable(double, name, default_value, text)
#define P_DEFINE_int64(name, default_value, text) \
PADDLE_DEFINE_variable(int64_t, name, default_value, text)
#define P_DEFINE_uint64(name, default_value, text) \
PADDLE_DEFINE_variable(uint64_t, name, default_value, text)
// Declare macro for each types.
#define P_DECLARE_int32(name) PADDLE_DECLARE_variable(int32_t, name)
#define P_DECLARE_bool(name) PADDLE_DECLARE_variable(bool, name)
#define P_DECLARE_string(name) PADDLE_DECLARE_variable(std::string, name)
#define P_DECLARE_double(name) PADDLE_DECLARE_variable(double, name)
#define P_DECLARE_int64(name) PADDLE_DECLARE_variable(int64_t, name)
#define P_DECLARE_uint64(name) PADDLE_DECLARE_variable(uint64_t, name)
} // namespace flags_internal
/**
* \brief Parse command line flags. If parse error, just failed and exit 1.
*
* \param [inout] argc: The command argument count. This method will modify
*argc, and left unused arguments.
* \param [inout] argv: The command argument values. This method will modify
*argv, and left unused arguments.
* \param [in] withHelp: True will parse '-h' and '--help' to print usage.
*
* \note: The Command line flags format basically as follow:
*
* * If the type of flag is not bool, then the follow format of command line
* will be parsed:
* * --flag_name=value
* * -flag_name=value
*
* * If the flag is bool, then:
* * --flag_name=value, -flag_name=value will be parsed.
* * if value.tolower() == "true"| "1" will be treated as true.
* * else if value.tolower() == "false" | "0" will be treated as false.
* * --flag_name will be parsed as true.
* * --noflag_name will be parsed as false.
*/
void ParseCommandLineFlags(int* argc, char** argv, bool withHelp = true);
} // namespace paddle
#else // if use gflags.
#include <gflags/gflags.h>
#define P_DEFINE_int32 DEFINE_int32
#define P_DEFINE_bool DEFINE_bool
#define P_DEFINE_string DEFINE_string
#define P_DEFINE_double DEFINE_double
#define P_DEFINE_int64 DEFINE_int64
#define P_DEFINE_uint64 DEFINE_uint64
#define P_DECLARE_int32 DECLARE_int32
#define P_DECLARE_bool DECLARE_bool
#define P_DECLARE_string DECLARE_string
#define P_DECLARE_double DECLARE_double
#define P_DECLARE_int64 DECLARE_int64
#define P_DECLARE_uint64 DECLARE_uint64
namespace paddle {
void ParseCommandLineFlags(int* argc, char** argv, bool withHelp = true);
} // namespace paddle
#endif
......@@ -16,7 +16,7 @@ limitations under the License. */
#include <iostream>
#include "CommandLineParser.h"
P_DEFINE_bool(
DEFINE_bool(
layer_stack_error_only_current_thread,
true,
"Dump current thread or whole process layer stack when signal error "
......
......@@ -15,65 +15,61 @@ limitations under the License. */
#include "Flags.h"
#ifdef PADDLE_ONLY_CPU
P_DEFINE_bool(use_gpu, false, "Only support CPU training");
DEFINE_bool(use_gpu, false, "Only support CPU training");
#else
P_DEFINE_bool(use_gpu, true, "Whether to use GPU for training");
DEFINE_bool(use_gpu, true, "Whether to use GPU for training");
#endif
P_DEFINE_bool(
parallel_nn,
DEFINE_bool(parallel_nn,
false,
"Whether to use multi-threads to calculate one neural network."
"If it was set false, use gpu_id specify which gpu core to use"
"(the device property in the trainer config file will be ingored)."
"If it was set true, the gpu core is specified by the trainer"
" config file(gpu_id will be ignored).");
P_DEFINE_int32(trainer_count, 1, "How many trainers to train with");
P_DEFINE_int32(gpu_id, 0, "Which gpu core to use");
P_DEFINE_int32(port, 20134, "Listening port for pserver");
P_DEFINE_int32(data_server_port, 21134, "Listening port for dserver");
P_DEFINE_int32(ports_num,
DEFINE_int32(trainer_count, 1, "How many trainers to train with");
DEFINE_int32(gpu_id, 0, "Which gpu core to use");
DEFINE_int32(port, 20134, "Listening port for pserver");
DEFINE_int32(data_server_port, 21134, "Listening port for dserver");
DEFINE_int32(ports_num,
1,
"The ports number for parameter send,"
" increment based on default port number");
P_DEFINE_int32(ports_num_for_sparse,
DEFINE_int32(ports_num_for_sparse,
0,
"The ports number for parameter send,"
" increment based on default (port + ports_num)");
P_DEFINE_string(nics, "xgbe0,xgbe1", "network device name for pservers");
P_DEFINE_string(rdma_tcp, "tcp", "use rdma or tcp rdma transport protocol");
P_DEFINE_int32(
trainer_id,
DEFINE_string(nics, "xgbe0,xgbe1", "network device name for pservers");
DEFINE_string(rdma_tcp, "tcp", "use rdma or tcp rdma transport protocol");
DEFINE_int32(trainer_id,
0,
"For distributed training, each trainer must be given an unique id"
" ranging from 0 to num_trainers-1. Trainer 0 is the master"
" trainer");
P_DEFINE_int32(num_gradient_servers, 1, "number of gradient servers");
P_DEFINE_string(comment, "", "A string for commenting this training task");
P_DEFINE_string(load_missing_parameter_strategy,
DEFINE_int32(num_gradient_servers, 1, "number of gradient servers");
DEFINE_string(comment, "", "A string for commenting this training task");
DEFINE_string(load_missing_parameter_strategy,
"fail",
"which operation to take on load model fails. support "
"fail/rand/zero only.");
P_DEFINE_int32(log_period, 100, "Log progress every so many batches");
P_DEFINE_int32(log_period_server,
DEFINE_int32(log_period, 100, "Log progress every so many batches");
DEFINE_int32(log_period_server,
500,
"Log progress every so many batches at pserver end");
P_DEFINE_double(checkgrad_eps, 1e-5, "parameter change size for checkgrad");
P_DEFINE_int32(enable_parallel_vector,
0,
"threshold for enable parallel vector");
P_DEFINE_bool(loadsave_parameters_in_pserver,
DEFINE_double(checkgrad_eps, 1e-5, "parameter change size for checkgrad");
DEFINE_int32(enable_parallel_vector, 0, "threshold for enabling parallel vector");
DEFINE_bool(loadsave_parameters_in_pserver,
false,
"load and save parameters in pserver. "
"only work while parameter set sparse_remote_update.");
P_DEFINE_int32(beam_size,
DEFINE_int32(beam_size,
1,
"Beam size used in generating most probable output sequences.");
P_DEFINE_bool(show_layer_stat, false, "show the statistics of each layer");
P_DEFINE_string(predict_file, "", "File name for saving predict result");
P_DEFINE_bool(prev_batch_state, false, "batch is continued with the next batch");
P_DEFINE_string(init_model_path,
DEFINE_bool(show_layer_stat, false, "show the statistics of each layer");
DEFINE_string(predict_file, "", "File name for saving predict result");
DEFINE_bool(prev_batch_state, false, "batch is continued with the next batch");
DEFINE_string(init_model_path,
"",
"Path of the initial model parameters."
"If it was set, start_pass will be ignored.");
......@@ -16,28 +16,28 @@ limitations under the License. */
#include "CommandLineParser.h"
P_DECLARE_bool(parallel_nn);
P_DECLARE_int32(async_count);
P_DECLARE_int32(port);
P_DECLARE_int32(data_server_port);
P_DECLARE_bool(use_gpu);
P_DECLARE_int32(gpu_id);
P_DECLARE_int32(trainer_count);
P_DECLARE_int32(ports_num);
P_DECLARE_int32(ports_num_for_sparse);
P_DECLARE_string(nics);
P_DECLARE_string(rdma_tcp);
P_DECLARE_int32(trainer_id);
P_DECLARE_int32(num_gradient_servers);
P_DECLARE_string(comment);
P_DECLARE_string(load_missing_parameter_strategy);
P_DECLARE_int32(log_period);
P_DECLARE_int32(log_period_server);
P_DECLARE_double(checkgrad_eps);
P_DECLARE_int32(enable_parallel_vector);
P_DECLARE_bool(loadsave_parameters_in_pserver);
P_DECLARE_int32(beam_size);
P_DECLARE_bool(show_layer_stat);
P_DECLARE_string(predict_file);
P_DECLARE_bool(prev_batch_state);
P_DECLARE_string(init_model_path);
DECLARE_bool(parallel_nn);
DECLARE_int32(async_count);
DECLARE_int32(port);
DECLARE_int32(data_server_port);
DECLARE_bool(use_gpu);
DECLARE_int32(gpu_id);
DECLARE_int32(trainer_count);
DECLARE_int32(ports_num);
DECLARE_int32(ports_num_for_sparse);
DECLARE_string(nics);
DECLARE_string(rdma_tcp);
DECLARE_int32(trainer_id);
DECLARE_int32(num_gradient_servers);
DECLARE_string(comment);
DECLARE_string(load_missing_parameter_strategy);
DECLARE_int32(log_period);
DECLARE_int32(log_period_server);
DECLARE_double(checkgrad_eps);
DECLARE_int32(enable_parallel_vector);
DECLARE_bool(loadsave_parameters_in_pserver);
DECLARE_int32(beam_size);
DECLARE_bool(show_layer_stat);
DECLARE_string(predict_file);
DECLARE_bool(prev_batch_state);
DECLARE_string(init_model_path);
......@@ -18,175 +18,9 @@ limitations under the License. */
*/
#include "Logging.h"
#ifndef PADDLE_USE_GLOG
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <mutex>
#include <thread>
#include <vector>
#include <fcntl.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <unistd.h>
namespace paddle {
namespace internal {
std::string join(const std::string& part1, const std::string& part2) {
const char sep = '/';
if (!part2.empty() && part2.front() == sep) {
return part2;
}
std::string ret;
ret.reserve(part1.size() + part2.size() + 1);
ret = part1;
if (!ret.empty() && ret.back() != sep) {
ret += sep;
}
ret += part2;
return ret;
}
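// e.g. join("logs", "run1") == "logs/run1", while an absolute second part
// wins: join("logs", "/tmp/run1") == "/tmp/run1".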
static inline bool env2bool(const char* envName, bool defaultValue = false) {
char* envValue = getenv(envName);
if (envValue == nullptr) {
return defaultValue;
} else {
return memchr("tTyY1\0", envValue[0], 6) != nullptr;
}
}
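// Note: the 6-byte search span above includes the terminating '\0', so an
// empty environment variable (envValue[0] == '\0') also counts as true; only
// the first character is inspected ('t', 'T', 'y', 'Y', '1').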
static inline int env2int(const char* envName, int defaultValue = 0) {
char* envValue = getenv(envName);
if (envValue == nullptr) {
return defaultValue;
} else {
int retValue = defaultValue;
try {
retValue = std::stoi(envValue);
} catch (...) {
// pass
}
return retValue;
}
}
static inline int env2index(const char* envName,
const std::vector<std::string>& options,
int defaultValue) {
char* envValue = getenv(envName);
if (envValue == nullptr) {
return defaultValue;
} else {
for (size_t i = 0; i < options.size(); ++i) {
if (options[i] == envValue) {
return static_cast<int>(i);
}
}
return defaultValue;
}
}
static bool gLogToStderr = env2bool("PLOG_LOGTOSTDERR", true);
static const std::vector<std::string> gLevelName = {
"INFO", "WARNING", "ERROR", "FATAL"};
static int gMinLogLevel =
env2int("PLOG_MINLOGLEVEL", env2index("PLOG_MINLOGLEVEL", gLevelName, 0));
static std::vector<std::vector<int>> gLogFds;
static std::vector<int> gLogFileFds;
static bool gLogInited = false;
static void freeLogFileFds() {
for (auto fd : gLogFileFds) {
close(fd);
}
}
static void initializeLogFds(char* argv0) {
gLogFds.resize(NUM_SEVERITIES);
for (int i = gMinLogLevel; i < NUM_SEVERITIES && gLogToStderr;
++i) { // Add stderr
std::vector<int>& fds = gLogFds[i];
fds.push_back(STDERR_FILENO);
}
char* logDir = getenv("PLOG_LOGDIR");
for (int i = gMinLogLevel; i < NUM_SEVERITIES && logDir != nullptr; ++i) {
std::string filename =
join(logDir, std::string(argv0) + "." + gLevelName[i]);
int fd = open(filename.c_str(), O_CREAT | O_WRONLY, 0644);
if (fd == -1) {
fprintf(stderr, "Open log file error!");
exit(1);
}
gLogFileFds.push_back(fd);
std::vector<int>& curFds = gLogFds[i];
curFds.insert(curFds.end(), gLogFileFds.begin(), gLogFileFds.end());
}
atexit(freeLogFileFds);
gLogInited = true;
}
static void (*gFailureFunctionPtr)() ATTR_NORETURN = abort;
LogMessage::LogMessage(const char* fname, int line, int severity)
: fname_(fname), line_(line), severity_(severity) {}
LogMessage::~LogMessage() { this->generateLogMessage(); }
void LogMessage::generateLogMessage() {
if (!gLogInited) {
fprintf(stderr,
"%c %s:%d] %s\n",
"IWEF"[severity_],
fname_,
line_,
str().c_str());
} else {
for (auto& fd : gLogFds[this->severity_]) {
dprintf(fd,
"%c %s:%d] %s\n",
"IWEF"[severity_],
fname_,
line_,
str().c_str());
}
}
}
LogMessageFatal::LogMessageFatal(const char* file, int line)
: LogMessage(file, line, FATAL) {}
LogMessageFatal::~LogMessageFatal() {
generateLogMessage();
gFailureFunctionPtr();
}
} // namespace internal
void initializeLogging(int argc, char** argv) {
internal::initializeLogFds(argv[0]);
}
namespace logging {
void setMinLogLevel(int level) { paddle::internal::gMinLogLevel = level; }
void installFailureFunction(void (*callback)() ATTR_NORETURN) {
paddle::internal::gFailureFunctionPtr = callback;
}
} // namespace logging
} // namespace paddle
#else
namespace paddle {
void initializeLogging(int argc, char** argv) {
(void)(argc);
if (!getenv("GLOG_logtostderr")) {
......@@ -197,13 +31,16 @@ void initializeLogging(int argc, char** argv) {
}
namespace logging {
void setMinLogLevel(int level) { FLAGS_minloglevel = level; }
void installFailureFunction(void (*callback)()) {
google::InstallFailureFunction(callback);
}
void installFailureWriter(void (*callback)(const char*, int)) {
google::InstallFailureWriter(callback);
}
} // namespace logging
} // namespace paddle
#endif
......@@ -22,175 +22,21 @@ limitations under the License. */
#include <sstream>
#include <string>
#ifndef PADDLE_USE_GLOG
#include "CompilerMacros.h"
//! TODO(yuyang18): Move this utility macro into some global header.
#define PP_CAT(a, b) PP_CAT_I(a, b)
#define PP_CAT_I(a, b) PP_CAT_II(~, a##b)
#define PP_CAT_II(p, res) res
/**
 * Generate a unique variable name; useful inside macros.
* @SEE
* http://stackoverflow.com/questions/1082192/how-to-generate-random-variable-names-in-c-using-macros
*/
#define UNIQUE_NAME(base) PP_CAT(base, __LINE__)
#include <glog/logging.h>
namespace paddle {
//! Log levels.
const int INFO = 0;
const int WARNING = 1;
const int ERROR = 2;
const int FATAL = 3;
const int NUM_SEVERITIES = 4;
namespace internal {
class LogMessage : public std::basic_ostringstream<char> {
public:
LogMessage(const char* fname, int line, int severity);
~LogMessage();
protected:
/**
* @brief Print log message to stderr, files, etc.
*/
void generateLogMessage();
private:
const char* fname_;
int line_;
int severity_;
};
// LogMessageFatal ensures the process will exit in failure after
// logging this message.
class LogMessageFatal : public LogMessage {
public:
LogMessageFatal(const char* file, int line) __attribute__((cold));
~LogMessageFatal() __attribute__((noreturn));
};
#define _P_LOG_INFO \
::paddle::internal::LogMessage(__FILE__, __LINE__, paddle::INFO)
#define _P_LOG_WARNING \
::paddle::internal::LogMessage(__FILE__, __LINE__, paddle::WARNING)
#define _P_LOG_ERROR \
::paddle::internal::LogMessage(__FILE__, __LINE__, paddle::ERROR)
#define _P_LOG_FATAL ::paddle::internal::LogMessageFatal(__FILE__, __LINE__)
#define P_LOG(severity) _P_LOG_##severity
#define P_LOG_FIRST_N(severity, n) \
static int UNIQUE_NAME(LOG_OCCURRENCES) = 0; \
if (UNIQUE_NAME(LOG_OCCURRENCES) <= n) ++UNIQUE_NAME(LOG_OCCURRENCES); \
if (UNIQUE_NAME(LOG_OCCURRENCES) <= n) P_LOG(severity)
#define P_LOG_IF_EVERY_N(severity, condition, n) \
static int UNIQUE_NAME(LOG_OCCURRENCES) = 0; \
if (condition && ((UNIQUE_NAME(LOG_OCCURRENCES) = \
(UNIQUE_NAME(LOG_OCCURRENCES) + 1) % n) == (1 % n))) \
P_LOG(severity)
#define P_LOG_EVERY_N(severity, n) P_LOG_IF_EVERY_N(severity, true, n)
// TODO(jeff): Define a proper implementation of VLOG_IS_ON
#define P_VLOG_IS_ON(lvl) ((lvl) <= 0)
#define P_LOG_IF(severity, condition) \
if (condition) P_LOG(severity)
#define P_VLOG(lvl) P_LOG_IF(INFO, P_VLOG_IS_ON(lvl))
#define P_VLOG_IF(lvl, cond) P_LOG_IF(INFO, P_VLOG_IS_ON(lvl) && cond)
#define P_VLOG_EVERY_N(lvl, n) P_LOG_IF_EVERY_N(INFO, P_VLOG_IS_ON(lvl), n)
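// Usage sketch (illustrative): P_LOG_FIRST_N(WARNING, 10) << "..." emits only
// the first 10 occurrences at a call site; P_LOG_EVERY_N(INFO, 100) << "..."
// emits on the 1st, 101st, 201st, ... pass; and since P_VLOG_IS_ON(lvl) is
// stubbed to (lvl <= 0), P_VLOG(1) and above compile to no output.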
#define PREDICT_FALSE(x) (__builtin_expect(x, 0))
#define PREDICT_TRUE(x) (__builtin_expect(!!(x), 1))
// CHECK dies with a fatal error if condition is not true. It is *not*
// controlled by NDEBUG, so the check will be executed regardless of
// compilation mode. Therefore, it is safe to do things like:
// CHECK(fp->Write(x) == 4)
#define P_CHECK(condition) \
if (PREDICT_FALSE(!(condition))) \
P_LOG(FATAL) << "Check failed: " #condition " "
#define P_CHECK_EQ(val1, val2) P_CHECK((val1) == (val2))
#define P_CHECK_NE(val1, val2) P_CHECK((val1) != (val2))
#define P_CHECK_LE(val1, val2) P_CHECK((val1) <= (val2))
#define P_CHECK_LT(val1, val2) P_CHECK((val1) < (val2))
#define P_CHECK_GE(val1, val2) P_CHECK((val1) >= (val2))
#define P_CHECK_GT(val1, val2) P_CHECK((val1) > (val2))
#define P_CHECK_NOTNULL(val) P_CHECK((val) != NULL)
//! GLOG compatible APIs
//! NOTE: only the APIs that Paddle actually uses are implemented.
#define LOG(x) P_LOG(x)
#define VLOG(x) P_VLOG(x)
#define DLOG(x) P_VLOG(5)
#define CHECK(x) P_CHECK(x)
#define PCHECK(x) P_CHECK(x)
#define CHECK_EQ(val1, val2) P_CHECK((val1) == (val2))
#define CHECK_NE(val1, val2) P_CHECK((val1) != (val2))
#define CHECK_LE(val1, val2) P_CHECK((val1) <= (val2))
#define CHECK_LT(val1, val2) P_CHECK((val1) < (val2))
#define CHECK_GE(val1, val2) P_CHECK((val1) >= (val2))
#define CHECK_GT(val1, val2) P_CHECK((val1) > (val2))
#define CHECK_NOTNULL(val) P_CHECK((val) != NULL)
#define VLOG_IS_ON(x) P_VLOG_IS_ON(x)
#define LOG_FIRST_N(severity, n) P_LOG_FIRST_N(severity, n)
#define LOG_IF(severity, condition) P_LOG_IF(severity, condition)
#define VLOG_EVERY_N(lvl, n) P_VLOG_EVERY_N(lvl, n)
#define VLOG_IF(lvl, cond) P_VLOG_IF(lvl, cond)
#define LOG_EVERY_N(severity, n) P_LOG_EVERY_N(severity, n)
} // namespace internal
/**
* @brief initialize logging
 * @note: The current logging implementation lacks:
 *          printing the call stack on fatal errors,
 *          VLOG_IS_ON.
 *        But it is portable across platforms and simple enough to modify.
*/
void initializeLogging(int argc, char** argv);
namespace logging {
/**
 * @brief Set the minimum log level. Logs with level < minLogLevel are not
 * printed to the stream.
 * @param level Any integer is accepted, but only 0 <= level <= NUM_SEVERITIES
 * is useful.
*/
void setMinLogLevel(int level);
/**
* @brief Install Log(Fatal) failure function. Default is abort();
* @param callback: The failure function.
*/
void installFailureFunction(void (*callback)() ATTR_NORETURN);
/**
* @brief installFailureWriter
* @note: not implemented currently.
*/
inline void installFailureWriter(void (*callback)(const char*, int)) {
(void)(callback); // unused callback.
}
} // namespace logging
} // namespace paddle
#else
#include <glog/logging.h>
namespace paddle {
void initializeLogging(int argc, char** argv);
namespace logging {
void setMinLogLevel(int level);
void installFailureFunction(void (*callback)());
void installFailureWriter(void (*callback)(const char*, int));
} // namespace logging
}
#endif // PADDLE_USE_GLOG
} // namespace paddle
#ifndef NDEBUG
#define DEBUG_LEVEL 5
......
......@@ -20,8 +20,8 @@ namespace paddle {
#ifdef PADDLE_NO_PYTHON
P_DEFINE_string(python_path, "", "python path");
P_DEFINE_string(python_bin, "python2.7", "python bin");
DEFINE_string(python_path, "", "python path");
DEFINE_string(python_bin, "python2.7", "python bin");
constexpr int kExecuteCMDBufLength = 204800;
......
......@@ -16,7 +16,7 @@ limitations under the License. */
#include "CommandLineParser.h"
#include "Util.h"
P_DEFINE_bool(thread_local_rand_use_global_seed,
DEFINE_bool(thread_local_rand_use_global_seed,
false,
"Whether to use global seed in thread local rand.");
......
......@@ -33,7 +33,7 @@ limitations under the License. */
#include "ThreadLocal.h"
#include "Version.h"
P_DEFINE_int32(seed, 1, "random number seed. 0 for srand(time)");
DEFINE_int32(seed, 1, "random number seed. 0 for srand(time)");
#ifdef WITH_GOOGLE_PERFTOOLS
/*
......@@ -52,10 +52,8 @@ P_DEFINE_int32(seed, 1, "random number seed. 0 for srand(time)");
#include <gperftools/profiler.h>
P_DEFINE_int32(profile_signal, 12, "signal for switch google profiler");
P_DEFINE_string(profile_data_file,
"gperf.prof",
"file for storing profile data");
DEFINE_int32(profile_signal, 12, "signal for switch google profiler");
DEFINE_string(profile_data_file, "gperf.prof", "file for storing profile data");
static void profilerSwitch(int signalNumber) {
  static bool started = false;
......
......@@ -18,13 +18,8 @@ limitations under the License. */
#include <numeric>
#include "Flags.h"
#include "Util.h"
//! TODO(yuyang18) in gflags, version has another define. Use another flag
//! instead.
#ifndef PADDLE_USE_GFLAGS
P_DEFINE_bool(version, false, "print version");
#else
P_DECLARE_bool(version);
#endif
DECLARE_bool(version);
namespace paddle {
namespace version {
......
add_simple_unittest(test_CommandLineParser)
add_simple_unittest(test_Logging)
add_simple_unittest(test_Thread)
add_simple_unittest(test_StringUtils)
add_simple_unittest(test_CustomStackTrace)
......
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#ifndef PADDLE_USE_GFLAGS
//! Test the command line parser of Paddle's internal implementation.
#include <gtest/gtest.h>
#include <paddle/utils/CommandLineParser.h>
P_DEFINE_int32(i1, 1, "test int flag 1");
P_DEFINE_int32(i2, 2, "test int flag 2");
P_DEFINE_string(str1, "1", "test str flag 1");
P_DEFINE_string(str2, "2", "test str flag 2");
P_DEFINE_bool(b1, true, "test bool flag 1");
P_DEFINE_bool(b2, false, "test bool flag 2");
P_DEFINE_double(d1, 0.1, "test double flag 1");
P_DEFINE_double(d2, -42.3, "test double flag 2");
P_DEFINE_int64(l1, 1, "test int64 flag 1");
P_DEFINE_int64(l2, 2, "test int64 flag 2");
P_DEFINE_uint64(ul1, 32, "test uint64 flag 1");
P_DEFINE_uint64(ul2, 33, "test uint64 flag 2");
constexpr double EPSILON = 1e-5;
#define cc(x) const_cast<char*>((x))
TEST(CommandLineParser, defaultValue) {
char* argv[] = {cc("test_program"), cc("--unused_flag=134")};
int argc = sizeof(argv) / sizeof(char*);
paddle::ParseCommandLineFlags(&argc, argv);
// Check Default Value
ASSERT_EQ(argc, 2);
ASSERT_EQ(FLAGS_i1, 1);
ASSERT_EQ(FLAGS_i2, 2);
ASSERT_EQ(FLAGS_str1, "1");
ASSERT_EQ(FLAGS_str2, "2");
ASSERT_EQ(FLAGS_b1, true);
ASSERT_EQ(FLAGS_b2, false);
ASSERT_NEAR(FLAGS_d1, 0.1, EPSILON);
ASSERT_NEAR(FLAGS_d2, -42.3, EPSILON);
ASSERT_EQ(FLAGS_i1, 1);
ASSERT_EQ(FLAGS_i2, 2);
ASSERT_EQ(FLAGS_ul1, 32UL);
ASSERT_EQ(FLAGS_ul2, 33UL);
}
TEST(CommandLineParser, normal) {
char* argv[] = {cc("test_program"),
cc("--i2=32"),
cc("--str1=abc"),
cc("--b2=1"),
cc("-b1=False"),
cc("--d2=.34"),
cc("--d1=0"),
cc("--l1=-12345678901234"),
cc("-ul2=3212")};
int argc = sizeof(argv) / sizeof(char*);
paddle::ParseCommandLineFlags(&argc, argv);
ASSERT_EQ(argc, 1);
ASSERT_EQ(FLAGS_i2, 32);
ASSERT_EQ(FLAGS_str1, "abc");
ASSERT_EQ(FLAGS_b2, true);
ASSERT_EQ(FLAGS_b1, false);
ASSERT_NEAR(FLAGS_d2, 0.34, EPSILON);
ASSERT_NEAR(FLAGS_d1, 0.0, EPSILON);
ASSERT_EQ(FLAGS_l1, -12345678901234);
ASSERT_EQ(FLAGS_ul2, 3212UL);
}
TEST(CommandLineParser, printHelp) {
char* argv[] = {cc("test_program"), cc("--help")};
int argc = sizeof(argv) / sizeof(char*);
// Will Print Usage
ASSERT_DEATH(paddle::ParseCommandLineFlags(&argc, argv), ".*test_program.*");
}
TEST(CommandLineParser, parseError) {
char* argv[] = {cc("test_program"), cc("--i1=abc")};
int argc = sizeof(argv) / sizeof(char*);
ASSERT_DEATH(
paddle::ParseCommandLineFlags(&argc, argv),
"Parse command flag i1 error! User input is --i1=abc.*test_program.*");
}
int main(int argc, char** argv) {
testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();
}
#else
int main(int argc, char** argv) { return 0; }
#endif
......@@ -20,7 +20,7 @@ limitations under the License. */
#include "paddle/utils/Locks.h"
#include "paddle/utils/Util.h"
P_DEFINE_int32(test_thread_num, 10, "testing thread number");
DEFINE_int32(test_thread_num, 10, "testing thread number");
void testNormalImpl(
const std::function<void(paddle::CustomStackTrace<std::string>&,
......
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
/*
* Basically from tensorflow/core/platform/default/logging.cc
 * Used in embedded systems where there is no glog.
*/
#include <dirent.h>
#include <gtest/gtest.h>
#include <stdlib.h>
#include <fstream>
#include "paddle/utils/Logging.h"
#include "paddle/utils/Util.h"
#ifndef PADDLE_USE_GLOG
TEST(Logging, BasicalLog) {
auto pinfo = [] {
P_LOG(INFO) << "INFO";
exit(1);
};
ASSERT_DEATH(pinfo(), "I .*test_Logging.cpp:[0-9]+] INFO");
auto pwarn = [] {
P_LOG(WARNING) << "WARN";
exit(1);
};
ASSERT_DEATH(pwarn(), "W .*test_Logging.cpp:[0-9]+] WARN");
auto perr = [] {
P_LOG(ERROR) << "ERROR";
exit(1);
};
ASSERT_DEATH(perr(), "E .*test_Logging.cpp:[0-9]+] ERROR");
auto pfatal = [] { P_LOG(FATAL) << "FATAL"; };
ASSERT_DEATH(pfatal(), "F .*test_Logging.cpp:[0-9]+] FATAL");
}
TEST(Logging, Check) {
int a = 1;
int b = 2;
P_CHECK(a != b);
auto pcheckDown = [&] { P_CHECK(a == b); };
ASSERT_DEATH(pcheckDown(),
"F .*test_Logging.cpp:[0-9]+] Check failed: a == b ");
P_CHECK_LE(a, b);
P_CHECK_LT(a, b);
double t = 1.2;
P_CHECK_LE(a, t);
double* ptr = nullptr;
auto pcheckDown2 = [&] { P_CHECK_NOTNULL(ptr); };
ASSERT_DEATH(pcheckDown2(), "F");
}
#define cc(x) const_cast<char*>(x)
TEST(Logging, LogToStderr) {
auto logToStderrCallback = [] {
setenv("PLOG_LOGTOSTDERR", "0", true);
char* argv[] = {cc("test")};
paddle::initializeLogging(1, argv);
P_LOG(INFO) << "This output will not print to std error";
exit(1);
};
ASSERT_DEATH(logToStderrCallback(), "");
}
constexpr char kLogDirName[] = "./test_log_dir";
const std::vector<std::string> kLevels = {"INFO", "WARNING", "ERROR", "FATAL"};
TEST(Logging, LogToDir) {
ASSERT_EQ(0, mkdir(kLogDirName, 0777));
auto logToDirCallback = [] {
setenv("PLOG_LOGTOSTDERR", "0", true);
setenv("PLOG_LOGDIR", kLogDirName, true);
char* argv[] = {cc("test")};
paddle::initializeLogging(1, argv);
P_LOG(INFO) << "INFO";
P_LOG(WARNING) << "WARNING";
P_LOG(ERROR) << "ERROR";
P_LOG(FATAL) << "FATAL";
};
ASSERT_DEATH(logToDirCallback(), "");
  // There should be 4 files in the log dir.
auto dir = opendir(kLogDirName);
size_t fileCount = 0;
std::vector<std::string> filenames;
for (auto dirContent = readdir(dir); dirContent != nullptr;
dirContent = readdir(dir)) {
std::string filename(dirContent->d_name);
if (filename == "." || filename == "..") {
continue;
} else {
++fileCount;
for (size_t i = 0; i < kLevels.size(); ++i) {
const std::string& curLevel = kLevels[i];
if (filename.size() > curLevel.length()) {
size_t diff = filename.size() - curLevel.length();
size_t j = 0;
for (; j < curLevel.length(); ++j) {
if (filename[j + diff] != curLevel[j]) {
              // Suffix differs; stop comparing.
break;
}
}
if (j == curLevel.length()) { // Same suffix.
std::ifstream fin;
auto fn = paddle::path::join(kLogDirName, filename);
fin.open(fn);
filenames.push_back(fn);
ASSERT_TRUE(fin.is_open());
size_t lineCounter = 0;
for (std::string line; std::getline(fin, line); ++lineCounter) {
            // Do nothing; just count the lines.
}
          // For example, the INFO file receives every log whose
          // level >= INFO, so its lineCounter should be 4.
ASSERT_EQ(kLevels.size() - i, lineCounter);
fin.close();
}
}
}
}
}
closedir(dir);
ASSERT_EQ(4UL, fileCount); // 4 levels.
  // Clean up after the unittest.
for (std::string& fn : filenames) {
ASSERT_EQ(remove(fn.c_str()), 0);
}
ASSERT_EQ(rmdir(kLogDirName), 0);
}
int main(int argc, char** argv) {
testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();
}
#else
int main(int, char**) { return 0; }
#endif
......@@ -19,7 +19,7 @@ limitations under the License. */
#include "paddle/utils/Logging.h"
#include "paddle/utils/Util.h"
P_DEFINE_int32(test_thread_num, 100, "testing thread number");
DEFINE_int32(test_thread_num, 100, "testing thread number");
void testNormalImpl(
size_t thread_num,
......
......@@ -20,7 +20,7 @@ limitations under the License. */
#include "paddle/utils/Logging.h"
#include "paddle/utils/Util.h"
P_DEFINE_int32(test_thread_num, 100, "testing thread number");
DEFINE_int32(test_thread_num, 100, "testing thread number");
void testNormalImpl(
size_t thread_num,
......
......@@ -245,7 +245,7 @@ message ImageConfig {
// The size of input feature map.
required uint32 img_size = 8;
required uint32 img_size_y = 9;
optional uint32 img_size_y = 9;
}
message LayerInputConfig {
......
......@@ -171,7 +171,7 @@ def define_py_data_sources2(train_list, test_list, module, obj, args=None):
obj="process",
args={"dictionary": dict_name})
The related data provider can refer to :ref:`api_pydataprovider2_en_sequential_model` .
The related data provider can refer to :ref:`api_pydataprovider2_sequential_model` .
:param train_list: Train list name.
:type train_list: basestring
......
# Bazel (http://bazel.io/) BUILD file for gflags.
#
# See INSTALL.md for instructions for adding gflags to a Bazel workspace.
licenses(["notice"])
exports_files(["src/gflags_complections.sh", "COPYING.txt"])
load(":bazel/gflags.bzl", "gflags_sources", "gflags_library")
(hdrs, srcs) = gflags_sources(namespace=["google", "gflags"])
gflags_library(hdrs=hdrs, srcs=srcs, threads=0)
gflags_library(hdrs=hdrs, srcs=srcs, threads=1)
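# The two calls above presumably build the single-threaded and threaded
# variants of the library (per gflags.bzl, threads=1 should yield :gflags and
# threads=0 :gflags_nothreads); the test BUILD file below depends on
# @gflags//:gflags, i.e. the threaded one.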
licenses(["notice"]) # Apache 2.0
cc_test(
name="gflags_test",
srcs=["gflags_test.cc"],
copts=["-Iexternal/gtest/include"],
deps=[
"@gtest//:gtest",
"@gflags//:gflags",
], )
#include <iostream>
#include <string>
#include "gflags/gflags.h"
#include "gtest/gtest.h"
DEFINE_bool(verbose, false, "Display program name before message");
DEFINE_string(message, "Hello world!", "Message to print");
static bool IsNonEmptyMessage(const char *flagname, const std::string &value) {
return value[0] != '\0';
}
DEFINE_validator(message, &IsNonEmptyMessage);
namespace third_party {
namespace gflags_test {
TEST(GflagsTest, ParseAndPrint) {
gflags::SetUsageMessage("some usage message");
gflags::SetVersionString("1.0.0");
int argc = 1;
char program_name[] = "gflags_test";
char **argv = new char *[2];
argv[0] = program_name;
argv[1] = NULL;
gflags::ParseCommandLineFlags(&argc, reinterpret_cast<char ***>(&argv), true);
EXPECT_EQ("gflags_test", std::string(gflags::ProgramInvocationShortName()));
EXPECT_EQ("Hello world!", FLAGS_message);
gflags::ShutDownCommandLineFlags();
}
} // namespace gflags_test
} // namespace third_party
licenses(["notice"])
cc_library(
visibility=["//visibility:public"],
name="glog",
includes=[
".",
"src",
],
copts=[
"-D_START_GOOGLE_NAMESPACE_='namespace google {'",
"-D_END_GOOGLE_NAMESPACE_='}'",
"-DGOOGLE_NAMESPACE='google'",
"-DGOOGLE_GLOG_DLL_DECL=''",
"-DHAVE_DLADDR",
"-DHAVE_SNPRINTF",
"-DHAVE_DLFCN_H",
"-DHAVE_FCNTL",
"-DHAVE_GLOB_H",
"-DHAVE_INTTYPES_H",
"-DHAVE_LIBPTHREAD",
"-DHAVE_SYS_SYSCALL_H",
"-DHAVE_MEMORY_H",
"-DHAVE_NAMESPACES",
"-DHAVE_PREAD",
"-DHAVE_PTHREAD",
"-DHAVE_PWD_H",
"-DHAVE_PWRITE",
"-DHAVE_RWLOCK",
"-DHAVE_SIGACTION",
"-DHAVE_SIGALTSTACK",
"-DHAVE_STDINT_H",
"-DHAVE_STRING_H",
"-DHAVE_SYS_TIME_H",
"-DHAVE_SYS_TYPES_H",
"-DHAVE_SYS_UCONTEXT_H",
"-DHAVE_SYS_UTSNAME_H",
"-DHAVE_UNISTD_H",
"-DHAVE_USING_OPERATOR",
"-DHAVE_HAVE___ATTRIBUTE___",
"-DHAVE_HAVE___BUILTIN_EXPECT",
#"-DNO_FRAME_POINTER",
"-D_GNU_SOURCE",
#"-fno-sanitize=thread",
#"-fno-sanitize=address",
"-Iexternal/glog/src",
],
srcs=[
"src/demangle.cc",
"src/logging.cc",
"src/raw_logging.cc",
"src/signalhandler.cc",
"src/symbolize.cc",
"src/utilities.cc",
"src/vlog_is_on.cc",
":config_h",
":logging_h",
":raw_logging_h",
":stl_logging_h",
":vlog_is_on_h",
],
hdrs=[
"src/demangle.h",
"src/mock-log.h",
"src/stacktrace.h",
"src/symbolize.h",
"src/utilities.h",
"src/base/commandlineflags.h",
"src/base/googleinit.h",
"src/base/mutex.h",
"src/glog/log_severity.h",
])
genrule(
name="config_h",
srcs=["src/config.h.cmake.in"],
outs=["config.h"],
cmd="awk '{ gsub(/^#cmakedefine/, \"//cmakedefine\"); print; }' $(<) > $(@)",
)
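# Illustrative effect of the awk command above: a template line such as
#   #cmakedefine HAVE_LIBPTHREAD
# becomes
#   //cmakedefine HAVE_LIBPTHREAD
# so the generated config.h defines nothing by itself; every HAVE_* switch
# comes from the -D copts listed on the cc_library instead.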
genrule(
name="logging_h",
srcs=["src/glog/logging.h.in"],
outs=["glog/logging.h"],
cmd="$(location :gen_sh) < $(<) > $(@)",
tools=[":gen_sh"])
genrule(
name="raw_logging_h",
srcs=["src/glog/raw_logging.h.in"],
outs=["glog/raw_logging.h"],
cmd="$(location :gen_sh) < $(<) > $(@)",
tools=[":gen_sh"])
genrule(
name="stl_logging_h",
srcs=["src/glog/stl_logging.h.in"],
outs=["glog/stl_logging.h"],
cmd="$(location :gen_sh) < $(<) > $(@)",
tools=[":gen_sh"])
genrule(
name="vlog_is_on_h",
srcs=["src/glog/vlog_is_on.h.in"],
outs=["glog/vlog_is_on.h"],
cmd="$(location :gen_sh) < $(<) > $(@)",
tools=[":gen_sh"])
genrule(
name="gen_sh",
outs=["gen.sh"],
cmd="""
cat > $@ <<"EOF"
#! /bin/sh
sed -e 's/@ac_cv_have_unistd_h@/1/g' \
-e 's/@ac_cv_have_stdint_h@/1/g' \
-e 's/@ac_cv_have_systypes_h@/1/g' \
-e 's/@ac_cv_have_libgflags_h@/1/g' \
-e 's/@ac_cv_have_uint16_t@/1/g' \
-e 's/@ac_cv_have___builtin_expect@/1/g' \
-e 's/@ac_cv_have_.*@/0/g' \
-e 's/@ac_google_start_namespace@/namespace google {/g' \
-e 's/@ac_google_end_namespace@/}/g' \
-e 's/@ac_google_namespace@/google/g' \
-e 's/@ac_cv___attribute___noinline@/__attribute__((noinline))/g' \
-e 's/@ac_cv___attribute___noreturn@/__attribute__((noreturn))/g' \
-e 's/@ac_cv___attribute___printf_4_5@/__attribute__((__format__ (__printf__, 4, 5)))/g'
EOF""")
licenses(["notice"]) # Apache 2.0
cc_test(
name="glog_test",
srcs=["glog_test.cc"],
copts=["-Iexternal/gtest/include"],
deps=[
"@gtest//:gtest",
"@glog//:glog",
], )
#include <iostream>
#include <string>
#include "glog/logging.h"
#include "gtest/gtest.h"
TEST(GlogTest, Logging) { LOG(INFO) << "Hello world"; }
cc_library(
name="main",
name="gtest",
srcs=glob(
["src/*.cc"], exclude=["src/gtest-all.cc"]),
hdrs=glob(["include/**/*.h", "src/*.h"]),
......
......@@ -19,6 +19,6 @@ cc_test(
srcs=["example_lib_test.cc"],
copts=["-Iexternal/gtest/include"],
deps=[
"@gtest//:main",
"@gtest//:gtest",
":example_lib",
], )