diff --git a/doc/.buildinfo b/doc/.buildinfo
index fc2ad92e9f6e2e04e778695c81e30b318e6208ee..765cc9cc2194d8c057838a7f05829970145b0b80 100644
--- a/doc/.buildinfo
+++ b/doc/.buildinfo
@@ -1,4 +1,4 @@
# Sphinx build info version 1
# This file hashes the configuration used when building these files. When it is not found, a full rebuild will be done.
-config: 70a318b9e7a63a79aedc16f559247671
+config: abb235454c522821afda02c2aa921d6f
tags: 645f666f9bcd5a90fca523b33c5a78b7
diff --git a/doc/_images/parameters.png b/doc/_images/parameters.png
new file mode 100644
index 0000000000000000000000000000000000000000..2ec67480951e21f0400bce1c34b3108dcd65c18c
Binary files /dev/null and b/doc/_images/parameters.png differ
diff --git a/doc/_sources/build/build_from_source.txt b/doc/_sources/build/build_from_source.txt
index c671f483863c7466a13fac3943e8d58eb74866fc..b8f26f431eb7a04147fe791a8c805427c827fe09 100644
--- a/doc/_sources/build/build_from_source.txt
+++ b/doc/_sources/build/build_from_source.txt
@@ -1,10 +1,9 @@
Installing from Sources
-=================
+==========================
* [1. Download and Setup](#download)
* [2. Requirements](#requirements)
* [3. Build on Ubuntu](#ubuntu)
-* [4. Build on Mac OS X](#mac)
## Download and Setup
You can download PaddlePaddle from the [github source](https://github.com/gangliao/Paddle).
@@ -28,51 +27,26 @@ To compile the source code, your computer must be equipped with GCC >=4.6 or Cla
PaddlePaddle supports some build options. To enable it, first you need to install the related libraries.
-
-
-
- Optional |
- Description |
-
-
- WITH_GPU |
- Compile with GPU mode. |
-
-
- WITH_DOUBLE |
- Compile with double precision floating-point, default: single precision. |
-
-
- WITH_GLOG |
- Compile with glog. If not found, default: an internal log implementation. |
-
-
- WITH_GFLAGS |
- Compile with gflags. If not found, default: an internal flag implementation. |
-
-
- WITH_TESTING |
- Compile with gtest for PaddlePaddle's unit testing. |
-
-
- WITH_DOC |
- Compile to generate PaddlePaddle's docs, default: disabled (OFF) |
-
-
- WITH_SWIG_PY |
- Compile with python predict API, default: disabled (OFF). |
-
-
- WITH_STYLE_CHECK |
- Compile with code style check, default: enabled (ON). |
-
+
+
+
+
+Optional |
+Description |
+
+
+
+WITH_GPU | Compile with GPU mode. |
+WITH_DOUBLE | Compile with double precision floating-point, default: single precision. |
+WITH_GLOG | Compile with glog. If not found, default: an internal log implementation. |
+WITH_GFLAGS | Compile with gflags. If not found, default: an internal flag implementation. |
+WITH_TESTING | Compile with gtest for PaddlePaddle's unit testing. |
+WITH_DOC | Compile to generate PaddlePaddle's docs, default: disabled (OFF). |
+WITH_SWIG_PY | Compile with python predict API, default: disabled (OFF). |
+WITH_STYLE_CHECK | Compile with code style check, default: enabled (ON). |
+
+
**Note:**
- The GPU version works best with Cuda Toolkit 7.5 and cuDNN v5.
@@ -178,12 +152,12 @@ As a simple example, consider the following:
- **Only CPU**
```bash
- cmake .. -DWITH_GPU=OFF -DWITH_DOC=OFF
+ cmake .. -DWITH_GPU=OFF
```
- **GPU**
```bash
- cmake .. -DWITH_GPU=ON -DWITH_DOC=OFF
+ cmake .. -DWITH_GPU=ON
```
- **GPU with doc and swig**
@@ -196,7 +170,7 @@ Finally, you can build PaddlePaddle:
```bash
# you can add build option here, such as:
-cmake .. -DWITH_GPU=ON -DWITH_DOC=OFF -DCMAKE_INSTALL_PREFIX=
+cmake .. -DWITH_GPU=ON -DCMAKE_INSTALL_PREFIX=
# please use sudo make install, if you want to install PaddlePaddle into the system
make -j `nproc` && make install
# set PaddlePaddle installation path in ~/.bashrc
@@ -216,122 +190,3 @@ sudo pip install /opt/paddle/share/wheels/*.whl
# or just run
sudo paddle version
```
-
-## Building on Mac OS X
-
-### Prerequisites
-This guide is based on Mac OS X 10.11 (El Capitan). Note that if you are running an up to date version of OS X,
-you will already have Python 2.7.10 and Numpy 1.8 installed.
-
-The best option is to use the package manager homebrew to handle installations and upgrades for you.
-To install [homebrew](http://brew.sh/), first open a terminal window (you can find Terminal in the Utilities folder in Applications), and issue the command:
-
-```bash
-# install brew
-/usr/bin/ruby -e "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install)"
-# install pip
-easy_install pip
-```
-
-### Install Dependencies
-
-- **CPU Dependencies**
-
- ```bash
- # Install fundamental dependents
- brew install glog gflags cmake protobuf openblas
-
- # Install google test on Mac OS X
- # Download gtest 1.7.0
- wget https://github.com/google/googletest/archive/release-1.7.0.tar.gz
- tar -xvf googletest-release-1.7.0.tar.gz && cd googletest-release-1.7.0
- # Build gtest
- mkdir build && cmake ..
- make
- # Install gtest library
- sudo cp -r ../include/gtest /usr/local/include/
- sudo cp lib*.a /usr/local/lib
- ```
-
-- **GPU Dependencies(optional)**
-
- To build GPU version, you will need the following installed:
-
- 1. a CUDA-capable GPU
- 2. Mac OS X 10.11 or later
- 2. the Clang compiler and toolchain installed using Xcode
- 3. NVIDIA CUDA Toolkit (available at http://developer.nvidia.com/cuda-downloads)
- 4. NVIDIA cuDNN Library (availabel at https://developer.nvidia.com/cudnn)
-
- The CUDA development environment relies on tight integration with the host development environment,
- including the host compiler and C runtime libraries, and is therefore only supported on
- distribution versions that have been qualified for this CUDA Toolkit release.
-
- 1. After downloading cuDNN library, issue the following commands:
-
- ```bash
- sudo tar -xzf cudnn-7.5-osx-x64-v5.0-ga.tgz -C /usr/local
- sudo chmod a+r /usr/local/cuda/include/cudnn.h /usr/local/cuda/lib64/libcudnn*
- ```
- 2. Then you need to set DYLD\_LIBRARY\_PATH, PATH environment variables in ~/.bashrc.
-
- ```bash
- export DYLD_LIBRARY_PATH=/usr/local/cuda/lib:$DYLD_LIBRARY_PATH
- export PATH=/usr/local/cuda/bin:$PATH
- ```
-
-### Build and Install
-
-As usual, the best option is to create build folder under paddle project directory.
-
-```bash
-mkdir build && cd build
-cmake ..
-```
-
-CMake first check PaddlePaddle's dependencies in system default path. After installing some optional
-libraries, corresponding build option will be set automatically (for instance, glog, gtest and gflags).
-If still not found, you can manually set it based on CMake error information from your screen.
-
-As a simple example, consider the following:
-
-- **Only CPU**
-
- ```bash
- cmake .. -DWITH_GPU=OFF -DWITH_DOC=OFF
- ```
-- **GPU**
-
- ```bash
- cmake .. -DWITH_GPU=ON -DWITH_DOC=OFF
- ```
-
-- **GPU with doc and swig**
-
- ```bash
- cmake .. -DWITH_GPU=ON -DWITH_DOC=ON -DWITH_SWIG_PY=ON
- ```
-
-Finally, you can build PaddlePaddle:
-
-```bash
-# you can add build option here, such as:
-cmake .. -DWITH_GPU=ON -DWITH_DOC=OFF -DCMAKE_INSTALL_PREFIX=
-# please use sudo make install, if you want to install PaddlePaddle into the system
-make -j `nproc` && make install
-# set PaddlePaddle installation path in ~/.bashrc
-export PATH=/bin:$PATH
-```
-**Note:**
-
-If you set `WITH_SWIG_PY=ON`, related python dependencies also need to be installed.
-Otherwise, PaddlePaddle will automatically install python dependencies
-at first time when user run paddle commands, such as `paddle version`, `paddle train`.
-It may require sudo privileges:
-
-```bash
-# you can run
-sudo pip install /opt/paddle/share/wheels/*.whl
-# or just run
-sudo paddle version
-```
\ No newline at end of file
diff --git a/doc/_sources/build/contribute_to_paddle.txt b/doc/_sources/build/contribute_to_paddle.txt
index 06fcff61720755432c5618500ac509c5b3f867df..a9ab69c5f42b8d341dca87479a642e28ca58fbf4 100644
--- a/doc/_sources/build/contribute_to_paddle.txt
+++ b/doc/_sources/build/contribute_to_paddle.txt
@@ -4,7 +4,7 @@ We sincerely appreciate your contributions. You can use fork and pull request
workflow to merge your code.
## Code Requirements
-- Your code mush be fully documented by
+- Your code must be fully documented by
[doxygen](http://www.stack.nl/~dimitri/doxygen/) style.
- Make sure the compiler option WITH\_STYLE\_CHECK is on and the compiler
passes the code style check.
@@ -20,16 +20,30 @@ It's just that simple.
## Clone
+Paddle is currently using the [git-flow branching model](http://nvie.com/posts/a-successful-git-branching-model/).
+The **develop** branch is the main branch, and other users' branches are feature branches.
+
Once you've created a fork, you can use your favorite git client to clone your
repo or just head straight to the command line:
```shell
# Clone your fork to your local machine
-git clone https://github.com/USERNAME/Paddle.git
+git clone --branch develop https://github.com/USERNAME/Paddle.git
+```
+If your repository doesn't contain a **develop** branch, just create it on your own.
+
+```shell
+git clone https://github.com/USERNAME/Paddle.git Paddle
+cd Paddle
+git checkout -b develop # create develop branch.
+git remote add upstream https://github.com/baidu/Paddle.git # add upstream to baidu/Paddle
+git pull upstream develop # update to upstream
```
+
Then you can start to develop by making a local developement branch
+
```shell
-git checkout -b MY_COOL_STUFF_BRANCH origin/master
+git checkout -b MY_COOL_STUFF_BRANCH
```
## Commit
@@ -41,7 +55,7 @@ Commit your changes by following command lines:
git status
# add modified files
git add xx
-git commit -m "commit info"
+env EDITOR=vim git commit # You can write your comments by vim/nano/emacs.
```
The first line of commit infomation is the title. The second and later lines
are the details if any.
@@ -63,7 +77,7 @@ git remote -v
Update your fork with the latest upstream changes:
```shell
-git pull --rebase upstream HEAD
+git pull --rebase upstream develop
```
If there are no unique commits locally, git will simply perform a fast-forward.
@@ -76,7 +90,7 @@ Now, your local master branch is up-to-date with everything modified upstream.
```shell
# push to your repository in Github
-git push origin HEAD
+git push -u origin MY_COOL_STUFF_BRANCH # create remote branch MY_COOL_STUFF_BRANCH to origin.
```
## Pull Request
@@ -93,9 +107,24 @@ of conflict, you need to do the update manually. You need to do the following on
your local repository:
```shell
git checkout MY_COOL_STUFF_BRANCH
-git pull --rebase upstream HEAD
+git pull upstream develop
# You may need to resolve the conflict according to the git prompt.
# Make and test your code.
-git push -f origin HEAD
+git push origin MY_COOL_STUFF_BRANCH
```
Now your Pull Request is updated with the latest version.
+
+## Revise your pull request
+
+When you revise your pull request according to reviewers' comments, please use 'git commit' instead of 'git commit --amend' to commit your changes so that the reviewers can see the difference between the new pull request and the old pull request.
+
+The possible commands are
+
+```shell
+git checkout MY_COOL_STUFF_BRANCH
+git pull upstream develop # update local to newest code base.
+# Maybe some conflicts will occur.
+# And develop your cool stuff
+env EDITOR=vim git commit # add your revise log
+git push origin MY_COOL_STUFF_BRANCH
+```
diff --git a/doc/_sources/build/docker_install.txt b/doc/_sources/build/docker_install.txt
index 3cd9d1730a22b97c208233ad4c6f9bad8038c1bf..e95de35f4da35fee511551f13bc6026532cce5c3 100644
--- a/doc/_sources/build/docker_install.txt
+++ b/doc/_sources/build/docker_install.txt
@@ -1,42 +1,84 @@
Docker installation guide
-====================
-PaddlePaddle provides some pre-compiled binary, including Docker images, ubuntu deb packages. It is welcomed to contributed more installation package of different linux distribution (such as ubuntu, centos, debian, gentoo and so on). We recommend to use Docker images to deploy PaddlePaddle.
-## Docker installation
+==========================
-Docker is a tool designed to make it easier to create, deploy, and run applications by using containers.
+PaddlePaddle provides the `Docker `_ image. `Docker`_ is a lightweight container utility. The performance of PaddlePaddle in a `Docker`_ container is basically the same as running it on a normal Linux system. `Docker`_ is a very convenient way to deliver the binary release for Linux programs.
-### PaddlePaddle Docker images
-There are six Docker images:
+.. note::
-- paddledev/paddle:cpu-latest: PaddlePaddle CPU binary image.
-- paddledev/paddle:gpu-latest: PaddlePaddle GPU binary image.
-- paddledev/paddle:cpu-devel-latest: PaddlePaddle CPU binary image plus source code.
-- paddledev/paddle:gpu-devel-latest: PaddlePaddle GPU binary image plus source code.
-- paddledev/paddle:cpu-demo-latest: PaddlePaddle CPU binary image plus source code and demo
-- paddledev/paddle:gpu-demo-latest: PaddlePaddle GPU binary image plus source code and demo
+ The `Docker`_ image is the recommended way to run PaddlePaddle
-Tags with latest will be replaced by a released version.
+PaddlePaddle Docker images
+--------------------------
-### Download and Run Docker images
+There are 12 `images `_ for PaddlePaddle, and the name is :code:`paddledev/paddle`, tags are\:
+
+
++-----------------+------------------+------------------------+-----------------------+
+| | normal | devel | demo |
++=================+==================+========================+=======================+
+| CPU | cpu-latest | cpu-devel-latest | cpu-demo-latest |
++-----------------+------------------+------------------------+-----------------------+
+| GPU | gpu-latest | gpu-devel-latest | gpu-demo-latest |
++-----------------+------------------+------------------------+-----------------------+
+| CPU WITHOUT AVX | cpu-noavx-latest | cpu-devel-noavx-latest | cpu-demo-noavx-latest |
++-----------------+------------------+------------------------+-----------------------+
+| GPU WITHOUT AVX | gpu-noavx-latest | gpu-devel-noavx-latest | gpu-demo-noavx-latest |
++-----------------+------------------+------------------------+-----------------------+
+
+And the three columns are:
+
+* normal\: The docker image only contains binary of PaddlePaddle.
+* devel\: The docker image contains PaddlePaddle binary, source code and essential build environment.
+* demo\: The docker image contains the dependencies to run PaddlePaddle demo.
+
+And the four rows are:
+
+* CPU\: CPU Version. Supports CPUs which have :code:`AVX` instructions.
+* GPU\: GPU Version. Supports GPU, and the CPU must have :code:`AVX` instructions.
+* CPU WITHOUT AVX\: CPU Version, which supports most CPUs, even those without :code:`AVX` instructions.
+* GPU WITHOUT AVX\: GPU Version, which supports most CPUs, even those without :code:`AVX` instructions.
+
+Users can choose any version depending on their machine. The following script can help you detect whether your CPU supports :code:`AVX`.
+
+.. code-block:: bash
+
+ if cat /proc/cpuinfo | grep -q avx ; then echo "Support AVX"; else echo "Not support AVX"; fi
+
+If the output is :code:`Support AVX`, then you can choose the AVX version of PaddlePaddle; otherwise, you need to select the :code:`noavx` version of PaddlePaddle. For example, the CPU develop version of PaddlePaddle is :code:`paddledev/paddle:cpu-devel-latest`.
+
+The PaddlePaddle images don't contain any entry command. You need to write your entry command to use this image. See :code:`Remote Access` part or just use following command to run a :code:`bash`
+
+.. code-block:: bash
+
+ docker run -it paddledev/paddle:cpu-latest /bin/bash
+
+
+Download and Run Docker images
+------------------------------
You have to install Docker in your machine which has linux kernel version 3.10+ first. You can refer to the official guide https://docs.docker.com/engine/installation/ for further information.
-You can use ```docker pull ```to download images first, or just launch a container with ```docker run```:
-```bash
-docker run -it paddledev/paddle:cpu-latest
-```
+You can use :code:`docker pull ` to download images first, or just launch a container with :code:`docker run` \:
+
+.. code-block:: bash
+
+ docker run -it paddledev/paddle:cpu-latest
+
If you want to launch container with GPU support, you need to set some environment variables at the same time:
-```bash
-export CUDA_SO="$(\ls /usr/lib64/libcuda* | xargs -I{} echo '-v {}:{}') $(\ls /usr/lib64/libnvidia* | xargs -I{} echo '-v {}:{}"
-export DEVICES=$(\ls /dev/nvidia* | xargs -I{} echo '--device {}:{}')
-docker run -it paddledev/paddle:gpu-latest
-```
+.. code-block:: bash
+
+ export CUDA_SO="$(\ls /usr/lib64/libcuda* | xargs -I{} echo '-v {}:{}') $(\ls /usr/lib64/libnvidia* | xargs -I{} echo '-v {}:{}')"
+ export DEVICES=$(\ls /dev/nvidia* | xargs -I{} echo '--device {}:{}')
+ docker run ${CUDA_SO} ${DEVICES} -it paddledev/paddle:gpu-latest
-### Notice
-#### Performance
+Some notes for docker
+---------------------
+
+Performance
++++++++++++
Since Docker is based on the lightweight virtual containers, the CPU computing performance maintains well. And GPU driver and equipments are all mapped to the container, so the GPU computing performance would not be seriously affected.
@@ -45,47 +87,36 @@ If you use high performance nic, such as RDMA(RoCE 40GbE or IB 56GbE), Ethernet(
-#### Remote access
-If you want to enable ssh access background, you need to build an image by yourself. Please refer to official guide https://docs.docker.com/engine/reference/builder/ for further information.
+Remote access
++++++++++++++
-Following is a simple Dockerfile with ssh:
-```bash
-FROM paddledev/paddle
-MAINTAINER PaddlePaddle dev team
+If you want to enable ssh access background, you need to build an image by yourself. Please refer to official guide https://docs.docker.com/engine/reference/builder/ for further information.
-RUN apt-get update
-RUN apt-get install -y openssh-server
-RUN mkdir /var/run/sshd
-RUN echo 'root:root' | chpasswd
+Following is a simple Dockerfile with ssh:
-RUN sed -ri 's/^PermitRootLogin\s+.*/PermitRootLogin yes/' /etc/ssh/sshd_config
-RUN sed -ri 's/UsePAM yes/#UsePAM yes/g' /etc/ssh/sshd_config
+.. literalinclude:: ../../doc_cn/build_and_install/install/paddle_ssh.Dockerfile
-EXPOSE 22
+Then you can build an image with Dockerfile and launch a container:
-CMD ["/usr/sbin/sshd", "-D"]
-```
+.. code-block:: bash
-Then you can build an image with Dockerfile and launch a container:
+ # cd into Dockerfile directory
+ docker build . -t paddle_ssh
+ # run container, and map host machine port 8022 to container port 22
+ docker run -d -p 8022:22 --name paddle_ssh_machine paddle_ssh
-```bash
-# cd into Dockerfile directory
-docker build . -t paddle_ssh
-# run container, and map host machine port 8022 to container port 22
-docker run -d -p 8022:22 --name paddle_ssh_machine paddle_ssh
-```
Now, you can ssh on port 8022 to access the container, username is root, password is also root:
-```bash
-ssh -p 8022 root@YOUR_HOST_MACHINE
-```
+.. code-block:: bash
+ ssh -p 8022 root@YOUR_HOST_MACHINE
You can stop and delete the container as following:
-```bash
-# stop
-docker stop paddle_ssh_machine
-# delete
-docker rm paddle_ssh_machine
-```
+
+.. code-block:: bash
+
+ # stop
+ docker stop paddle_ssh_machine
+ # delete
+ docker rm paddle_ssh_machine
diff --git a/doc/_sources/build/index.txt b/doc/_sources/build/index.txt
index d6d0d19e110fc35faec87da90d784a6775b9c91f..511cdea145c7fd0e41566d0a85115dbb06f84058 100644
--- a/doc/_sources/build/index.txt
+++ b/doc/_sources/build/index.txt
@@ -10,31 +10,24 @@ Install PaddlePaddle
install_*
internal/install_from_jumbo.md
+ docker_install.rst
+ ubuntu_install.rst
Build from Source
-----------------
-If you want to hack and contribute PaddlePaddle source code, following guides can help you\:
+.. warning::
-.. toctree::
- :maxdepth: 1
- :glob:
+ Please use :code:`deb` package or :code:`docker` image to install paddle. The building guide is used for hacking or contributing to PaddlePaddle.
+
- build_from_source.md
- contribute_to_paddle.md
-
-Docker and Debian Package installation
---------------------------------------
-
-Note: The installation packages are still in pre-release
-state and your experience of installation may not be smooth.
+If you want to hack and contribute PaddlePaddle source code, following guides can help you\:
-If you want to pack docker image, the following guide can help you\:
.. toctree::
:maxdepth: 1
:glob:
- docker_install.md
- ubuntu_install.md
+ build_from_source.md
+ contribute_to_paddle.md
diff --git a/doc/_sources/build/ubuntu_install.txt b/doc/_sources/build/ubuntu_install.txt
index c30a8f6db5d9eb83390d9374d27aba46fa42a462..ea8042085bf458be96e71017d229d88ad867695b 100644
--- a/doc/_sources/build/ubuntu_install.txt
+++ b/doc/_sources/build/ubuntu_install.txt
@@ -1,21 +1,25 @@
Debian Package installation guide
=================================
-## Debian Package installation
-Currently , PaddlePaddle only provides ubuntu14.04 debian packages.
-There are two versions package, including CPU and GPU. The download address is:
+PaddlePaddle supports the :code:`deb` package. The installation of this :code:`deb` package is tested on Ubuntu 14.04, but it should support other Debian-based Linux distributions, too.
-https://github.com/baidu/Paddle/releases/tag/V0.8.0b0
+There are four versions of debian package, :code:`cpu`, :code:`gpu`, :code:`cpu-noavx`, :code:`gpu-noavx`. And :code:`noavx` version is used to support CPU which does not contain :code:`AVX` instructions. The download url of :code:`deb` package is \: https://github.com/baidu/Paddle/releases/
-After downloading PaddlePaddle deb packages, you can run:
+After downloading the PaddlePaddle deb packages, you can use :code:`gdebi` to install them.
-```bash
-dpkg -i paddle-0.8.0b-cpu.deb
-apt-get install -f
-```
-And if you use GPU version deb package, you need to install CUDA toolkit and cuDNN, and set related environment variables(such as LD_LIBRARY_PATH) first. It is normal when `dpkg -i` get errors. `apt-get install -f` will continue install paddle, and install dependences.
+.. code-block:: bash
+
+ gdebi paddle-*.deb
+
+If :code:`gdebi` is not installed, you can use :code:`sudo apt-get install gdebi` to install it.
+
+Or you can use following commands to install PaddlePaddle.
-**Note**
+.. code-block:: bash
+
+ dpkg -i paddle-*.deb
+ apt-get install -f
+
+And if you use GPU version deb package, you need to install CUDA toolkit and cuDNN, and set related environment variables(such as LD_LIBRARY_PATH) first. It is normal when `dpkg -i` get errors. `apt-get install -f` will continue install paddle, and install dependences.
-PaddlePaddle package only supports x86 CPU with AVX instructions. If not, you have to download and build from source code.
diff --git a/doc/_sources/cluster/opensource/cluster_train.txt b/doc/_sources/cluster/opensource/cluster_train.txt
index 4763ede39b049b6c49225dc9ae7add77325d704e..cb493a88f031850cb6a5eeed0ebe9e41bb7e01c3 100644
--- a/doc/_sources/cluster/opensource/cluster_train.txt
+++ b/doc/_sources/cluster/opensource/cluster_train.txt
@@ -1,26 +1,24 @@
-# Cluster Training
+# Distributed Training
-We provide some simple scripts ```paddle/scripts/cluster_train``` to help you to launch cluster training Job to harness PaddlePaddle's distributed trainning. For MPI and other cluster scheduler refer this naive script to implement more robust cluster training platform by yourself.
+In this article, we explain how to run distributed Paddle training jobs on clusters. We will create the distributed version of the single-process training example, [recommendation](https://github.com/baidu/Paddle/tree/develop/demo/recommendation).
-The following cluster demo is based on RECOMMENDATION local training demo in PaddlePaddle ```demo/recommendation``` directory. Assuming you enter the ```paddle/scripts/cluster_train/``` directory.
+[Scripts](https://github.com/baidu/Paddle/tree/develop/paddle/scripts/cluster_train) used in this article launch distributed jobs via SSH. They also work as a reference for users running more sophisticated cluster management systems like MPI and Kubernetes.
-## Pre-requirements
+## Prerequisite
-Firstly,
+1. Aforementioned scripts use a Python library [fabric](http://www.fabfile.org/) to run SSH commands. We can use `pip` to install fabric:
-```bash
+ ```bash
pip install fabric
-```
-
-Secondly, go through installing scripts to install PaddlePaddle at all nodes to make sure demo can run as local mode. For CUDA enabled training, we assume that CUDA is installed in ```/usr/local/cuda```, otherwise missed cuda runtime libraries error could be reported at cluster runtime. In one word, the local training environment should be well prepared for the simple scripts.
+ ```
-Then you should prepare same ROOT_DIR directory in all nodes. ROOT_DIR is from in cluster_train/conf.py. Assuming that the ROOT_DIR = /home/paddle, you can create ```paddle``` user account as well, at last ```paddle.py``` can ssh connections to all nodes with ```paddle``` user automatically.
+1. We need to install PaddlePaddle on all nodes in the cluster. To enable GPUs, we need to install CUDA in `/usr/local/cuda`; otherwise Paddle would report errors at runtime.
-At last you can create ssh mutual trust relationship between all nodes for easy ssh login, otherwise ```password``` should be provided at runtime from ```paddle.py```.
+1. Set the `ROOT_DIR` variable in [`cluster_train/conf.py`] on all nodes. For convenience, we often create a Unix user `paddle` on all nodes and set `ROOT_DIR=/home/paddle`. In this way, we can write public SSH keys into `/home/paddle/.ssh/authorized_keys` so that user `paddle` can SSH to all nodes without password.
## Prepare Job Workspace
-```Job workspace``` is defined as one package directory which contains dependency libraries, train data, test data, model config file and all other related file dependencies.
+We refer to the directory where we put dependent libraries, config files, etc., as *workspace*.
These ```train/test``` data should be prepared before launching cluster job. To satisfy the requirement that train/test data are placed in different directory from workspace, PADDLE refers train/test data according to index file named as ```train.list/test.list``` which are used in model config file. So the train/test data also contains train.list/test.list two list file. All local training demo already provides scripts to help you create these two files, and all nodes in cluster job will handle files with same logical code in normal condition.
diff --git a/doc/_sources/demo/quick_start/index_en.txt b/doc/_sources/demo/quick_start/index_en.txt
index ee3fa2a2166f497524663574270b239a6170ab19..659485d9be1b6a3e9759a2fd040cb09d1f2a3005 100644
--- a/doc/_sources/demo/quick_start/index_en.txt
+++ b/doc/_sources/demo/quick_start/index_en.txt
@@ -1,4 +1,4 @@
-# Quick Start Tutorial
+# Quick Start
This tutorial will teach the basics of deep learning (DL), including how to implement many different models in PaddlePaddle. You will learn how to:
- Prepare data into the standardized format that PaddlePaddle accepts.
@@ -134,7 +134,7 @@ def process(settings, file_name):
You need to add a data provider definition `define_py_data_sources2` in our network configuration. This definition specifies:
- The path of the training and testing data (`data/train.list`, `data/test.list`).
-- The location of the data provider file (`dataprovider_pow`).
+- The location of the data provider file (`dataprovider_bow`).
- The function to call to get data. (`process`).
- Additional arguments or data. Here it passes the path of word dictionary.
@@ -477,7 +477,7 @@ The scripts of data downloading, network configurations, and training scrips are
Word embedding |
15MB |
8.484% |
-trainer_config.bow.py |
+trainer_config.emb.py |
diff --git a/doc/_sources/demo/semantic_role_labeling/semantic_role_labeling.txt b/doc/_sources/demo/semantic_role_labeling/semantic_role_labeling.txt
index 05fbc8278daf204df60ad19b742c920e47128c27..890f7314582c65e9add50664006b57aa4e0709eb 100644
--- a/doc/_sources/demo/semantic_role_labeling/semantic_role_labeling.txt
+++ b/doc/_sources/demo/semantic_role_labeling/semantic_role_labeling.txt
@@ -1,183 +1,183 @@
-# Semantic Role labeling Tutorial #
-
-Semantic role labeling (SRL) is a form of shallow semantic parsing whose goal is to discover the predicate-argument structure of each predicate in a given input sentence. SRL is useful as an intermediate step in a wide range of natural language processing tasks, such as information extraction. automatic document categorization and question answering. An instance is as following [1]:
-
- [ A0 He ] [ AM-MOD would ][ AM-NEG n’t ] [ V accept] [ A1 anything of value ] from [A2 those he was writing about ].
-
-- V: verb
-- A0: acceptor
-- A1: thing accepted
-- A2: accepted-from
-- A3: Attribute
-- AM-MOD: modal
-- AM-NEG: negation
-
-Given the verb "accept", the chunks in sentence would play certain semantic roles. Here, the label scheme is from Penn Proposition Bank.
-
-To this date, most of the successful SRL systems are built on top of some form of parsing results where pre-defined feature templates over the syntactic structure are used. This tutorial will present an end-to-end system using deep bidirectional long short-term memory (DB-LSTM)[2] for solving the SRL task, which largely outperforms the previous state-of-the-art systems. The system regards SRL task as the sequence labelling problem.
-
-## Data Description
-The relevant paper[2] takes the data set in CoNLL-2005&2012 Shared Task for training and testing. Accordingto data license, the demo adopts the test data set of CoNLL-2005, which can be reached on website.
-
-To download and process the original data, user just need to execute the following command:
-
-```bash
-cd data
-./get_data.sh
-```
-Several new files appear in the `data `directory as follows.
-```bash
-conll05st-release:the test data set of CoNll-2005 shared task
-test.wsj.words:the Wall Street Journal data sentences
-test.wsj.props: the propositional arguments
-src.dict:the dictionary of words in sentences
-tgt.dict:the labels dictionary
-feature: the extracted features from data set
-```
-
-## Training
-### DB-LSTM
-Please refer to the Sentiment Analysis demo to learn more about the long short-term memory unit.
-
-Unlike Bidirectional-LSTM that used in Sentiment Analysis demo, the DB-LSTM adopts another way to stack LSTM layer. First a standard LSTM processes the sequence in forward direction. The input and output of this LSTM layer are taken by the next LSTM layer as input, processed in reversed direction. These two standard LSTM layers compose a pair of LSTM. Then we stack LSTM layers pair after pair to obtain the deep LSTM model.
-
-The following figure shows a temporal expanded 2-layer DB-LSTM network.
-
-
-
-
-### Features
-Two input features play an essential role in this pipeline: predicate (pred) and argument (argu). Two other features: predicate context (ctx-p) and region mark (mr) are also adopted. Because a single predicate word can not exactly describe the predicate information, especially when the same words appear more than one times in a sentence. With the predicate context, the ambiguity can be largely eliminated. Similarly, we use region mark mr = 1 to denote the argument position if it locates in the predicate context region, or mr = 0 if does not. These four simple features are all we need for our SRL system. Features of one sample with context size set to 1 is showed as following[2]:
-
-
-
-
-In this sample, the coresponding labelled sentence is:
-
-[ A1 A record date ] has [ AM-NEG n't ] been [ V set ] .
-
-In the demo, we adopt the feature template as above, consists of : `argument`, `predicate`, `ctx-p (p=-1,0,1)`, `mark` and use `B/I/O` scheme to label each argument. These features and labels are stored in `feature` file, and separated by `\t`.
-
-### Data Provider
-
-`dataprovider.py` is the python file to wrap data. `hook()` function is to define the data slots for network. The Six features and label are all IndexSlots.
-```
-def hook(settings, word_dict, label_dict, **kwargs):
- settings.word_dict = word_dict
- settings.label_dict = label_dict
- #all inputs are integral and sequential type
- settings.slots = [
- integer_value_sequence(len(word_dict)),
- integer_value_sequence(len(word_dict)),
- integer_value_sequence(len(word_dict)),
- integer_value_sequence(len(word_dict)),
- integer_value_sequence(len(word_dict)),
- integer_value_sequence(2),
- integer_value_sequence(len(label_dict))]
-```
-The corresponding data iterator is as following:
-```
-@provider(use_seq=True, init_hook=hook)
-def process(obj, file_name):
- with open(file_name, 'r') as fdata:
- for line in fdata:
- sentence, predicate, ctx_n1, ctx_0, ctx_p1, mark, label = line.strip().split('\t')
- words = sentence.split()
- sen_len = len(words)
- word_slot = [obj.word_dict.get(w, UNK_IDX) for w in words]
-
- predicate_slot = [obj.word_dict.get(predicate, UNK_IDX)] * sen_len
- ctx_n1_slot = [obj.word_dict.get(ctx_n1, UNK_IDX) ] * sen_len
- ctx_0_slot = [obj.word_dict.get(ctx_0, UNK_IDX) ] * sen_len
- ctx_p1_slot = [obj.word_dict.get(ctx_p1, UNK_IDX) ] * sen_len
-
- marks = mark.split()
- mark_slot = [int(w) for w in marks]
-
- label_list = label.split()
- label_slot = [obj.label_dict.get(w) for w in label_list]
-
- yield word_slot, predicate_slot, ctx_n1_slot, ctx_0_slot, ctx_p1_slot, mark_slot, label_slot
-```
-The `process`function yield 7 lists which are six features and labels.
-
-### Neural Network Config
-`db_lstm.py` is the neural network config file to load the dictionaries and define the data provider module and network architecture during the training procedure.
-
-Seven `data_layer` load instances from data provider. Six features are transformed into embedddings respectively, and mixed by `mixed_layer` . Deep bidirectional LSTM layers extract features for the softmax layer. The objective function is cross entropy of labels.
-
-### Run Training
-The script for training is `train.sh`, user just need to execute:
-```bash
- ./train.sh
-```
-The content in `train.sh`:
-```
-paddle train \
- --config=./db_lstm.py \
- --save_dir=./output \
- --trainer_count=4 \
- --log_period=10 \
- --num_passes=500 \
- --use_gpu=false \
- --show_parameter_stats_period=10 \
- --test_all_data_in_one_period=1 \
-2>&1 | tee 'train.log'
-```
-
-- \--config=./db_lstm.py : network config file.
-- \--save_di=./output: output path to save models.
-- \--trainer_count=4 : set thread number (or GPU count).
-- \--log_period=10 : print log every 20 batches.
-- \--num_passes=500: set pass number, one pass in PaddlePaddle means training all samples in dataset one time.
-- \--use_gpu=false: use CPU to train, set true, if you install GPU version of PaddlePaddle and want to use GPU to train.
-- \--show_parameter_stats_period=10: show parameter statistic every 100 batches.
-- \--test_all_data_in_one_period=1: test all data in every testing.
-
-
-After training, the models will be saved in directory `output`.
-
-### Run testing
-The script for testing is `test.sh`, user just need to execute:
-```bash
- ./test.sh
-```
-The main part in `tesh.sh`
-```
-paddle train \
- --config=./db_lstm.py \
- --model_list=$model_list \
- --job=test \
- --config_args=is_test=1 \
-```
-
- - \--config=./db_lstm.py: network config file
- - \--model_list=$model_list.list: model list file
- - \--job=test: indicate the test job
- - \--config_args=is_test=1: flag to indicate test
-
-
-### Run prediction
-The script for prediction is `predict.sh`, user just need to execute:
-```bash
- ./predict.sh
-
-```
-In `predict.sh`, user should offer the network config file, model path, label file, word dictionary file, feature file
-```
-python predict.py
- -c $config_file
- -w $model_path
- -l $label_file
- -d $dict_file
- -i $input_file
-```
-
-`predict.py` is the main executable python script, which includes functions: load model, load data, data prediction. The network model will output the probability distribution of labels. In the demo, we take the label with maximum probability as result. User can also implement the beam search or viterbi decoding upon the probability distribution matrix.
-
-After prediction, the result is saved in `predict.res`.
-
-## Reference
-[1] Martha Palmer, Dan Gildea, and Paul Kingsbury. The Proposition Bank: An Annotated Corpus of Semantic Roles , Computational Linguistics, 31(1), 2005.
-
-[2] Zhou, Jie, and Wei Xu. "End-to-end learning of semantic role labeling using recurrent neural networks." Proceedings of the Annual Meeting of the Association for Computational Linguistics. 2015.
+# Semantic Role Labeling Tutorial #
+
+Semantic role labeling (SRL) is a form of shallow semantic parsing whose goal is to discover the predicate-argument structure of each predicate in a given input sentence. SRL is useful as an intermediate step in a wide range of natural language processing tasks, such as information extraction, automatic document categorization and question answering. An instance is as follows [1]:
+
+ [ A0 He ] [ AM-MOD would ][ AM-NEG n’t ] [ V accept] [ A1 anything of value ] from [A2 those he was writing about ].
+
+- V: verb
+- A0: acceptor
+- A1: thing accepted
+- A2: accepted-from
+- A3: Attribute
+- AM-MOD: modal
+- AM-NEG: negation
+
+Given the verb "accept", the chunks in sentence would play certain semantic roles. Here, the label scheme is from Penn Proposition Bank.
+
+To this date, most of the successful SRL systems are built on top of some form of parsing results where pre-defined feature templates over the syntactic structure are used. This tutorial will present an end-to-end system using deep bidirectional long short-term memory (DB-LSTM)[2] for solving the SRL task, which largely outperforms the previous state-of-the-art systems. The system regards SRL task as the sequence labelling problem.
+
+## Data Description
+The relevant paper[2] takes the data set in the CoNLL-2005&2012 Shared Task for training and testing. According to the data license, the demo adopts the test data set of CoNLL-2005, which can be reached on the website.
+
+To download and process the original data, user just need to execute the following command:
+
+```bash
+cd data
+./get_data.sh
+```
+Several new files appear in the `data` directory as follows.
+```bash
+conll05st-release:the test data set of CoNll-2005 shared task
+test.wsj.words:the Wall Street Journal data sentences
+test.wsj.props: the propositional arguments
+src.dict:the dictionary of words in sentences
+tgt.dict:the labels dictionary
+feature: the extracted features from data set
+```
+
+## Training
+### DB-LSTM
+Please refer to the Sentiment Analysis demo to learn more about the long short-term memory unit.
+
+Unlike Bidirectional-LSTM that used in Sentiment Analysis demo, the DB-LSTM adopts another way to stack LSTM layer. First a standard LSTM processes the sequence in forward direction. The input and output of this LSTM layer are taken by the next LSTM layer as input, processed in reversed direction. These two standard LSTM layers compose a pair of LSTM. Then we stack LSTM layers pair after pair to obtain the deep LSTM model.
+
+The following figure shows a temporal expanded 2-layer DB-LSTM network.
+
+
+
+
+### Features
+Two input features play an essential role in this pipeline: predicate (pred) and argument (argu). Two other features: predicate context (ctx-p) and region mark (mr) are also adopted. Because a single predicate word can not exactly describe the predicate information, especially when the same words appear more than one times in a sentence. With the predicate context, the ambiguity can be largely eliminated. Similarly, we use region mark mr = 1 to denote the argument position if it locates in the predicate context region, or mr = 0 if does not. These four simple features are all we need for our SRL system. Features of one sample with context size set to 1 is showed as following[2]:
+
+
+
+
+In this sample, the corresponding labelled sentence is:
+
+[ A1 A record date ] has [ AM-NEG n't ] been [ V set ] .
+
+In the demo, we adopt the feature template as above, consists of : `argument`, `predicate`, `ctx-p (p=-1,0,1)`, `mark` and use `B/I/O` scheme to label each argument. These features and labels are stored in `feature` file, and separated by `\t`.
+
+### Data Provider
+
+`dataprovider.py` is the python file to wrap data. The `hook()` function defines the data slots for the network. The six features and label are all IndexSlots.
+```
+def hook(settings, word_dict, label_dict, **kwargs):
+ settings.word_dict = word_dict
+ settings.label_dict = label_dict
+ #all inputs are integral and sequential type
+ settings.slots = [
+ integer_value_sequence(len(word_dict)),
+ integer_value_sequence(len(word_dict)),
+ integer_value_sequence(len(word_dict)),
+ integer_value_sequence(len(word_dict)),
+ integer_value_sequence(len(word_dict)),
+ integer_value_sequence(2),
+ integer_value_sequence(len(label_dict))]
+```
+The corresponding data iterator is as following:
+```
+@provider(use_seq=True, init_hook=hook)
+def process(obj, file_name):
+ with open(file_name, 'r') as fdata:
+ for line in fdata:
+ sentence, predicate, ctx_n1, ctx_0, ctx_p1, mark, label = line.strip().split('\t')
+ words = sentence.split()
+ sen_len = len(words)
+ word_slot = [obj.word_dict.get(w, UNK_IDX) for w in words]
+
+ predicate_slot = [obj.word_dict.get(predicate, UNK_IDX)] * sen_len
+ ctx_n1_slot = [obj.word_dict.get(ctx_n1, UNK_IDX) ] * sen_len
+ ctx_0_slot = [obj.word_dict.get(ctx_0, UNK_IDX) ] * sen_len
+ ctx_p1_slot = [obj.word_dict.get(ctx_p1, UNK_IDX) ] * sen_len
+
+ marks = mark.split()
+ mark_slot = [int(w) for w in marks]
+
+ label_list = label.split()
+ label_slot = [obj.label_dict.get(w) for w in label_list]
+
+ yield word_slot, predicate_slot, ctx_n1_slot, ctx_0_slot, ctx_p1_slot, mark_slot, label_slot
+```
+The `process` function yields 7 lists, which are the six features and the labels.
+
+### Neural Network Config
+`db_lstm.py` is the neural network config file to load the dictionaries and define the data provider module and network architecture during the training procedure.
+
+Seven `data_layer` load instances from data provider. Six features are transformed into embeddings respectively, and mixed by `mixed_layer`. Deep bidirectional LSTM layers extract features for the softmax layer. The objective function is cross entropy of labels.
+
+### Run Training
+The script for training is `train.sh`, user just need to execute:
+```bash
+ ./train.sh
+```
+The content in `train.sh`:
+```
+paddle train \
+ --config=./db_lstm.py \
+ --save_dir=./output \
+ --trainer_count=4 \
+ --log_period=10 \
+ --num_passes=500 \
+ --use_gpu=false \
+ --show_parameter_stats_period=10 \
+ --test_all_data_in_one_period=1 \
+2>&1 | tee 'train.log'
+```
+
+- \--config=./db_lstm.py : network config file.
+- \--save_dir=./output: output path to save models.
+- \--trainer_count=4 : set thread number (or GPU count).
+- \--log_period=10 : print log every 10 batches.
+- \--num_passes=500: set pass number, one pass in PaddlePaddle means training all samples in dataset one time.
+- \--use_gpu=false: use CPU to train, set true, if you install GPU version of PaddlePaddle and want to use GPU to train.
+- \--show_parameter_stats_period=10: show parameter statistics every 10 batches.
+- \--test_all_data_in_one_period=1: test all data in every testing.
+
+
+After training, the models will be saved in directory `output`.
+
+### Run testing
+The script for testing is `test.sh`, user just need to execute:
+```bash
+ ./test.sh
+```
+The main part in `test.sh`
+```
+paddle train \
+ --config=./db_lstm.py \
+ --model_list=$model_list \
+ --job=test \
+ --config_args=is_test=1 \
+```
+
+ - \--config=./db_lstm.py: network config file
+ - \--model_list=$model_list.list: model list file
+ - \--job=test: indicate the test job
+ - \--config_args=is_test=1: flag to indicate test
+
+
+### Run prediction
+The script for prediction is `predict.sh`, user just need to execute:
+```bash
+ ./predict.sh
+
+```
+In `predict.sh`, the user should offer the network config file, model path, label file, word dictionary file, and feature file.
+```
+python predict.py
+ -c $config_file
+ -w $model_path
+ -l $label_file
+ -d $dict_file
+ -i $input_file
+```
+
+`predict.py` is the main executable python script, which includes functions: load model, load data, data prediction. The network model will output the probability distribution of labels. In the demo, we take the label with maximum probability as result. User can also implement the beam search or viterbi decoding upon the probability distribution matrix.
+
+After prediction, the result is saved in `predict.res`.
+
+## Reference
+[1] Martha Palmer, Dan Gildea, and Paul Kingsbury. The Proposition Bank: An Annotated Corpus of Semantic Roles , Computational Linguistics, 31(1), 2005.
+
+[2] Zhou, Jie, and Wei Xu. "End-to-end learning of semantic role labeling using recurrent neural networks." Proceedings of the Annual Meeting of the Association for Computational Linguistics. 2015.
diff --git a/doc/_sources/index.txt b/doc/_sources/index.txt
index df03a33fac98c46635eef05d88639235ac72cf8f..cbd08ba52abe529aec84f6b1c2e35300496878a5 100644
--- a/doc/_sources/index.txt
+++ b/doc/_sources/index.txt
@@ -3,11 +3,12 @@ PaddlePaddle Documentation
User Guide
----------
+* [Introduction](introduction/index.md)
* [Quick Start](demo/quick_start/index_en.md)
* [Build and Installation](build/index.rst)
* [Contribute Code](build/contribute_to_paddle.md)
* [User Interface](ui/index.md)
-* [Model Config Interface](ui/api/trainer_config_helpers/index.md)
+* [Model Config Interface](ui/api/trainer_config_helpers/index.rst)
* [Example and Demo](demo/index.md)
* [Cluster Train](cluster/index.md)
diff --git a/doc/_sources/introduction/index.txt b/doc/_sources/introduction/index.txt
new file mode 100644
index 0000000000000000000000000000000000000000..01f52031a1d0247cd0b885218c17001f23685239
--- /dev/null
+++ b/doc/_sources/introduction/index.txt
@@ -0,0 +1,100 @@
+# Introduction
+
+PaddlePaddle is a deep learning platform open-sourced by Baidu. With PaddlePaddle, you can easily train a classic neural network within a couple lines of configuration, or you can build sophisticated models that provide state-of-the-art performance on difficult learning tasks like sentiment analysis, machine translation, image caption and so on.
+
+## 1. A Classic Problem
+
+Now, to give you a hint of what using PaddlePaddle looks like, let's start with a fundamental learning problem - **simple linear regression** : you have observed a set of two-dimensional data points of `X` and `Y`, where `X` is an explanatory variable and `Y` is corresponding dependent variable, and you want to recover the underlying correlation between `X` and `Y`. Linear regression can be used in many practical scenarios. For example, `X` can be a variable about house size, and `Y` a variable about house price. You can build a model that captures relationship between them by observing real estate markets.
+
+## 2. Prepare the Data
+
+Suppose the true relationship can be characterized as `Y = 2X + 0.3`, let's see how to recover this pattern only from observed data. Here is a piece of python code that feeds synthetic data to PaddlePaddle. The code is pretty self-explanatory, the only extra thing you need to add for PaddlePaddle is a definition of input data types.
+
+```python
+# dataprovider.py
+from paddle.trainer.PyDataProvider2 import *
+import random
+
+# define data types of input: 2 real numbers
+@provider(input_types=[dense_vector(1), dense_vector(1)],use_seq=False)
+def process(settings, input_file):
+ for i in xrange(2000):
+ x = random.random()
+ yield [x], [2*x+0.3]
+```
+
+## 3. Train a Neural Network in PaddlePaddle
+
+To recover this relationship between `X` and `Y`, we use a neural network with one layer of linear activation units and a square error cost layer. Don't worry if you are not familiar with these terminologies, it's just saying that we are starting from a random line `Y' = wX + b` , then we gradually adapt `w` and `b` to minimize the difference between `Y'` and `Y`. Here is what it looks like in PaddlePaddle:
+
+```python
+# trainer_config.py
+from paddle.trainer_config_helpers import *
+
+# 1. read data. Suppose you saved above python code as dataprovider.py
+data_file = 'empty.list'
+with open(data_file, 'w') as f: f.writelines(' ')
+define_py_data_sources2(train_list=data_file, test_list=None,
+ module='dataprovider', obj='process',args={})
+
+# 2. learning algorithm
+settings(batch_size=12, learning_rate=1e-3, learning_method=MomentumOptimizer())
+
+# 3. Network configuration
+x = data_layer(name='x', size=1)
+y = data_layer(name='y', size=1)
+y_predict = fc_layer(input=x, param_attr=ParamAttr(name='w'), size=1, act=LinearActivation(), bias_attr=ParamAttr(name='b'))
+cost = regression_cost(input=y_predict, label=y)
+outputs(cost)
+```
+
+Some of the most fundamental usages of PaddlePaddle are demonstrated:
+
+- The first part shows how to feed data into PaddlePaddle. In general cases, PaddlePaddle reads raw data from a list of files, and then do some user-defined process to get real input. In this case, we only need to create a placeholder file since we are generating synthetic data on the fly.
+
+- The second part describes learning algorithm. It defines in what ways adjustments are made to model parameters. PaddlePaddle provides a rich set of optimizers, but a simple momentum based optimizer will suffice here, and it processes 12 data points each time.
+
+- Finally, the network configuration. It usually is as simple as "stacking" layers. Three kinds of layers are used in this configuration:
+ - **Data Layer**: a network always starts with one or more data layers. They provide input data to the rest of the network. In this problem, two data layers are used respectively for `X` and `Y`.
+ - **FC Layer**: FC layer is short for Fully Connected Layer, which connects all the input units to current layer and does the actual computation specified as activation function. Computation layers like this are the fundamental building blocks of a deeper model.
+ - **Cost Layer**: in training phase, cost layers are usually the last layers of the network. They measure the performance of current model, and provide guidance to adjust parameters.
+
+Now that everything is ready, you can train the network with a simple command line call:
+ ```
+ paddle train --config=trainer_config.py --save_dir=./output --num_passes=30
+ ```
+
+This means that PaddlePaddle will train this network on the synthetic dataset for 30 passes, and save all the models under path `./output`. You will see from the messages printed out during training phase that the model cost is decreasing as time goes by, which indicates we are getting a closer guess.
+
+
+## 4. Evaluate the Model
+
+Usually, a different dataset that was left out during the training phase should be used to evaluate the models. However, we are lucky enough to know the real answer: `w=2, b=0.3`, thus a better option is to check out model parameters directly.
+
+In PaddlePaddle, training is just to get a collection of model parameters, which are `w` and `b` in this case. Each parameter is saved in an individual file in the popular `numpy` array format. Here is the code that reads parameters from last pass.
+
+```python
+import numpy as np
+import os
+
+def load(file_name):
+ with open(file_name, 'rb') as f:
+ f.read(16) # skip header for float type.
+ return np.fromfile(f, dtype=np.float32)
+
+print 'w=%.6f, b=%.6f' % (load('output/pass-00029/w'), load('output/pass-00029/b'))
+# w=1.999743, b=0.300137
+```
+
+ 
+
+Although it starts from a random guess, you can see that the value of `w` changes quickly towards 2 and `b` changes quickly towards 0.3. In the end, the predicted line is almost identical with the real answer.
+
+There, you have recovered the underlying pattern between `X` and `Y` only from observed data.
+
+
+## 5. Where to Go from Here
+
+- Build and Installation
+- Quick Start
+- Example and Demo
diff --git a/doc/_sources/source/gserver/layers/layer.txt b/doc/_sources/source/gserver/layers/layer.txt
index 807b22ca140ee71208a96e2877b9c5636620b165..4b8e149505f0695ad2fa4be967a50d1a0ac48b43 100644
--- a/doc/_sources/source/gserver/layers/layer.txt
+++ b/doc/_sources/source/gserver/layers/layer.txt
@@ -465,6 +465,11 @@ SumOfSquaresCostLayer
.. doxygenclass:: paddle::SumOfSquaresCostLayer
:members:
+SumCostLayer
+`````````````````````
+.. doxygenclass:: paddle::SumCostLayer
+ :members:
+
CosSimLayer
-----------
.. doxygenclass:: paddle::CosSimLayer
diff --git a/doc/_sources/ui/api/trainer_config_helpers/activations.txt b/doc/_sources/ui/api/trainer_config_helpers/activations.txt
index c4e14ed779efb6f6601d2c5fa41764f318c82848..269e6491e7ebe3899c3fb24fca756a393043473b 100644
--- a/doc/_sources/ui/api/trainer_config_helpers/activations.txt
+++ b/doc/_sources/ui/api/trainer_config_helpers/activations.txt
@@ -1,3 +1,7 @@
+===========
+Activations
+===========
+
BaseActivation
==============
@@ -32,6 +36,13 @@ LinearActivation
.. automodule:: paddle.trainer_config_helpers.activations
:members: LinearActivation
:noindex:
+
+LogActivation
+==================
+
+.. automodule:: paddle.trainer_config_helpers.activations
+ :members: LogActivation
+ :noindex:
SquareActivation
================
@@ -95,4 +106,3 @@ STanhActivation
.. automodule:: paddle.trainer_config_helpers.activations
:members: STanhActivation
:noindex:
-
diff --git a/doc/_sources/ui/api/trainer_config_helpers/activations_index.txt b/doc/_sources/ui/api/trainer_config_helpers/activations_index.txt
deleted file mode 100644
index 1c0b71ab77eec62859c1d7615f6ebe637f3108ac..0000000000000000000000000000000000000000
--- a/doc/_sources/ui/api/trainer_config_helpers/activations_index.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-Activations
-===========
-
-.. toctree::
- :maxdepth: 3
-
- activations.rst
diff --git a/doc/_sources/ui/api/trainer_config_helpers/evaluators.txt b/doc/_sources/ui/api/trainer_config_helpers/evaluators.txt
index 0586c9907e472dd98c5f7e9098251f3bc6b88bab..d6a79c13e2316b0fd3d53eb47960a767bcf8abdb 100644
--- a/doc/_sources/ui/api/trainer_config_helpers/evaluators.txt
+++ b/doc/_sources/ui/api/trainer_config_helpers/evaluators.txt
@@ -1,3 +1,7 @@
+==========
+Evaluators
+==========
+
Base
====
.. automodule:: paddle.trainer_config_helpers.evaluators
diff --git a/doc/_sources/ui/api/trainer_config_helpers/evaluators_index.txt b/doc/_sources/ui/api/trainer_config_helpers/evaluators_index.txt
deleted file mode 100644
index 298de3e1a32d36b9102f5ad64cc1b968f418041b..0000000000000000000000000000000000000000
--- a/doc/_sources/ui/api/trainer_config_helpers/evaluators_index.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-Evaluators
-==========
-
-.. toctree::
- :maxdepth: 3
-
- evaluators.rst
diff --git a/doc/_sources/ui/api/trainer_config_helpers/index.txt b/doc/_sources/ui/api/trainer_config_helpers/index.txt
index 00fa99bb3fa4c407dc867f91f4c7c495dc4061a1..8395eb75710b3e67ec0c5442f79c999bdacdff42 100644
--- a/doc/_sources/ui/api/trainer_config_helpers/index.txt
+++ b/doc/_sources/ui/api/trainer_config_helpers/index.txt
@@ -1,10 +1,14 @@
-# Model Config Interface
+Model Config Interface
+======================
-* [Optimizer](optimizers_index.rst)
-* [Data Source](data_sources.rst)
-* [Layers](layers_index.rst)
-* [Activations](activations_index.rst)
-* [Poolings](poolings_index.rst)
-* [Networks](networks_index.rst)
-* [Evaluators](evaluators_index.rst)
-* [Parameter and Extra Layer Attribute](attrs.rst)
+.. toctree::
+ :maxdepth: 1
+
+ optimizers.rst
+ data_sources.rst
+ layers.rst
+ activations.rst
+ poolings.rst
+ networks.rst
+ evaluators.rst
+ attrs.rst
diff --git a/doc/_sources/ui/api/trainer_config_helpers/layers.txt b/doc/_sources/ui/api/trainer_config_helpers/layers.txt
index c1d7a7ce815301be7d4193560fc6c27d90cf6e69..4a02af396993207d305be488c993ce94cf20fe1d 100644
--- a/doc/_sources/ui/api/trainer_config_helpers/layers.txt
+++ b/doc/_sources/ui/api/trainer_config_helpers/layers.txt
@@ -1,3 +1,7 @@
+======
+Layers
+======
+
Base
======
@@ -46,6 +50,12 @@ conv_operator
:members: conv_operator
:noindex:
+conv_projection
+---------------
+.. automodule:: paddle.trainer_config_helpers.layers
+ :members: conv_projection
+ :noindex:
+
conv_shift_layer
------------------
.. automodule:: paddle.trainer_config_helpers.layers
@@ -71,6 +81,18 @@ img_pool_layer
--------------
.. automodule:: paddle.trainer_config_helpers.layers
:members: img_pool_layer
+ :noindex:
+
+spp_layer
+--------------
+.. automodule:: paddle.trainer_config_helpers.layers
+ :members: spp_layer
+ :noindex:
+
+maxout_layer
+------------
+.. automodule:: paddle.trainer_config_helpers.layers
+ :members: maxout_layer
:noindex:
Norm Layer
@@ -130,6 +152,12 @@ gru_step_layer
Recurrent Layer Group
=====================
+memory
+------
+.. automodule:: paddle.trainer_config_helpers.layers
+ :members: memory
+ :noindex:
+
recurrent_group
---------------
.. automodule:: paddle.trainer_config_helpers.layers
@@ -163,6 +191,12 @@ embedding_layer
:members: embedding_layer
:noindex:
+scaling_projection
+-----------------
+.. automodule:: paddle.trainer_config_helpers.layers
+ :members: scaling_projection
+ :noindex:
+
dotmul_projection
-----------------
.. automodule:: paddle.trainer_config_helpers.layers
@@ -242,6 +276,12 @@ expand_layer
:members: expand_layer
:noindex:
+repeat_layer
+------------
+.. automodule:: paddle.trainer_config_helpers.layers
+ :members: repeat_layer
+ :noindex:
+
Math Layers
===========
@@ -263,6 +303,12 @@ interpolation_layer
:members: interpolation_layer
:noindex:
+bilinear_interp_layer
+----------------------
+.. automodule:: paddle.trainer_config_helpers.layers
+ :members: bilinear_interp_layer
+ :noindex:
+
power_layer
-----------
.. automodule:: paddle.trainer_config_helpers.layers
@@ -371,12 +417,24 @@ ctc_layer
:members: ctc_layer
:noindex:
+nce_layer
+-----------
+.. automodule:: paddle.trainer_config_helpers.layers
+ :members: nce_layer
+ :noindex:
+
hsigmoid
---------
.. automodule:: paddle.trainer_config_helpers.layers
:members: hsigmoid
:noindex:
+sum_cost
+---------
+.. automodule:: paddle.trainer_config_helpers.layers
+ :members: sum_cost
+ :noindex:
+
Check Layer
============
diff --git a/doc/_sources/ui/api/trainer_config_helpers/layers_index.txt b/doc/_sources/ui/api/trainer_config_helpers/layers_index.txt
deleted file mode 100644
index c0daab152148ce769948f600c3101bd79f5a1013..0000000000000000000000000000000000000000
--- a/doc/_sources/ui/api/trainer_config_helpers/layers_index.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-Layers
-======
-
-.. toctree::
- :maxdepth: 3
-
- layers.rst
diff --git a/doc/_sources/ui/api/trainer_config_helpers/networks.txt b/doc/_sources/ui/api/trainer_config_helpers/networks.txt
index 2a15b34eaea0b763f992a7225550e6af747f303c..29c52c5ce3078f1755162dbbdd65a059d8ba9fa4 100644
--- a/doc/_sources/ui/api/trainer_config_helpers/networks.txt
+++ b/doc/_sources/ui/api/trainer_config_helpers/networks.txt
@@ -1,3 +1,9 @@
+========
+Networks
+========
+
+The networks module contains pieces of neural network that combine multiple layers.
+
NLP
===
@@ -111,4 +117,3 @@ outputs
.. automodule:: paddle.trainer_config_helpers.networks
:members: outputs
:noindex:
-
diff --git a/doc/_sources/ui/api/trainer_config_helpers/networks_index.txt b/doc/_sources/ui/api/trainer_config_helpers/networks_index.txt
deleted file mode 100644
index 17bc4dfaa6c4ed3cd5daf0476d0d4c15a2067a22..0000000000000000000000000000000000000000
--- a/doc/_sources/ui/api/trainer_config_helpers/networks_index.txt
+++ /dev/null
@@ -1,9 +0,0 @@
-Networks
-========
-
-The networks module contains pieces of neural network that combine multiple layers.
-
-.. toctree::
- :maxdepth: 3
-
- networks.rst
diff --git a/doc/_sources/ui/api/trainer_config_helpers/optimizers.txt b/doc/_sources/ui/api/trainer_config_helpers/optimizers.txt
index b487fec64c4ebb5cfbdff1aa101d9b3675776a2c..7ca4e34156e273caf66cc71e6927bfb23bb5235e 100644
--- a/doc/_sources/ui/api/trainer_config_helpers/optimizers.txt
+++ b/doc/_sources/ui/api/trainer_config_helpers/optimizers.txt
@@ -1,3 +1,7 @@
+==========
+Optimizers
+==========
+
BaseSGDOptimizer
================
.. automodule:: paddle.trainer_config_helpers.optimizers
@@ -51,4 +55,3 @@ settings
.. automodule:: paddle.trainer_config_helpers.optimizers
:members: settings
:noindex:
-
diff --git a/doc/_sources/ui/api/trainer_config_helpers/optimizers_index.txt b/doc/_sources/ui/api/trainer_config_helpers/optimizers_index.txt
deleted file mode 100644
index f39f94f0cd6e1a6c3c25eeceb7820a7fbc070570..0000000000000000000000000000000000000000
--- a/doc/_sources/ui/api/trainer_config_helpers/optimizers_index.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-Optimizers
-==========
-
-.. toctree::
- :maxdepth: 3
-
- optimizers.rst
diff --git a/doc/_sources/ui/api/trainer_config_helpers/poolings.txt b/doc/_sources/ui/api/trainer_config_helpers/poolings.txt
index caadec639383aad24ed477d8bdaeaa31c0026bb5..66566809d26f59263597b5286c5b27e0bbc9415a 100644
--- a/doc/_sources/ui/api/trainer_config_helpers/poolings.txt
+++ b/doc/_sources/ui/api/trainer_config_helpers/poolings.txt
@@ -1,3 +1,7 @@
+========
+Poolings
+========
+
BasePoolingType
===============
.. automodule:: paddle.trainer_config_helpers.poolings
@@ -27,4 +31,3 @@ SquareRootNPooling
.. automodule:: paddle.trainer_config_helpers.poolings
:members: SquareRootNPooling
:noindex:
-
diff --git a/doc/_sources/ui/api/trainer_config_helpers/poolings_index.txt b/doc/_sources/ui/api/trainer_config_helpers/poolings_index.txt
deleted file mode 100644
index 250d3fa69c0dcedfd689b685fe7b47ec71d02fee..0000000000000000000000000000000000000000
--- a/doc/_sources/ui/api/trainer_config_helpers/poolings_index.txt
+++ /dev/null
@@ -1,9 +0,0 @@
-Poolings
-========
-
-These pooling types are used for sequence input, not for images.
-
-.. toctree::
- :maxdepth: 3
-
- poolings.rst
diff --git a/doc/_sources/ui/cmd_argument/argument_outline.txt b/doc/_sources/ui/cmd_argument/argument_outline.txt
index 98dadc270dcac8cb5c05f3065c98bac78671d7fa..d6cc2c6ed7cc1b9209d56b4348497427efe40ac3 100644
--- a/doc/_sources/ui/cmd_argument/argument_outline.txt
+++ b/doc/_sources/ui/cmd_argument/argument_outline.txt
@@ -183,7 +183,7 @@ It looks like there are a lot of arguments. However, most of them are for develo
-GPU | gpu_id |
+GPU | gpu_id |
√ | √ | √ | √ |
@@ -207,6 +207,11 @@ It looks like there are a lot of arguments. However, most of them are for develo
√ | √ | √ | √ |
+
+cudnn_conv_workspace_limit_in_mb |
+√ | √ | √ | √ |
+
+
RNN |
beam_size |
diff --git a/doc/_sources/ui/cmd_argument/detail_introduction.txt b/doc/_sources/ui/cmd_argument/detail_introduction.txt
index 0d0362d022a72b597e78e760893c91df449e5745..07608e5edf740bd3e1242913f1d2d7589ad313aa 100644
--- a/doc/_sources/ui/cmd_argument/detail_introduction.txt
+++ b/doc/_sources/ui/cmd_argument/detail_introduction.txt
@@ -163,6 +163,10 @@
- Choose path to dynamic load NVIDIA CUDA library, for instance, /usr/local/cuda/lib64. [Default]: LD_LIBRARY_PATH
- type: string (default: "", null)
+* `--cudnn_conv_workspace_limit_in_mb`
+  - Specify the maximum cuDNN convolution workspace size, in MB; the default is 4096 MB (4 GB).
+  - type: int32 (default: 4096)
+
## NLP: RNN/LSTM/GRU
* `--rnn_use_batch`
- Whether to use batch method for calculation in simple RecurrentLayer.
diff --git a/doc/_static/searchtools.js b/doc/_static/searchtools.js
index a51e0dc5b6ab26465274ba8810118382bc597652..ba82ad4877ca5803883434a08f1aea6afb43f3b6 100644
--- a/doc/_static/searchtools.js
+++ b/doc/_static/searchtools.js
@@ -226,6 +226,106 @@ var Scorer = {
};
+
+
+
+var splitChars = (function() {
+ var result = {};
+ var singles = [96, 180, 187, 191, 215, 247, 749, 885, 903, 907, 909, 930, 1014, 1648,
+ 1748, 1809, 2416, 2473, 2481, 2526, 2601, 2609, 2612, 2615, 2653, 2702,
+ 2706, 2729, 2737, 2740, 2857, 2865, 2868, 2910, 2928, 2948, 2961, 2971,
+ 2973, 3085, 3089, 3113, 3124, 3213, 3217, 3241, 3252, 3295, 3341, 3345,
+ 3369, 3506, 3516, 3633, 3715, 3721, 3736, 3744, 3748, 3750, 3756, 3761,
+ 3781, 3912, 4239, 4347, 4681, 4695, 4697, 4745, 4785, 4799, 4801, 4823,
+ 4881, 5760, 5901, 5997, 6313, 7405, 8024, 8026, 8028, 8030, 8117, 8125,
+ 8133, 8181, 8468, 8485, 8487, 8489, 8494, 8527, 11311, 11359, 11687, 11695,
+ 11703, 11711, 11719, 11727, 11735, 12448, 12539, 43010, 43014, 43019, 43587,
+ 43696, 43713, 64286, 64297, 64311, 64317, 64319, 64322, 64325, 65141];
+ var i, j, start, end;
+ for (i = 0; i < singles.length; i++) {
+ result[singles[i]] = true;
+ }
+ var ranges = [[0, 47], [58, 64], [91, 94], [123, 169], [171, 177], [182, 184], [706, 709],
+ [722, 735], [741, 747], [751, 879], [888, 889], [894, 901], [1154, 1161],
+ [1318, 1328], [1367, 1368], [1370, 1376], [1416, 1487], [1515, 1519], [1523, 1568],
+ [1611, 1631], [1642, 1645], [1750, 1764], [1767, 1773], [1789, 1790], [1792, 1807],
+ [1840, 1868], [1958, 1968], [1970, 1983], [2027, 2035], [2038, 2041], [2043, 2047],
+ [2070, 2073], [2075, 2083], [2085, 2087], [2089, 2307], [2362, 2364], [2366, 2383],
+ [2385, 2391], [2402, 2405], [2419, 2424], [2432, 2436], [2445, 2446], [2449, 2450],
+ [2483, 2485], [2490, 2492], [2494, 2509], [2511, 2523], [2530, 2533], [2546, 2547],
+ [2554, 2564], [2571, 2574], [2577, 2578], [2618, 2648], [2655, 2661], [2672, 2673],
+ [2677, 2692], [2746, 2748], [2750, 2767], [2769, 2783], [2786, 2789], [2800, 2820],
+ [2829, 2830], [2833, 2834], [2874, 2876], [2878, 2907], [2914, 2917], [2930, 2946],
+ [2955, 2957], [2966, 2968], [2976, 2978], [2981, 2983], [2987, 2989], [3002, 3023],
+ [3025, 3045], [3059, 3076], [3130, 3132], [3134, 3159], [3162, 3167], [3170, 3173],
+ [3184, 3191], [3199, 3204], [3258, 3260], [3262, 3293], [3298, 3301], [3312, 3332],
+ [3386, 3388], [3390, 3423], [3426, 3429], [3446, 3449], [3456, 3460], [3479, 3481],
+ [3518, 3519], [3527, 3584], [3636, 3647], [3655, 3663], [3674, 3712], [3717, 3718],
+ [3723, 3724], [3726, 3731], [3752, 3753], [3764, 3772], [3774, 3775], [3783, 3791],
+ [3802, 3803], [3806, 3839], [3841, 3871], [3892, 3903], [3949, 3975], [3980, 4095],
+ [4139, 4158], [4170, 4175], [4182, 4185], [4190, 4192], [4194, 4196], [4199, 4205],
+ [4209, 4212], [4226, 4237], [4250, 4255], [4294, 4303], [4349, 4351], [4686, 4687],
+ [4702, 4703], [4750, 4751], [4790, 4791], [4806, 4807], [4886, 4887], [4955, 4968],
+ [4989, 4991], [5008, 5023], [5109, 5120], [5741, 5742], [5787, 5791], [5867, 5869],
+ [5873, 5887], [5906, 5919], [5938, 5951], [5970, 5983], [6001, 6015], [6068, 6102],
+ [6104, 6107], [6109, 6111], [6122, 6127], [6138, 6159], [6170, 6175], [6264, 6271],
+ [6315, 6319], [6390, 6399], [6429, 6469], [6510, 6511], [6517, 6527], [6572, 6592],
+ [6600, 6607], [6619, 6655], [6679, 6687], [6741, 6783], [6794, 6799], [6810, 6822],
+ [6824, 6916], [6964, 6980], [6988, 6991], [7002, 7042], [7073, 7085], [7098, 7167],
+ [7204, 7231], [7242, 7244], [7294, 7400], [7410, 7423], [7616, 7679], [7958, 7959],
+ [7966, 7967], [8006, 8007], [8014, 8015], [8062, 8063], [8127, 8129], [8141, 8143],
+ [8148, 8149], [8156, 8159], [8173, 8177], [8189, 8303], [8306, 8307], [8314, 8318],
+ [8330, 8335], [8341, 8449], [8451, 8454], [8456, 8457], [8470, 8472], [8478, 8483],
+ [8506, 8507], [8512, 8516], [8522, 8525], [8586, 9311], [9372, 9449], [9472, 10101],
+ [10132, 11263], [11493, 11498], [11503, 11516], [11518, 11519], [11558, 11567],
+ [11622, 11630], [11632, 11647], [11671, 11679], [11743, 11822], [11824, 12292],
+ [12296, 12320], [12330, 12336], [12342, 12343], [12349, 12352], [12439, 12444],
+ [12544, 12548], [12590, 12592], [12687, 12689], [12694, 12703], [12728, 12783],
+ [12800, 12831], [12842, 12880], [12896, 12927], [12938, 12976], [12992, 13311],
+ [19894, 19967], [40908, 40959], [42125, 42191], [42238, 42239], [42509, 42511],
+ [42540, 42559], [42592, 42593], [42607, 42622], [42648, 42655], [42736, 42774],
+ [42784, 42785], [42889, 42890], [42893, 43002], [43043, 43055], [43062, 43071],
+ [43124, 43137], [43188, 43215], [43226, 43249], [43256, 43258], [43260, 43263],
+ [43302, 43311], [43335, 43359], [43389, 43395], [43443, 43470], [43482, 43519],
+ [43561, 43583], [43596, 43599], [43610, 43615], [43639, 43641], [43643, 43647],
+ [43698, 43700], [43703, 43704], [43710, 43711], [43715, 43738], [43742, 43967],
+ [44003, 44015], [44026, 44031], [55204, 55215], [55239, 55242], [55292, 55295],
+ [57344, 63743], [64046, 64047], [64110, 64111], [64218, 64255], [64263, 64274],
+ [64280, 64284], [64434, 64466], [64830, 64847], [64912, 64913], [64968, 65007],
+ [65020, 65135], [65277, 65295], [65306, 65312], [65339, 65344], [65371, 65381],
+ [65471, 65473], [65480, 65481], [65488, 65489], [65496, 65497]];
+ for (i = 0; i < ranges.length; i++) {
+ start = ranges[i][0];
+ end = ranges[i][1];
+ for (j = start; j <= end; j++) {
+ result[j] = true;
+ }
+ }
+ return result;
+})();
+
+function splitQuery(query) {
+ var result = [];
+ var start = -1;
+ for (var i = 0; i < query.length; i++) {
+ if (splitChars[query.charCodeAt(i)]) {
+ if (start !== -1) {
+ result.push(query.slice(start, i));
+ start = -1;
+ }
+ } else if (start === -1) {
+ start = i;
+ }
+ }
+ if (start !== -1) {
+ result.push(query.slice(start));
+ }
+ return result;
+}
+
+
+
+
/**
* Search Module
*/
@@ -324,7 +424,7 @@ var Search = {
var searchterms = [];
var excluded = [];
var hlterms = [];
- var tmp = query.split(/\W+/);
+ var tmp = splitQuery(query);
var objectterms = [];
for (i = 0; i < tmp.length; i++) {
if (tmp[i] !== "") {
diff --git a/doc/algorithm/rnn/rnn.html b/doc/algorithm/rnn/rnn.html
index c969e63e5cddcf7975211b0a2d1a782a689ea8f6..f87122a4824f4f23150b06fcc2b0ec793b9fa152 100644
--- a/doc/algorithm/rnn/rnn.html
+++ b/doc/algorithm/rnn/rnn.html
@@ -330,7 +330,7 @@ Its output function simply takes \(x_t\)