diff --git a/doc/.buildinfo b/doc/.buildinfo new file mode 100644 index 0000000000000000000000000000000000000000..6b7cc993be81e57077d5ba922dd2de70629cf223 --- /dev/null +++ b/doc/.buildinfo @@ -0,0 +1,4 @@ +# Sphinx build info version 1 +# This file hashes the configuration used when building these files. When it is not found, a full rebuild will be done. +config: 5bb206a2182263ffcb7c4270c50bc7c9 +tags: 645f666f9bcd5a90fca523b33c5a78b7 diff --git a/doc/_images/FullyConnected.jpg b/doc/_images/FullyConnected.jpg new file mode 100644 index 0000000000000000000000000000000000000000..b2241f401434e527f95ee4e0e541a3f2ff78fd1e Binary files /dev/null and b/doc/_images/FullyConnected.jpg differ diff --git a/doc/_images/NetContinuous_en.png b/doc/_images/NetContinuous_en.png new file mode 100644 index 0000000000000000000000000000000000000000..69be34f962c51d233548f0bc74990cd4853e5260 Binary files /dev/null and b/doc/_images/NetContinuous_en.png differ diff --git a/doc/_images/NetConv_en.png b/doc/_images/NetConv_en.png new file mode 100644 index 0000000000000000000000000000000000000000..01fe4d725df6e1debbdf3427e61a4a41987e6003 Binary files /dev/null and b/doc/_images/NetConv_en.png differ diff --git a/doc/_images/NetLR_en.png b/doc/_images/NetLR_en.png new file mode 100644 index 0000000000000000000000000000000000000000..d6dc01f87e1f33e51232d7a26fd75c1be004caea Binary files /dev/null and b/doc/_images/NetLR_en.png differ diff --git a/doc/_images/NetRNN_en.png b/doc/_images/NetRNN_en.png new file mode 100644 index 0000000000000000000000000000000000000000..8c11b8ae0c3d32c48fcca6de47ea12ffbbaa106a Binary files /dev/null and b/doc/_images/NetRNN_en.png differ diff --git a/doc/_images/PipelineNetwork_en.jpg b/doc/_images/PipelineNetwork_en.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e779aed06d5cdb2b442754e7915e79b72946418e Binary files /dev/null and b/doc/_images/PipelineNetwork_en.jpg differ diff --git a/doc/_images/PipelineTest_en.png b/doc/_images/PipelineTest_en.png new file mode 100644 index 0000000000000000000000000000000000000000..35fa4314d68fdc3d3adba32adecfc7ce88246cbe Binary files /dev/null and b/doc/_images/PipelineTest_en.png differ diff --git a/doc/_images/PipelineTrain_en.png b/doc/_images/PipelineTrain_en.png new file mode 100644 index 0000000000000000000000000000000000000000..c443f416821d9e637504c5a72f0bc20f69a8367e Binary files /dev/null and b/doc/_images/PipelineTrain_en.png differ diff --git a/doc/_images/Pipeline_en.jpg b/doc/_images/Pipeline_en.jpg new file mode 100644 index 0000000000000000000000000000000000000000..21a7a7bb6a1af746120e6f4f51f797b6aaafb9d8 Binary files /dev/null and b/doc/_images/Pipeline_en.jpg differ diff --git a/doc/_images/bi_lstm.jpg b/doc/_images/bi_lstm.jpg new file mode 100644 index 0000000000000000000000000000000000000000..adec1606d64d6e35ffe7e62abfa9a09309b05c84 Binary files /dev/null and b/doc/_images/bi_lstm.jpg differ diff --git a/doc/_images/cifar.png b/doc/_images/cifar.png new file mode 100644 index 0000000000000000000000000000000000000000..f54a0c58837cb3385b32dc57d02cec92666ef0f1 Binary files /dev/null and b/doc/_images/cifar.png differ diff --git a/doc/_images/encoder-decoder-attention-model.png b/doc/_images/encoder-decoder-attention-model.png new file mode 100644 index 0000000000000000000000000000000000000000..79f911d4ba12ac0c0d1a936c9df639c302786914 Binary files /dev/null and b/doc/_images/encoder-decoder-attention-model.png differ diff --git a/doc/_images/feature.jpg b/doc/_images/feature.jpg new file mode 100644 index 
0000000000000000000000000000000000000000..0e3310e4ace5613917e7779d3198ccbb3cdc5ada Binary files /dev/null and b/doc/_images/feature.jpg differ diff --git a/doc/_images/image_classification.png b/doc/_images/image_classification.png new file mode 100644 index 0000000000000000000000000000000000000000..14f255805081c1b4fab27eaf336fd389fa93ca19 Binary files /dev/null and b/doc/_images/image_classification.png differ diff --git a/doc/_images/lenet.png b/doc/_images/lenet.png new file mode 100644 index 0000000000000000000000000000000000000000..1e6f2b32bad797f3fccb929c72a121fc935b0cbb Binary files /dev/null and b/doc/_images/lenet.png differ diff --git a/doc/_images/lstm.png b/doc/_images/lstm.png new file mode 100644 index 0000000000000000000000000000000000000000..aaf1fc690da2ffb8418cde5ed81848ddb5263030 Binary files /dev/null and b/doc/_images/lstm.png differ diff --git a/doc/_images/network_arch.png b/doc/_images/network_arch.png new file mode 100644 index 0000000000000000000000000000000000000000..4ae7864212f2a0a38102ee7ff600527ea99fec82 Binary files /dev/null and b/doc/_images/network_arch.png differ diff --git a/doc/_images/neural-n-gram-model.png b/doc/_images/neural-n-gram-model.png new file mode 100644 index 0000000000000000000000000000000000000000..f70b765b3fd69816345a79fc59adfea46008dbfd Binary files /dev/null and b/doc/_images/neural-n-gram-model.png differ diff --git a/doc/_images/plot.png b/doc/_images/plot.png new file mode 100644 index 0000000000000000000000000000000000000000..a31f99791c670e18bb8c62b7604ec8cb0284ffb4 Binary files /dev/null and b/doc/_images/plot.png differ diff --git a/doc/_images/rec_regression_network.png b/doc/_images/rec_regression_network.png new file mode 100644 index 0000000000000000000000000000000000000000..7d2b54d4fcf560cd5b667628f0012c3822efd9b2 Binary files /dev/null and b/doc/_images/rec_regression_network.png differ diff --git a/doc/_images/resnet_block.jpg b/doc/_images/resnet_block.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e16bd3c624030c4c09b358a015b491141b42d8f1 Binary files /dev/null and b/doc/_images/resnet_block.jpg differ diff --git a/doc/_images/stacked_lstm.jpg b/doc/_images/stacked_lstm.jpg new file mode 100644 index 0000000000000000000000000000000000000000..4239055050966e0095e188a8c81d860711bce29d Binary files /dev/null and b/doc/_images/stacked_lstm.jpg differ diff --git a/doc/_sources/build/build_from_source.txt b/doc/_sources/build/build_from_source.txt new file mode 100644 index 0000000000000000000000000000000000000000..5ab97d1e167990fc55bd513a8730750ed549d1bd --- /dev/null +++ b/doc/_sources/build/build_from_source.txt @@ -0,0 +1,136 @@ +Build and Install +================= + +## Requirement + +### Dependents + +- **CMake**: required for 2.8+ version +- **g++**: a recent c++ compiler supporting c++11, >= 4.6, < 5 +- **BLAS library**: such as openBLAS, MKL, ATLAS +- **protobuf**: required for 2.4+ version, 3.x is not supported +- **python**: currently only 2.7 version is supported + +### Optional + +PaddlePaddle also support some build options, you have to install related libraries. 
+ +- **WITH_GPU**: Compile with GPU mode + - The GPU version works best with Cuda Toolkit 7.5 and cuDNN v5 + - Older versions, Cuda Toolkit 6.5, 7.0 and cuDNN v2, v3, v4, are also supported + - Note: cuDNN v5 requires Cuda Toolkit 7.5, and vice versa +- **WITH_DOUBLE**: Compile with double precision, otherwise use single precision +- **WITH_GLOG**: Compile with glog, otherwise use the internal logging implementation +- **WITH_GFLAGS**: Compile with gflags, otherwise use the internal flags implementation +- **WITH_TESTING**: Compile with gtest and run the PaddlePaddle unit tests +- **WITH_DOC**: Compile the documentation +- **WITH_SWIG_PY**: Compile the Python prediction API +- **WITH_STYLE_CHECK**: Enable style checks on the source code + + +## Building on Ubuntu 14.04 + +### Install Dependencies + +- **CPU Dependencies** + +```bash +# necessary +sudo apt-get update +sudo apt-get install -y g++ make cmake build-essential libatlas-base-dev python python-pip libpython-dev m4 libprotobuf-dev protobuf-compiler python-protobuf python-numpy git +# optional +sudo apt-get install libgoogle-glog-dev +sudo apt-get install libgflags-dev +sudo apt-get install libgtest-dev +pushd /usr/src/gtest +cmake . +make +sudo cp *.a /usr/lib +popd +``` + + +- **GPU Dependencies (optional)** + +To build the GPU version, you first need a machine with a GPU and CUDA installed. +You also need to install cuDNN. + +You can download the CUDA toolkit and cuDNN from the NVIDIA website: + +```bash +https://developer.nvidia.com/cuda-downloads +https://developer.nvidia.com/cudnn +``` +Copy the cuDNN files into the CUDA toolkit directory, for example: + +```bash +sudo tar -xzf cudnn-7.5-linux-x64-v5.1.tgz -C /usr/local +sudo chmod a+r /usr/local/cuda/include/cudnn.h /usr/local/cuda/lib64/libcudnn* +``` +Then set the LD\_LIBRARY\_PATH, CUDA\_HOME and PATH environment variables in ~/.bashrc. + +```bash +export LD_LIBRARY_PATH=/usr/local/cuda/lib64:$LD_LIBRARY_PATH +export CUDA_HOME=/usr/local/cuda +export PATH=/usr/local/cuda/bin:$PATH +``` +- **Python Dependencies (optional)** + +If you want to compile PaddlePaddle with the Python prediction API, add -DWITH_SWIG_PY=ON to the cmake command and install swig first: + +```bash +sudo apt-get install swig +``` + +- **Doc Dependencies (optional)** + +If you want to compile the PaddlePaddle documentation, add -DWITH_DOC=ON to the cmake command and install these first: + +```bash +pip install sphinx +pip install sphinx_rtd_theme breathe recommonmark +sudo apt-get install python-sphinx doxygen +``` + +### Build and Install + +CMake will look for dependent libraries in the system default paths first. After installing an optional library, the corresponding build option will automatically be turned on (such as glog, gtest and gflags). If libraries are not found, you have to set the following variables manually on the cmake command line (CUDNN_ROOT, ATLAS_ROOT, MKL_ROOT, OPENBLAS_ROOT). + +Here are some examples of the cmake command with different options: + +**CPU only** + +```bash +cmake -DWITH_GPU=OFF -DWITH_DOC=OFF +``` + +**GPU** + +```bash +cmake -DWITH_GPU=ON -DWITH_DOC=OFF +``` + +**GPU with doc and swig** + +```bash +cmake -DWITH_GPU=ON -DWITH_DOC=ON -DWITH_SWIG_PY=ON +``` + +Finally, download the source code and build: + +```bash +git clone https://github.com/baidu/Paddle paddle +cd paddle +mkdir build +cd build +# you can add build options here, such as: +cmake -DWITH_GPU=ON -DWITH_DOC=OFF -DCMAKE_INSTALL_PREFIX= .. 
+make -j `nproc` && make install +``` +**Note** + +If you set WITH_SWIG_PY=ON, you also have to install the corresponding Python prediction API package afterwards: + +```bash +pip install /opt/paddle/share/wheels/*.whl +``` diff --git a/doc/_sources/build/contribute_to_paddle.txt b/doc/_sources/build/contribute_to_paddle.txt new file mode 100644 index 0000000000000000000000000000000000000000..ea5b104255a4f15404ceaeceaf6b8ec80c8383fe --- /dev/null +++ b/doc/_sources/build/contribute_to_paddle.txt @@ -0,0 +1,83 @@ +# Contribute to PaddlePaddle + +We sincerely appreciate your contributions. You can use the fork and pull request +workflow to merge your code. + +## Code Requirements +- Your code must be fully documented in + [doxygen](http://www.stack.nl/~dimitri/doxygen/) style. +- Make sure the compiler option WITH\_STYLE\_CHECK is on and the compiler + passes the code style check. +- All code must have unit tests. +- Pass all unit tests. + +The following tutorial guides you through submitting your contribution. + +## [Creating a Fork](https://help.github.com/articles/fork-a-repo/) + +Just head over to the GitHub page and click the "Fork" button. +It's just that simple. + +## Clone + +Once you've created a fork, you can use your favorite git client to clone your +repo or just head straight to the command line: + +```shell +# Clone your fork to your local machine +git clone git@github.com:USERNAME/paddle.git +``` +Then you can start to develop. + +## Commit + +Commit your changes with the following commands: + +```shell +# show the working tree status +git status +# add modified files +git add xx +git commit -m "commit info" +``` +The first line of the commit message is the title. The second and later lines +are the details, if any. + +## Keeping Fork Up to Date + +Before opening your pull request, you should sync your code with the latest Paddle. +To do this, you'll need to add a remote first: + +```shell +# see the current configured remote repository +git remote -v +# add upstream repository +git remote add upstream https://github.com/paddle/paddle.git +# verify the new upstream +git remote -v +``` + +Update your fork with the latest upstream changes: + +```shell +git fetch upstream +git pull upstream master +``` + +If there are no unique commits locally, git will simply perform a fast-forward. +However, if you have been making changes (in the vast majority of cases you +probably shouldn't be), you may have to deal with conflicts. + +Now, your local master branch is up-to-date with everything modified upstream. + +## Push to GitHub + +```shell +# push to your repository on GitHub +git push origin master +``` + +## Pull Request + +Go to the page for your fork on GitHub, select your development branch, +and click the **pull request button**. diff --git a/doc/_sources/build/index.txt b/doc/_sources/build/index.txt new file mode 100644 index 0000000000000000000000000000000000000000..57d3d3bfc4db04db6e8fa9d56d9d2760523bb531 --- /dev/null +++ b/doc/_sources/build/index.txt @@ -0,0 +1,32 @@ +Build And Install PaddlePaddle +================================ + +Install PaddlePaddle +---------------------- + +.. toctree:: + :glob: + + install_* + +Build from Source +----------------- + +If you want to hack on and contribute to the PaddlePaddle source code, the following guides can help you\: + +.. toctree:: + :glob: + + build_from_source.md + internal/contribute_code.md + contribute_to_paddle.md + +Build Docker Images +------------------- + +If you want to build a docker image, the following guide can help you\: + +.. 
toctree:: + :glob: + + docker/* diff --git a/doc/_sources/cluster/index.txt b/doc/_sources/cluster/index.txt new file mode 100644 index 0000000000000000000000000000000000000000..942248665cbcd05fb9cd8424eea818018035495b --- /dev/null +++ b/doc/_sources/cluster/index.txt @@ -0,0 +1,8 @@ +Cluster Train +==================== + +.. toctree:: + :glob: + + internal/index.md + opensource/cluster_train.md diff --git a/doc/_sources/cluster/opensource/cluster_train.txt b/doc/_sources/cluster/opensource/cluster_train.txt new file mode 100644 index 0000000000000000000000000000000000000000..af447339842529aebe4153a6d0733b37c6d139d9 --- /dev/null +++ b/doc/_sources/cluster/opensource/cluster_train.txt @@ -0,0 +1,137 @@ +# Cluster Training + +We provide these simple scripts to help you launch a cluster training job and harness PaddlePaddle's distributed training. For MPI and other cluster schedulers, you can refer to this basic script to implement a more robust cluster training platform yourself. + +The following cluster demo is based on the RECOMMENDATION local training demo in the PaddlePaddle ```demo/recommendation``` directory. The instructions assume you are in the cluster_scripts/ directory. + +## Pre-requirements + +First, install fabric: + +```bash +pip install fabric +``` + +Second, go through the install scripts to install PaddlePaddle on all nodes and make sure the demo can run in local mode. + +Then prepare the same ROOT_DIR directory on all nodes. ROOT_DIR is set in cluster_scripts/conf.py. Assuming that ROOT_DIR = /home/paddle, you can also create a ```paddle``` user account, so that ```paddle.py``` can open ssh connections to all nodes as the ```paddle``` user automatically. + +Finally, you can set up mutual ssh trust between all nodes for password-less login; otherwise a ```password``` must be provided to ```paddle.py``` at runtime. + +## Prepare Job Workspace + +A ```job workspace``` is defined as one packaged directory which contains dependency libraries, training data, test data, the model config file and all other related file dependencies. + +The ```train/test``` data should be prepared before launching the cluster job. To allow train/test data to be placed in a different directory from the workspace, PaddlePaddle locates train/test data through the index files ```train.list/test.list``` that are used in the model config file. So the train/test data also includes the two list files train.list/test.list. Every local training demo already provides scripts to help you create these two files, and all nodes of a cluster job normally handle the files with the same logic. + +Generally, you can use the same model file from local training for cluster training. Keep in mind that, if synchronous SGD is used, the ```batch_size``` set in the ```setting``` function of the model file means the batch size on ```each``` node of the cluster job, not the total batch size. + +The following steps are based on the demo/recommendation demo in the demo directory. + +Just go through the demo/recommendation tutorial doc up to the ```Train``` section; at that point you will have the train/test data and the model configuration file. You can also place the paddle binaries and related dependency files in this demo/recommendation directory. Finally, just use demo/recommendation as the workspace for cluster training. + +In the end, your workspace should look as follows: +``` +. 
+|-- conf +| `-- trainer_config.conf +|-- test +| |-- dnn_instance_000000 +|-- test.list +|-- train +| |-- dnn_instance_000000 +| |-- dnn_instance_000001 +`-- train.list +``` +```conf/trainer_config.conf``` +The model config file. + +```test``` and ```train``` +Train/test data. Each node should own a different part of all the training data. This simple script does not do that splitting for you, so you should prepare it yourself. All test data should be placed on node 0 only. + +```train.list``` and ```test.list``` +File indexes. They store the relative or absolute file paths of all train/test data on the current node. + + + +## Prepare Cluster Job Configuration + +Several options must be carefully set in cluster_scripts/conf.py: + +```HOSTS``` hostnames or IPs of all nodes that will run the cluster job. You can also append a user and ssh port to the hostname, such as root@192.168.100.17:9090. + +```ROOT_DIR``` the ROOT directory under which the job workspace directory is placed + +```PADDLE_NIC``` the NIC (Network Interface Card) interface name for the cluster communication channel, such as eth0 for Ethernet, ib0 for InfiniBand. + +```PADDLE_PORT``` port number for the cluster communication channel + +```PADDLE_PORTS_NUM``` the number of ports used for the cluster communication channel. If the number of cluster nodes is small (fewer than 5~6 nodes), we recommend setting it larger, such as 2~8, for better network performance. + +```PADDLE_PORTS_NUM_FOR_SPARSE``` the number of ports used for the sparse updater's cluster communication channel. If sparse remote update is used, set it like ```PADDLE_PORTS_NUM``` + +The default configuration is as follows: + +```python +HOSTS = [ + "root@192.168.100.17", + "root@192.168.100.18", + ] + +''' +workspace configuration +''' + +#root dir for workspace +ROOT_DIR = "/home/paddle" + +''' +network configuration +''' +#pserver nics +PADDLE_NIC = "eth0" +#pserver port +PADDLE_PORT = 7164 +#pserver ports num +PADDLE_PORTS_NUM = 2 +#pserver sparse ports num +PADDLE_PORTS_NUM_FOR_SPARSE = 2 +``` + +### Launching Cluster Job +```paddle.py``` provides automated scripts to start all PaddlePaddle cluster processes on the different nodes. By default, all command line options can be set as ```paddle.py``` command options, and ```paddle.py``` will transparently and automatically pass these options to the lower-level PaddlePaddle processes. + +```paddle.py``` provides two dedicated command options for easy job launching. + +```job_dispatch_package``` set it to a local ```workspace``` directory; it will be dispatched to all nodes set in conf.py. This is helpful when you hack the workspace files frequently, since repeated manual multi-node workspace deployment quickly becomes tedious. +```job_workspace``` set it to an already deployed workspace directory; ```paddle.py``` will skip the dispatch stage and directly launch the cluster job on all nodes. It helps to avoid heavy dispatch latency. + +```cluster_scripts/run.sh``` provides a command line sample for running the ```demo/recommendation``` cluster job; just modify ```job_dispatch_package``` and ```job_workspace``` to point at your own directories, then: +``` +sh run.sh +``` + +The cluster job will start within several seconds. + +### Kill Cluster Job +```paddle.py``` catches the ```Ctrl + C``` SIGINT signal and automatically kills all processes launched by it. So just stop ```paddle.py``` to kill the cluster job. + +### Check Cluster Training Result +Check the logs in $workspace/log for details; each node has the same log structure. 
+ +```paddle_trainer.INFO``` +It provides almost all internal output logs for training, same as local training. Check runtime model convergence here. + +```paddle_pserver2.INFO``` +It provides the pserver running log, which can help diagnose distributed errors. + +```server.log``` +It provides the stderr and stdout of the pserver process. Check the error log if training crashes. + +```train.log``` +It provides the stderr and stdout of the trainer process. Check the error log if training crashes. + +### Check Model Output +After one pass finishes, model files will be written to the ```output``` directory on node 0. +```nodefile``` in the workspace indicates the node id of the current cluster job. diff --git a/doc/_sources/demo/embedding_model/index.txt b/doc/_sources/demo/embedding_model/index.txt new file mode 100644 index 0000000000000000000000000000000000000000..b544492e47fa7b9255b91401678ce468ee0c6aa3 --- /dev/null +++ b/doc/_sources/demo/embedding_model/index.txt @@ -0,0 +1,139 @@ +# Chinese Word Embedding Model Tutorial # +---------- +This tutorial guides you through using a pretrained Chinese word embedding model in the PaddlePaddle standard format. + +We thank @lipeng for the pull request that defined the model schemas and pretrained the models. + +## Introduction ## +### Chinese Word Dictionary ### +Our Chinese word dictionary is built from Baidu ZhiDao and Baidu Baike using an in-house word segmenter. For example, "《红楼梦》" is segmented into "《", "红楼梦", "》", and "《红楼梦》". Our dictionary (in UTF-8 format) has two columns: the word and its frequency. The total word count is 3206325, including 3 special tokens: + - ``: the start of a sequence + - ``: the end of a sequence + - ``: a word not included in the dictionary + +### Pretrained Chinese Word Embedding Model ### +Inspired by the paper [A Neural Probabilistic Language Model](http://www.jmlr.org/papers/volume3/bengio03a/bengio03a.pdf), our model architecture (**embedding joint of six words -> FullyConnect -> SoftMax**) is shown in the following graph; a small numerical sketch of this forward pass follows Figure 1. For our dictionary, we pretrain four models with different word vector dimensions, i.e. 32, 64, 128 and 256. +
![](./neural-n-gram-model.png)
+
Figure 1. neural-n-gram-model
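To make the architecture in Figure 1 concrete, here is a minimal numpy sketch of the described forward pass: the embeddings of six context words are joined, passed through a fully connected layer, and normalized with softmax. The vocabulary size and the randomly initialized weights are toy stand-ins for illustration only, not the pretrained parameters.

```python
import numpy as np

vocab, emb_dim, context = 1000, 32, 6   # toy vocabulary; the real dictionary has 3206325 words
np.random.seed(0)
E = np.random.randn(vocab, emb_dim) * 0.01            # word embedding table
W = np.random.randn(context * emb_dim, vocab) * 0.01  # fully connected projection
b = np.zeros(vocab)

def ngram_forward(word_ids):
    """Join six context-word embeddings, project, and softmax over the vocabulary."""
    x = E[word_ids].reshape(-1)          # (6 * 32,) joint embedding of the six words
    logits = x.dot(W) + b
    p = np.exp(logits - logits.max())    # numerically stable softmax
    return p / p.sum()                   # probability distribution over the next word

probs = ngram_forward([5, 17, 42, 3, 99, 7])
print(probs.shape)  # (1000,)
```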
+ +### Download and Extract ### +To download and extract our dictionary and pretrained model, run the following commands. + + cd $PADDLE_ROOT/demo/model_zoo/embedding + ./pre_DictAndModel.sh + +## Chinese Paraphrasing Example ## +We provide a paraphrasing task to show the usage of pretrained Chinese Word Dictionary and Embedding Model. + +### Data Preparation and Preprocess ### + +First, run the following commands to download and extract the in-house dataset. The dataset (using UTF-8 format) has 20 training samples, 5 testing samples and 2 generating samples. + + cd $PADDLE_ROOT/demo/seqToseq/data + ./paraphrase_data.sh + +Second, preprocess data and build dictionary on train data by running the following commands, and the preprocessed dataset is stored in `$PADDLE_SOURCE_ROOT/demo/seqToseq/data/pre-paraphrase`: + + cd $PADDLE_ROOT/demo/seqToseq/ + python preprocess.py -i data/paraphrase [--mergeDict] + +- `--mergeDict`: if using this option, the source and target dictionary are merged, i.e, two dictionaries have the same context. Here, as source and target data are all chinese words, this option can be used. + +### User Specified Embedding Model ### +The general command of extracting desired parameters from the pretrained embedding model based on user dictionary is: + + cd $PADDLE_ROOT/demo/model_zoo/embedding + python extract_para.py --preModel PREMODEL --preDict PREDICT --usrModel USRMODEL--usrDict USRDICT -d DIM + +- `--preModel PREMODEL`: the name of pretrained embedding model +- `--preDict PREDICT`: the name of pretrained dictionary +- `--usrModel USRMODEL`: the name of extracted embedding model +- `--usrDict USRDICT`: the name of user specified dictionary +- `-d DIM`: dimension of parameter + +Here, you can simply run the command: + + cd $PADDLE_ROOT/demo/seqToseq/data/ + ./paraphase_model.sh + +And you will see following embedding model structure: + + paraphase_model + |--- _source_language_embedding + |--- _target_language_embedding + +### Training Model in PaddlePaddle ### +First, create a model config file, see example `demo/seqToseq/paraphrase/train.conf`: + + from seqToseq_net import * + is_generating = False + + ################## Data Definition ##################### + train_conf = seq_to_seq_data(data_dir = "./data/pre-paraphrase", + job_mode = job_mode) + + ############## Algorithm Configuration ################## + settings( + learning_method = AdamOptimizer(), + batch_size = 50, + learning_rate = 5e-4) + + ################# Network configure ##################### + gru_encoder_decoder(train_conf, is_generating, word_vector_dim = 32) + +This config is almost the same as `demo/seqToseq/translation/train.conf`. + +Then, train the model by running the command: + + cd $PADDLE_SOURCE_ROOT/demo/seqToseq/paraphrase + ./train.sh + +where `train.sh` is almost the same as `demo/seqToseq/translation/train.sh`, the only difference is following two command arguments: + +- `--init_model_path`: path of the initialization model, here is `data/paraphase_model` +- `--load_missing_parameter_strategy`: operations when model file is missing, here use a normal distibution to initialize the other parameters except for the embedding layer + +For users who want to understand the dataset format, model architecture and training procedure in detail, please refer to [Text generation Tutorial](text_generation.md). 
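Conceptually, the extraction step described above (`extract_para.py`) selects, for every word in the user dictionary, the corresponding row of the pretrained embedding matrix. Below is a minimal numpy sketch of that row-selection idea; the word lists, token spellings and array shapes are made-up stand-ins, and the real script additionally handles PaddlePaddle's binary parameter format.

```python
import numpy as np

# toy stand-ins for the pretrained dictionary and its embedding matrix
pre_dict = {"<s>": 0, "<e>": 1, "<unk>": 2, "红楼梦": 3, "显示器": 4}
pre_emb = np.arange(5 * 4, dtype=np.float32).reshape(5, 4)   # 5 words x dimension 4

usr_dict = ["<s>", "<e>", "<unk>", "红楼梦", "电脑"]           # user-specified dictionary
unk = pre_dict["<unk>"]

# pick the pretrained row for each user word, falling back to the unknown-word row
rows = [pre_dict.get(w, unk) for w in usr_dict]
usr_emb = pre_emb[rows]
print(usr_emb.shape)   # (5, 4): one embedding row per user-dictionary word
```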
+ +## Optional Function ## +### Embedding Parameters Observation +For users who want to observe the embedding parameters, this function can convert a PaddlePaddle binary embedding model to a text model by running the command: + + cd $PADDLE_ROOT/demo/model_zoo/embedding + python paraconvert.py --b2t -i INPUT -o OUTPUT -d DIM + +- `-i INPUT`: the name of input binary embedding model +- `-o OUTPUT`: the name of output text embedding model +- `-d DIM`: the dimension of parameter + +You will see parameters like this in output text model: + + 0,4,32156096 + -0.7845433,1.1937413,-0.1704215,0.4154715,0.9566584,-0.5558153,-0.2503305, ...... + 0.0000909,0.0009465,-0.0008813,-0.0008428,0.0007879,0.0000183,0.0001984, ...... + ...... + +- 1st line is **PaddlePaddle format file head**, it has 3 attributes: + - version of PaddlePaddle, here is 0 + - sizeof(float), here is 4 + - total number of parameter, here is 32156096 +- Other lines print the paramters (assume `` = 32) + - each line print 32 paramters splitted by ',' + - there is 32156096/32 = 1004877 lines, meaning there is 1004877 embedding words + +### Embedding Parameters Revision +For users who want to revise the embedding parameters, this function can convert a revised text embedding model to a PaddlePaddle binary model by running the command: + + cd $PADDLE_ROOT/demo/model_zoo/embedding + python paraconvert.py --t2b -i INPUT -o OUTPUT + +- `-i INPUT`: the name of input text embedding model. +- `-o OUTPUT`: the name of output binary embedding model + +Note that the format of input text model is as follows: + + -0.7845433,1.1937413,-0.1704215,0.4154715,0.9566584,-0.5558153,-0.2503305, ...... + 0.0000909,0.0009465,-0.0008813,-0.0008428,0.0007879,0.0000183,0.0001984, ...... + ...... +- there is no file header in 1st line +- each line stores parameters for one word, the separator is commas ',' diff --git a/doc/_sources/demo/image_classification/image_classification.txt b/doc/_sources/demo/image_classification/image_classification.txt new file mode 100644 index 0000000000000000000000000000000000000000..6da20da8a8559c2a86a63e0e7287d165af8f8d40 --- /dev/null +++ b/doc/_sources/demo/image_classification/image_classification.txt @@ -0,0 +1,199 @@ +#Image Classification Tutorial + +This tutorial will guide you through training a convolutional neural network to classify objects using the CIFAR-10 image classification dataset. +As shown in the following figure, the convolutional neural network can recognize the main object in images, and output the classification result. + +
![Image Classification](./image_classification.png)
+ +## Data Preparation +First, download CIFAR-10 dataset. CIFAR-10 dataset can be downloaded from its official website. + + + +We have prepared a script to download and process CIFAR-10 dataset. The script will download CIFAR-10 dataset from the official dataset. +It will convert it to jpeg images and organize them into a directory with the required structure for the tutorial. Make sure that you have installed the python dependency (PIL). + +```bash +cd demo/image_classification/data/ +sh download_cifar.sh +``` + +The CIFAR-10 dataset consists of 60000 32x32 color images in 10 classes, with 6000 images per class. There are 50000 training images and 10000 test images. + +Here are the classes in the dataset, as well as 10 random images from each: +
![Image Classification](./cifar.png)
+ + +After downloading and converting, we should find a directory (cifar-out) containing the dataset in the following format: + +``` +train +---airplane +---automobile +---bird +---cat +---deer +---dog +---frog +---horse +---ship +---truck +test +---airplane +---automobile +---bird +---cat +---deer +---dog +---frog +---horse +---ship +---truck +``` + +It has two directories:`train` and `test`. These two directories contain training data and testing data of CIFAR-10, respectively. Each of these two folders contains 10 sub-folders, ranging from `airplane` to `truck`. Each sub-folder contains images with the corresponding label. After the images are organized into this structure, we are ready to train an image classification model. + +## Preprocess +After the data has been downloaded, it needs to be pre-processed into the Paddle format. We can run the following command for preprocessing. + +``` +cd demo/image_classification/ +sh preprocess.sh +``` + +`preprocess.sh` calls `./demo/image_classification/preprocess.py` to preprocess image data. +```sh +export PYTHONPATH=$PYTHONPATH:../../ +data_dir=./data/cifar-out +python preprocess.py -i $data_dir -s 32 -c 1 +``` + +`./demo/image_classification/preprocess.py` has the following arguments + +- `-i` or `--input` specifes the input data directory. +- `-s` or `--size` specifies the processed size of images. +- `-c` or `--color` specifes whether images are color images or gray images. + + +## Model Training +We need to create a model config file before training the model. An example of the config file (vgg_16_cifar.py) is listed below. **Note**, it is slightly different from the `vgg_16_cifar.py` which also applies to the prediction. + +```python +from paddle.trainer_config_helpers import * +data_dir='data/cifar-out/batches/' +meta_path=data_dir+'batches.meta' +args = {'meta':meta_path, 'mean_img_size': 32, + 'img_size': 32, 'num_classes': 10, + 'use_jpeg': 1, 'color': "color"} +define_py_data_sources2(train_list=data_dir+"train.list", + test_list=data_dir+'test.list', + module='image_provider', + obj='processData', + args=args) +settings( + batch_size = 128, + learning_rate = 0.1 / 128.0, + learning_method = MomentumOptimizer(0.9), + regularization = L2Regularization(0.0005 * 128)) + +img = data_layer(name='image', size=3*32*32) +lbl = data_layer(name="label", size=10) +# small_vgg is predined in trainer_config_helpers.network +predict = small_vgg(input_image=img, num_channels=3) +outputs(classification_cost(input=predict, label=lbl)) +``` + +The first line imports python functions for defining networks. +```python +from paddle.trainer_config_helpers import * +``` + +Then define an `define_py_data_sources2` which use python data provider +interface. The arguments in `args` are used in `image_provider.py` which +yeilds image data and transform them to Paddle. + - `meta`: the mean value of training set. + - `mean_img_size`: the size of mean feature map. + - `img_size`: the height and width of input image. + - `num_classes`: the number of classes. + - `use_jpeg`: the data storage type when preprocessing. + - `color`: specify color image. + +`settings` specifies the training algorithm. In the following example, +it specifies learning rate as 0.1, but divided by batch size, and the weight decay +is 0.0005 and multiplied by batch size. +```python +settings( + batch_size = 128, + learning_rate = 0.1 / 128.0, + learning_method = MomentumOptimizer(0.9), + regularization = L2Regularization(0.0005 * 128) +) +``` + +The `small_vgg` specifies the network. 
We use a small version of VGG convolutional network as our network +for classification. A description of VGG network can be found here [http://www.robots.ox.ac.uk/~vgg/research/very_deep/](http://www.robots.ox.ac.uk/~vgg/research/very_deep/). +```python +# small_vgg is predined in trainer_config_helpers.network +predict = small_vgg(input_image=img, num_channels=3) +``` +After writing the config, we can train the model by running the script train.sh. Notice that the following script assumes the you run the script in the `./demo/image_classification` folder. If you run the script in a different folder, you need to change the paths of the scripts and the configuration files accordingly. + +```bash +config=vgg_16_cifar.py +output=./cifar_vgg_model +log=train.log + +paddle train \ +--config=$config \ +--dot_period=10 \ +--log_period=100 \ +--test_all_data_in_one_period=1 \ +--use_gpu=1 \ +--save_dir=$output \ +2>&1 | tee $log + +python -m paddle.utils.plotcurve -i $log > plot.png +``` + +- Here we use GPU mode to train. If you have no gpu environment, just set `use_gpu=0`. + +- `./demo/image_classification/vgg_16_cifar.py` is the network and data configuration file. The meaning of the other flags can be found in the documentation of the command line flags. + +- The script `plotcurve.py` requires the python module of `matplotlib`, so if it fails, maybe you need to install `matplotlib`. + + +After training finishes, the training and testing error curve will be saved to `plot.png` using `plotcurve.py` script. An example of the plot is shown below: + +
![Training and testing curves.](./plot.png)
+ + +## Prediction +After we train the model, the model file as well as the model parameters are stored in the path `./cifar_vgg_model/pass-%05d`. For example, the model of the 300-th pass is stored at `./cifar_vgg_model/pass-00299`. + +To make a prediction for an image, such as `test.jpg`, one can run `sh classify.sh ./cifar_vgg_model/pass-00299 test.jpg`. The script will output the label of the classification. + + +## Exercise +Train an image classifier for birds using the VGG model and the CUB-200 dataset. The birds dataset can be downloaded here. It contains an image dataset with photos of 200 bird species (mostly North American). + + + + + + +## Delve into Details +### Convolutional Neural Network +A Convolutional Neural Network is a feedforward neural network that uses convolution layers. It is very suitable for building neural networks that process and understand images. A standard convolutional neural network is shown below: + +![Convolutional Neural Network](./lenet.png) + +A Convolutional Neural Network contains the following layers: + +- Convolutional layer: It uses the convolution operation to extract features from an image or a feature map. +- Pooling layer: It uses max-pooling to downsample feature maps. +- Fully Connected layer: It uses fully connected connections to transform features. + +A Convolutional Neural Network achieves amazing performance for image classification because it exploits two important characteristics of images: *local correlation* and *spatial invariance*. By iteratively applying convolution and max-pooling operations, a convolutional neural network can represent these two characteristics of images well. + +For more details of how to define layers and their connections, please refer to the documentation of layers. diff --git a/doc/_sources/demo/image_classification/index.txt b/doc/_sources/demo/image_classification/index.txt new file mode 100644 index 0000000000000000000000000000000000000000..1ea68f14164b22cd211d09d72a7358fe24e4fed7 --- /dev/null +++ b/doc/_sources/demo/image_classification/index.txt @@ -0,0 +1,10 @@ +Image Classification Tutorial +============================= + +.. toctree:: + :maxdepth: 3 + :glob: + + Training Locally + cluster_train/internal/cluster_train.md + cluster_train/opensource/cluster_train.md diff --git a/doc/_sources/demo/imagenet_model/resnet_model.txt b/doc/_sources/demo/imagenet_model/resnet_model.txt new file mode 100644 index 0000000000000000000000000000000000000000..21c3a4cee53e16fb42ac0efdf4c7b9aa7593f335 --- /dev/null +++ b/doc/_sources/demo/imagenet_model/resnet_model.txt @@ -0,0 +1,281 @@ +# Model Zoo - ImageNet # + +[ImageNet](http://www.image-net.org/) is a popular dataset for generic object classification. This tutorial provides convolutional neural network (CNN) models for ImageNet. + +## ResNet Introduction + +ResNets, from the paper [Deep Residual Learning for Image Recognition](http://arxiv.org/abs/1512.03385), won 1st place in the ILSVRC 2015 classification task. They present a residual learning framework to ease the training of networks that are substantially deeper than those used previously. The residual connections are shown in the following figure; a toy sketch of the skip connection follows Figure 1. The left building block is used in the 34-layer network, and the right bottleneck building block is used in the 50-, 101- and 152-layer networks. + +
![resnet_block](./resnet_block.jpg)
+
Figure 1. ResNet Block
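The core idea behind both blocks in Figure 1 is the skip connection: the stacked layers learn a residual F(x) that is added back to the input, y = ReLU(F(x) + x). Below is a toy numpy sketch of just that structure; a single linear map stands in for the conv/BN stack, and the projection shortcut is only needed when input and output shapes differ. This illustrates the idea, not the PaddlePaddle implementation.

```python
import numpy as np

def relu(x):
    return np.maximum(x, 0.0)

def residual_block(x, transform, shortcut=lambda v: v):
    # transform plays the role of the stacked conv/BN layers F(x);
    # the shortcut is the identity unless a projection is needed.
    return relu(transform(x) + shortcut(x))

np.random.seed(0)
W = np.random.randn(8, 8) * 0.1      # stand-in for the block's learned layers
x = np.random.randn(8)
y = residual_block(x, lambda v: W.dot(v))
print(y.shape)  # (8,)
```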
+ +We present three ResNet models, which are converted from the models provided by the authors. The classification errors, tested in PaddlePaddle on the 50,000-image ILSVRC validation set with input images in **BGR** channel order, a single scale with the shorter side of 256, and a single crop, are listed in the following table (a sketch of this preprocessing follows the table). +
| ResNet     | Top-1 | Model Size |
| :--------- | :---- | :--------- |
| ResNet-50  | 24.9% | 99M        |
| ResNet-101 | 23.7% | 173M       |
| ResNet-152 | 23.2% | 234M       |
+
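For reference, the single-scale, single-crop evaluation protocol described above the table can be sketched as follows. This is an illustrative approximation written with PIL and numpy: a 224x224 center crop and per-channel mean subtraction are assumed here, while the preprocessing actually used for these numbers lives in the demo's image provider scripts.

```python
import numpy as np
from PIL import Image

def load_eval_image(path, mean_bgr=(103.939, 116.779, 123.68), short_side=256, crop=224):
    img = Image.open(path).convert("RGB")
    w, h = img.size
    scale = float(short_side) / min(w, h)                 # resize so the shorter side is 256
    img = img.resize((int(round(w * scale)), int(round(h * scale))))
    w, h = img.size
    left, top = (w - crop) // 2, (h - crop) // 2          # single (center) crop of 224 x 224
    img = img.crop((left, top, left + crop, top + crop))
    arr = np.asarray(img, dtype=np.float32)[:, :, ::-1]   # RGB -> BGR channel order
    return arr - np.asarray(mean_bgr, dtype=np.float32)   # subtract per-channel BGR mean

# usage: x = load_eval_image("example/cat.jpg")
```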
+ +## ResNet Model + +See ```demo/model_zoo/resnet/resnet.py```. This config file contains the networks of 50, 101 and 152 layers. You can specify the layer number by adding a command line argument like ```--config_args=layer_num=50```. + +### Network Visualization + +You can get a diagram of the ResNet network by running the following command. The script generates a dot file and then converts it to a PNG file, using the draw_dot tool installed on our server. If you cannot access the server, just install graphviz to convert the dot file. + +``` +cd demo/model_zoo/resnet +./net_diagram.sh +``` + +### Model Download + +``` +cd demo/model_zoo/resnet +./get_model.sh +``` +Run the above command to download all models and the mean file; if the download succeeds, they are saved in ```demo/model_zoo/resnet/model```. + +``` +mean_meta_224 resnet_101 resnet_152 resnet_50 +``` + * resnet_50: model of 50 layers. + * resnet_101: model of 101 layers. + * resnet_152: model of 152 layers. + * mean\_meta\_224: mean file of size 3 x 224 x 224 in **BGR** order. You can also use the three mean values: 103.939, 116.779, 123.68. + +### Parameter Info + +* **Convolution Layer Weight** + + As a batch normalization layer is connected after each convolution layer, the layer has no bias parameter and only one weight. + shape: `(Co, ky, kx, Ci)` + * Co: channel number of the output feature map. + * ky: filter size in the vertical direction. + * kx: filter size in the horizontal direction. + * Ci: channel number of the input feature map. + + 2-Dim matrix: (Co * ky * kx, Ci), saved in row-major order (a small reshape sketch follows the batch normalization table below). + +* **Fully connected Layer Weight** + + 2-Dim matrix: (input layer size, this layer size), saved in row-major order. + +* **[Batch Normalization]() Layer Weight** + +There are four parameters in this layer. In fact, only .w0 and .wbias are learned parameters; the other two are the running mean and variance respectively, which are loaded at test time. The following table shows the parameters of a batch normalization layer. +
| Parameter Name           | Number | Meaning                    |
| :----------------------- | :----- | :------------------------- |
| _res2_1_branch1_bn.w0    | 256    | gamma, scale parameter     |
| _res2_1_branch1_bn.w1    | 256    | mean value of feature map  |
| _res2_1_branch1_bn.w2    | 256    | variance of feature map    |
| _res2_1_branch1_bn.wbias | 256    | beta, shift parameter      |
+
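Because a convolution weight is stored as a row-major (Co * ky * kx, Ci) matrix, flattening the file and reshaping to (Co, ky, kx, Ci) recovers the 4-D tensor layout described in the Parameter Info section above. A small sketch follows, reusing the reader shown in the next section; the channel and kernel sizes (and the file name) below are hypothetical and depend on which layer you actually load.

```python
import numpy as np

def load(file_name):
    # same reader as in the Parameter Observation section: skip the 16-byte header, read float32
    with open(file_name, 'rb') as f:
        f.read(16)
        return np.fromfile(f, dtype=np.float32)

def conv_weight_to_tensor(flat, Co, ky, kx, Ci):
    # a row-major (Co*ky*kx, Ci) matrix flattens in the same order as a row-major
    # (Co, ky, kx, Ci) tensor, so a plain reshape is enough
    return flat.reshape(Co, ky, kx, Ci)

# hypothetical example: a 3x3 convolution with 64 input and 256 output channels
# w = conv_weight_to_tensor(load('some_conv_layer.w0'), 256, 3, 3, 64)
```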
+ +### Parameter Observation + +Users who want to observe the parameters can use python to read: + +``` +import sys +import numpy as np + +def load(file_name): + with open(file_name, 'rb') as f: + f.read(16) # skip header for float type. + return np.fromfile(f, dtype=np.float32) + +if __name__=='__main__': + weight = load(sys.argv[1]) +``` + +or simply use following shell command: + +``` +od -j 16 -f _res2_1_branch1_bn.w0 +``` + +## Feature Extraction + +We provide both C++ and Python interfaces to extract features. The following examples use data in `demo/model_zoo/resnet/example` to show the extracting process in detail. + +### C++ Interface + +First, specify image data list in `define_py_data_sources` in the config, see example `demo/model_zoo/resnet/resnet.py`. + +``` + train_list = 'train.list' if not is_test else None + # mean.meta is mean file of ImageNet dataset. + # mean.meta size : 3 x 224 x 224. + # If you use three mean value, set like: + # "mean_value:103.939,116.779,123.68;" + args={ + 'mean_meta': "model/mean_meta_224/mean.meta", + 'image_size': 224, 'crop_size': 224, + 'color': True,'swap_channel:': [2, 1, 0]} + define_py_data_sources2(train_list, + 'example/test.list', + module="example.image_list_provider", + obj="processData", + args=args) +``` + +Second, specify layers to extract features in `Outputs()` of `resnet.py`. For example, + +``` +Outputs("res5_3_branch2c_conv", "res5_3_branch2c_bn") +``` + +Third, specify model path and output directory in `extract_fea_c++.sh +`, and then run following commands + +``` +cd demo/model_zoo/resnet +./extract_fea_c++.sh +``` + +If successful, features are saved in `fea_output/rank-00000` as follows. And you can use `load_feature_c` interface in `load_feature.py ` to load such a file. + +``` +-0.115318 -0.108358 ... -0.087884;-1.27664 ... -1.11516 -2.59123; +-0.126383 -0.116248 ... -0.00534909;-1.42593 ... -1.04501 -1.40769; +``` + +* Each line stores features of a sample. Here, the first line stores features of `example/dog.jpg` and second line stores features of `example/cat.jpg`. +* Features of different layers are splitted by `;`, and their order is consistent with the layer order in `Outputs()`. Here, the left features are `res5_3_branch2c_conv` layer and right features are `res5_3_branch2c_bn` layer. + +### Python Interface + +`demo/model_zoo/resnet/classify.py` is an example to show how to use python to extract features. Following example still uses data of `./example/test.list`. Command is as follows: + +``` +cd demo/model_zoo/resnet +./extract_fea_py.sh +``` + +extract_fea_py.sh: + +``` +python classify.py \ + --job=extract \ + --conf=resnet.py\ + --mean=model/mean_meta_224/mean.meta \ + --model=model/resnet_50 \ + --data=./example/test.list \ + --output_layer="res5_3_branch2c_conv,res5_3_branch2c_bn" \ + --output_dir=features + +``` +* --job=extract: specify job mode to extract feature. +* --conf=resnet.py: network configure. +* --model=model/resnet_5: model path. +* --data=./example/test.list: data list. +* --output_layer="xxx,xxx": specify layers to extract features. +* --output_dir=features: output diretcoty. + +If run successfully, you will see features saved in `features/batch_0`, this file is produced with cPickle. 
You can use the `load_feature_py` interface in `load_feature.py` to open the file, and it returns a dictionary as follows: + +``` +{ +'cat.jpg': {'res5_3_branch2c_conv': array([[-0.12638293, -0.116248 , -0.11883899, ..., -0.00895038, 0.01994277, -0.00534909]], dtype=float32), 'res5_3_branch2c_bn': array([[-1.42593431, -1.28918779, -1.32414699, ..., -1.45933616, -1.04501402, -1.40769434]], dtype=float32)}, +'dog.jpg': {'res5_3_branch2c_conv': array([[-0.11531784, -0.10835785, -0.08809858, ...,0.0055237, 0.01505112, -0.08788397]], dtype=float32), 'res5_3_branch2c_bn': array([[-1.27663755, -1.18272924, -0.90937918, ..., -1.25178063, -1.11515927, -2.59122872]], dtype=float32)} +} +``` + +If you look carefully, these feature values are consistent with the results extracted by the C++ interface above. + +## Prediction + +`classify.py` can also be used for prediction. We provide an example script `predict.sh` to predict data in `example/test.list` using a ResNet model with 50 layers. + +``` +cd demo/model_zoo/resnet +./predict.sh +``` + +predict.sh calls `classify.py`: + +``` +python classify.py \ + --job=predict \ + --conf=resnet.py\ + --multi_crop \ + --model=model/resnet_50 \ + --data=./example/test.list +``` +* --job=predict: specify the prediction job mode. +* --conf=resnet.py: network config file. +* --multi_crop: use 10 crops and average the predicted probabilities. +* --model=model/resnet_50: model path. +* --data=./example/test.list: data list. + +If it runs successfully, you will see results like the following, where the numbers are the predicted labels of the images. + +``` +Label of example/dog.jpg is: 156 +Label of example/cat.jpg is: 282 +``` diff --git a/doc/_sources/demo/index.txt b/doc/_sources/demo/index.txt new file mode 100644 index 0000000000000000000000000000000000000000..5ca0e56dae0843262e91fa96ca6e02fe1bc53be4 --- /dev/null +++ b/doc/_sources/demo/index.txt @@ -0,0 +1,24 @@ +# Examples and demos +There are several examples and demos here. + +## Image + +* [Image Classification](image_classification/index.rst) + +## NLP + +* [Sentiment Analysis](sentiment_analysis/index.rst) +* [Text Generation](text_generation/index.rst) +* [Semantic Role Labeling](semantic_role_labeling/index.md) + +## Recommendation + +* [MovieLens Dataset](rec/ml_dataset.md) +* [MovieLens Regression](rec/ml_regression.rst) + +## Model Zoo +* [ImageNet: ResNet](imagenet_model/resnet_model.md) +* [Embedding: Chinese Word](embedding_model/index.md) + +## Customization +* [Writing New Layers](new_layer/index.md) diff --git a/doc/_sources/demo/new_layer/index.txt b/doc/_sources/demo/new_layer/index.txt new file mode 100644 index 0000000000000000000000000000000000000000..00058ca0c3731b7cb715492a4e35f4df204f8605 --- /dev/null +++ b/doc/_sources/demo/new_layer/index.txt @@ -0,0 +1,290 @@ +Writing New Layers +---------- +This tutorial will guide you through writing customized layers in PaddlePaddle. We will use the fully connected layer as an example to walk through the following steps for writing a new layer. +- Derive the equations for the forward and backward parts of the layer. +- Implement the C++ class for the layer. +- Implement the Python wrapper for the layer. + +## Derive Equations +First we need to derive the equations of the *forward* and *backward* parts of the layer. The forward part computes the output given an input. The backward part computes the gradients of the input and the parameters given the gradients of the output. + +The illustration of a fully connected layer is shown in the following figure. 
In a fully connected layer, all output nodes are connected to all the input nodes. +
![](./FullyConnected.jpg)
+ +The *forward part* of a layer transforms an input into the corresponding output. +Fully connected layer takes a dense input vector with dimension $D_i$. It uses a transformation matrix $W$ with size $D_i \times D_o$ to project x into a $D_o$ dimensional vector, and add a bias vector $b$ with dimension $D_o$ to the vector. +\[y = f(W^T x + b) \] +where $f(.)$ is an nonlinear *activation* function, such as sigmoid, tanh, and Relu. + +The transformation matrix $W$ and bias vector $b$ are the *parameters* of the layer. The *parameters* of a layer are learned during training in the *backward pass*. The backward pass computes the gradients of the output function with respect to all parameters and inputs. The optimizer can use chain rule to compute the gradients of the loss function with respect to each parameter. Suppose our loss function is $c(y)$, then +\[\frac{\partial c(y)}{\partial x} = \frac{\partial c(y)}{\partial y} \frac{\partial y}{\partial x} \] + +Suppose $z = f(W^T x + b)$, then +\[ \frac{\partial y}{\partial z} = \frac{\partial f(z)}{\partial z}\] + This derivative can be automatically computed by our base layer class. + +Then, for fully connected layer, we need to compute $\frac{\partial z}{\partial x}$, and $\frac{\partial z}{\partial W}$, and $\frac{\partial z}{\partial b}$. +\[ \frac{\partial z}{\partial x} = W \] +\[ \frac{\partial z_j}{\partial W_{ij}} = x_i \] +\[ \frac{\partial z}{\partial b} = \mathbf 1 \] +where $\mathbf 1$ is an all one vector, $W_{ij}$ is the number at the i-th row and j-th column of the matrix $W$, $z_j$ is the j-th component of the vector $z$, and $x_i$ is the i-th component of the vector $x$. + +Then we can use chain rule to calculate $\frac{\partial z}{\partial x}$, and $\frac{\partial z}{\partial W}$. The details of the computation will be given in the next section. + +## Implement C++ Class +The C++ class of the layer implements the initialization, forward, and backward part of the layer. The fully connected layer is at `paddle/gserver/layers/FullyConnectedLayer.h` and `paddle/gserver/layers/FullyConnectedLayer.cpp`. We list simplified version of the code below. + +It needs to derive the base class `paddle::BaseLayer`, and it needs to override the following functions: + +- constructor and destructor. +- `init` function. It is used to initialize the parameters and settings. +- `forward`. It implements the forward part of the layer. +- `backward`. It implements the backward part of the layer. +- `prefetch`. It is utilized to determine the rows corresponding parameter matrix to prefetch from parameter server. You do not need to override this function if your layer does not need remote sparse update. (most layers do not need to support remote sparse update) + + +The header file is listed below: + +```C +namespace paddle { +/** + * A layer has full connections to all neurons in the previous layer. + * It computes an inner product with a set of learned weights, and + * (optionally) adds biases. + * + * The config file api is fc_layer. 
+ */ + +class FullyConnectedLayer : public Layer { +protected: + WeightList weights_; + std::unique_ptr biases_; + +public: + explicit FullyConnectedLayer(const LayerConfig& config) + : Layer(config) {} + ~FullyConnectedLayer() {} + + bool init(const LayerMap& layerMap, const ParameterMap& parameterMap); + + Weight& getWeight(int idx) { return *weights_[idx]; } + + void prefetch(); + void forward(PassType passType); + void backward(const UpdateCallback& callback = nullptr); +}; +} // namespace paddle +``` + +It defines the parameters as class variables. We use `Weight` class as abstraction of parameters. It supports multi-thread update. The details of this class will be described in details in the implementations. +- `weights_` is a list of weights for the transformation matrices. The current implementation can have more than one inputs. Thus, it has a list of weights. One weight corresponds to an input. +- `biases_` is a weight for the bias vector. + +The fully connected layer does not have layer configuration hyper-parameters. If there are some layer hyper-parameters, a common practice is to store it in `LayerConfig& config`, and put it into a class variable in the constructor. + +The following code snippet implements the `init` function. +- First, every `init` function must call the `init` function of the base class `Layer::init(layerMap, parameterMap);`. This statement will initialize the required variables and connections for each layer. +- The it initializes all the weights matrices $W$. The current implementation can have more than one inputs. Thus, it has a list of weights. +- Finally, it initializes the bias. + + +```C +bool FullyConnectedLayer::init(const LayerMap& layerMap, + const ParameterMap& parameterMap) { + /* Initialize the basic parent class */ + Layer::init(layerMap, parameterMap); + + /* initialize the weightList */ + CHECK(inputLayers_.size() == parameters_.size()); + for (size_t i = 0; i < inputLayers_.size(); i++) { + // Option the parameters + size_t height = inputLayers_[i]->getSize(); + size_t width = getSize(); + + // create a new weight + if (parameters_[i]->isSparse()) { + CHECK_LE(parameters_[i]->getSize(), width * height); + } else { + CHECK_EQ(parameters_[i]->getSize(), width * height); + } + Weight* w = new Weight(height, width, parameters_[i]); + + // append the new weight to the list + weights_.emplace_back(w); + } + + /* initialize biases_ */ + if (biasParameter_.get() != NULL) { + biases_ = std::unique_ptr(new Weight(1, getSize(), biasParameter_)); + } + + return true; +} + +``` + + +The implementation of the forward part has the following steps. +- Every layer must call `Layer::forward(passType);` at the beginning of its `forward` function. +- Then it allocates memory for the output using `reserveOutput(batchSize, size);`. This step is necessary because we support the batches to have different batch sizes. `reserveOutput` will change the size of the output accordingly. For the sake of efficiency, we will allocate new memory if we want to expand the matrix, but we will reuse the existing memory block if we want to shrink the matrix. +- Then it computes $\sum_i W_i x + b$ using Matrix operations. `getInput(i).value` retrieve the matrix of the i-th input. Each input is a $batchSize \times dim$ matrix, where each row represents an single input in a batch. For a complete lists of supported matrix operations, please refer to `paddle/math/Matrix.h` and `paddle/math/BaseMatrix.h`. +- Finally it applies the activation function using `forwardActivation();`. 
It will automatically applies the corresponding activation function specifies in the network configuration. + + +```C +void FullyConnectedLayer::forward(PassType passType) { + Layer::forward(passType); + + /* malloc memory for the output_ if necessary */ + int batchSize = getInput(0).getBatchSize(); + int size = getSize(); + + { + // Settup the size of the output. + reserveOutput(batchSize, size); + } + + MatrixPtr outV = getOutputValue(); + + // Apply the the transformation matrix to each input. + for (size_t i = 0; i != inputLayers_.size(); ++i) { + auto input = getInput(i); + CHECK(input.value) << "The input of 'fc' layer must be matrix"; + i == 0 ? outV->mul(input.value, weights_[i]->getW(), 1, 0) + : outV->mul(input.value, weights_[i]->getW(), 1, 1); + } + + /* add the bias-vector */ + if (biases_.get() != NULL) { + outV->addBias(*(biases_->getW()), 1); + } + + /* activation */ { + forwardActivation(); + } +} +``` + +The implementation of the backward part has the following steps. +- ` backwardActivation();` computes the gradients of the activation. The gradients will be multiplies in place to the gradients of the output, which can be retrieved using `getOutputGrad()`. +- Compute the gradients of bias. Notice that we an use `biases_->getWGrad()` to get the gradient matrix of the corresponding parameter. After the gradient of one parameter is updated, it *MUST* call `getParameterPtr()->incUpdate(callback);`. This is utilize for parameter update over multiple threads or multiple machines. +- Then it computes the gradients of the transformation matrices and inputs, and it calls `incUpdate` for the corresponding parameter. This gives the framework the chance to know whether it has gathered all the gradient to one parameter so that it can do some overlapping work (e.g., network communication) + + +```C +void FullyConnectedLayer::backward(const UpdateCallback& callback) { + /* Do derivation for activations.*/ { + backwardActivation(); + } + + if (biases_ && biases_->getWGrad()) { + biases_->getWGrad()->collectBias(*getOutputGrad(), 1); + + /* Increasing the number of gradient */ + biases_->getParameterPtr()->incUpdate(callback); + } + + bool syncFlag = hl_get_sync_flag(); + + for (size_t i = 0; i != inputLayers_.size(); ++i) { + /* Calculate the W-gradient for the current layer */ + if (weights_[i]->getWGrad()) { + MatrixPtr input_T = getInputValue(i)->getTranspose(); + MatrixPtr oGrad = getOutputGrad(); + { + weights_[i]->getWGrad()->mul(input_T, oGrad, 1, 1); + } + } + + + /* Calculate the input layers error */ + MatrixPtr preGrad = getInputGrad(i); + if (NULL != preGrad) { + MatrixPtr weights_T = weights_[i]->getW()->getTranspose(); + preGrad->mul(getOutputGrad(), weights_T, 1, 1); + } + + { + weights_[i]->getParameterPtr()->incUpdate(callback); + } + } +} +``` + +The `prefetch` function specifies the rows that need to be fetched from parameter server during training. It is only useful for remote sparse training. In remote sparse training, the full parameter matrix is stored distributedly at the parameter server. When the layer uses a batch for training, only a subset of locations of the input is non-zero in this batch. Thus, this layer only needs the rows of the transformation matrix corresponding to the locations of these non-zero entries. The `prefetch` function specifies the ids of these rows. + +Most of the layers do not need remote sparse training function. You do not need to override this function in this case. 


```C++
void FullyConnectedLayer::prefetch() {
  for (size_t i = 0; i != inputLayers_.size(); ++i) {
    auto* sparseParam =
        dynamic_cast<SparsePrefetchRowCpuMatrix*>(weights_[i]->getW().get());
    if (sparseParam) {
      MatrixPtr input = getInputValue(i);
      sparseParam->addRows(input);
    }
  }
}
```

Finally, you can use `REGISTER_LAYER(fc, FullyConnectedLayer);` to register the layer. `fc` is the identifier of the layer, and `FullyConnectedLayer` is the class name of the layer.

```C++
namespace paddle {
REGISTER_LAYER(fc, FullyConnectedLayer);
}
```

If the `cpp` file is put into `paddle/gserver/layers`, it will be compiled automatically.

## Implement Python Wrapper
Implementing a Python wrapper allows us to use the added layer in configuration files. All the Python wrappers are in the file `python/paddle/trainer/config_parser.py`. An example of the Python wrapper for the fully connected layer is listed below. It has the following steps:
- Use `@config_layer('fc')` as the decorator of the Python wrapper class. `fc` is the identifier of the layer.
- Implement the `__init__` constructor function.
  - It first calls the base constructor `super(FCLayer, self).__init__(name, 'fc', size, inputs=inputs, **xargs)`. `FCLayer` is the Python wrapper class name, and `fc` is the layer identifier name. They must be correct for the wrapper to work.
  - Then it computes the size and format (whether sparse) of each transformation matrix and creates the corresponding parameters.

```python
@config_layer('fc')
class FCLayer(LayerBase):
    def __init__(
            self,
            name,
            size,
            inputs,
            bias=True,
            **xargs):
        super(FCLayer, self).__init__(name, 'fc', size, inputs=inputs, **xargs)
        for input_index in xrange(len(self.inputs)):
            input_layer = self.get_input_layer(input_index)
            psize = self.config.size * input_layer.size
            dims = [input_layer.size, self.config.size]
            format = self.inputs[input_index].format
            sparse = format == "csr" or format == "csc"

            if sparse:
                psize = self.inputs[input_index].nnz

            self.create_input_parameter(input_index, psize, dims, sparse, format)
        self.create_bias_parameter(bias, self.config.size)
```

In the network configuration, the layer can be specified using the following code snippet. The arguments of this class are:
- `name` is the name identifier of the layer instance.
- `type` is the type of the layer, specified using the layer identifier.
- `size` is the output size of the layer.
- `bias` specifies whether this layer instance has a bias.
- `inputs` specifies a list of layer instance names as inputs.

```python
Layer(
    name = "fc1",
    type = "fc",
    size = 64,
    bias = True,
    inputs = [Input("pool3")]
)
```

You are also encouraged to implement a helper for the Python wrapper, which makes it easier to write models. You can refer to `python/paddle/trainer_config_helpers/layers.py` for examples. diff --git a/doc/_sources/demo/quick_start/index_en.txt b/doc/_sources/demo/quick_start/index_en.txt new file mode 100644 index 0000000000000000000000000000000000000000..3a2c39d11bf1434f8ce77a84be255a4bcc81dd59 --- /dev/null +++ b/doc/_sources/demo/quick_start/index_en.txt @@ -0,0 +1,556 @@
# PaddlePaddle Quick Start Tutorial

This tutorial will teach the basics of deep learning (DL), including how to implement many different models in PaddlePaddle. You will learn how to:
 - Prepare data into the standardized format that PaddlePaddle accepts.
 - Write data providers that read data into PaddlePaddle.
 - Configure neural networks in PaddlePaddle layer by layer.
 - Train models.
 - Perform inference with trained models.


## Install

To get started, please install PaddlePaddle on your computer. Throughout this tutorial, you will learn by implementing different DL models for text classification.

To install PaddlePaddle, please follow the instructions here: Build and Install.

## Overview
For the first step, you will use PaddlePaddle to build a **text classification** system. Suppose, for example, that you run an e-commerce website, and you want to analyze the sentiment of user reviews to evaluate product quality.

For example, given the input

```
This monitor is fantastic.
```

Your classifier should output “positive”, since this text snippet shows that the user is satisfied with the product. Given this input:

```
The monitor breaks down two months after purchase.
```

the classifier should output “negative”.

To build your text classification system, your code will need to perform five steps:
![](./Pipeline_en.jpg)
+ + - Preprocess data into a standardized format. + - Provide data to the learning model. + - Specify the neural network structure. + - Train the model. + - Inference (make prediction on test examples). + + +1. Preprocess data into standardized format + - In the text classification example, you will start with a text file with one training example per line. Each line contains category id (in machine learning, often denoted the target y), followed by the input text (often denoted x); these two elements are separated by a Tab. For example: ```positive [tab] This monitor is fantastic```. You will preprocess this raw data into a format that Paddle can use. + +2. Provide data to the learning model. + - You can write data providers in Python. For any required data preprocessing step, you can add the preprocessing code to the PyDataProvider Python file. + - In our text classification example, every word or character will be converted into an integer id, specified in a dictionary file. It perform a dictionary lookup in PyDataProvider to get the id. +3. Specify neural network structure. (From easy to hard, we provide 4 kinds of network configurations) + - A logistic regression model. + - A word embedding model. + - A convolutional neural network model. + - A sequential recurrent neural network model. + - You will also learn different learning algorithms. +4. Training model. +5. Inference. + +## Preprocess data into standardized format +In this example, you are going to use [Amazon electronic product review dataset](http://jmcauley.ucsd.edu/data/amazon/) to build a bunch of deep neural network models for text classification. Each text in this dataset is a product review. This dataset has two categories: “positive” and “negative”. Positive means the reviewer likes the product, while negative means the reviewer does not like the product. + +`demo/quick_start` provides scripts for downloading data and preprocessing data, as shown below: + +```bash +cd demo/quick_start +./data/get_data.sh +pip install -r requirements.txt +./preprocess.sh +``` + +## Transfer Data to Model +### Write Data Provider with Python +The following `dataprovider_bow.py` gives a complete example of writing data provider with Python. It includes the following parts: + +* initalizer: define the additional meta-data of the data provider and the types of the input data. +* process: Each `yield` returns a data sample. In this case, it return the text representation and category id. The order of features in the returned result needs to be consistent with the definition of the input types in `initalizer`. + +```python +from paddle.trainer.PyDataProvider2 import * + +# id of the word not in dictionary +UNK_IDX = 0 + +# initializer is called by the framework during initialization. +# It allows the user to describe the data types and setup the +# necessary data structure for later use. +# `settings` is an object. initializer need to properly fill settings.input_types. +# initializer can also store other data structures needed to be used at process(). +# In this example, dictionary is stored in settings. +# `dictionay` and `kwargs` are arguments passed from trainer_config.lr.py +def initializer(settings, dictionary, **kwargs): + # Put the word dictionary into settings + settings.word_dict = dictionary + + # setting.input_types specifies what the data types the data provider + # generates. + settings.input_types = [ + # The first input is a sparse_binary_vector, + # which means each dimension of the vector is either 0 or 1. 
It is the
        # bag-of-words (BOW) representation of the texts.
        sparse_binary_vector(len(dictionary)),
        # The second input is an integer. It represents the category id of the
        # sample. 2 means there are two labels in the dataset.
        # (1 for positive and 0 for negative)
        integer_value(2)]

# Declaring a data provider. Its initialization hook is the initializer()
# function defined above.
# It will cache the generated data of the first pass in memory, so that
# during later passes, no on-the-fly data generation will be needed.
# `settings` is the same object used by initializer().
# `file_name` is the name of a file listed in the train_list or test_list file
# given to define_py_data_sources2(). See trainer_config.lr.py.
@provider(init_hook=initializer, cache=CacheType.CACHE_PASS_IN_MEM)
def process(settings, file_name):
    # Open the input data file.
    with open(file_name, 'r') as f:
        # Read each line.
        for line in f:
            # Each line contains the label and text of the comment, separated by \t.
            label, comment = line.strip().split('\t')

            # Split the words into a list.
            words = comment.split()

            # Convert the words into a list of ids by looking them up in word_dict.
            word_vector = [settings.word_dict.get(w, UNK_IDX) for w in words]

            # Return the features for the current comment. The first is a list
            # of ids representing a 0-1 binary sparse vector of the text,
            # the second is the integer id of the label.
            yield word_vector, int(label)
```

### Define Python Data Provider in Configuration files.
You need to add a data provider definition `define_py_data_sources2` to the network configuration. This definition specifies:

- The paths of the training and testing data (`data/train.list`, `data/test.list`).
- The module of the data provider file (`dataprovider_bow`).
- The function to call to get data (`process`).
- Additional arguments or data. Here it passes the word dictionary.

```python
from paddle.trainer_config_helpers import *

dict_file = "data/dict.txt"
word_dict = dict()
with open(dict_file, 'r') as f:
    for i, line in enumerate(f):
        w = line.strip().split()[0]
        word_dict[w] = i
# Define the data sources for the model.
# We need to use different processes for training and prediction.
# For training, the input data includes both word IDs and labels.
# For prediction, the input data only includes word IDs.
define_py_data_sources2(train_list='data/train.list',
                        test_list='data/test.list',
                        module="dataprovider_bow",
                        obj="process",
                        args={"dictionary": word_dict})
```

You can refer to the following link for more detailed examples: Python Use Case. The detailed documentation on the data format is: PyDataProviderWrapper.

## Network Architecture
You will learn about four kinds of network architectures in this section.
![](./PipelineNetwork_en.jpg)


First, you will build a logistic regression model. Later, you will also get the chance to build other, more powerful network architectures.
For more detailed documentation, you can refer to the Layer documentation. All configuration files are in the `demo/quick_start` directory.

### Logistic Regression
The architecture is illustrated in the following picture:
![](./NetLR_en.png)


- You need to define the data layer for text features. The size of the data layer is the number of words in the dictionary.

```python
word = data_layer(name="word", size=voc_dim)
```

- You also need to define the category id for each example. The size of the data layer is the number of labels.

```python
label = data_layer(name="label", size=label_dim)
```

- It uses a logistic regression model to classify the vector, and it outputs the classification error during training.
  - Each layer has an *input* argument that specifies its input layer. Some layers can have multiple input layers; in that case, you can pass a list of input layers.
  - *size* for each layer means the number of neurons of the layer.
  - *act_type* is the activation function applied to the output of each neuron independently.
  - Some layers can have additional special inputs. For example, `classification_cost` needs the ground-truth label as input to compute the classification loss and error.

```python
# Define a fully connected layer with logistic activation (also called softmax activation).
output = fc_layer(input=word,
                  size=label_dim,
                  act_type=SoftmaxActivation())
# Define cross-entropy classification loss and error.
classification_cost(input=output, label=label)
```

Performance summary: you can refer to the training and testing scripts later. In order to compare different network architectures, the model complexity and test classification error are listed in the following table (a consolidated configuration sketch that ties these snippets together is given after the table):

| Network name        | Number of parameters | Test error |
| ------------------- | -------------------- | ---------- |
| Logistic regression | 252 KB               | 8.652%     |

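As mentioned above, the snippets in this section can be assembled into a single configuration file. The following is a minimal, illustrative sketch only, not the exact `trainer_config.lr.py` shipped with the demo: the optimizer settings are borrowed from the later Optimization Algorithm section, and `voc_dim`, `label_dim`, and the dictionary-loading code are assumptions made for illustration.

```python
# Illustrative sketch of a logistic-regression configuration (not the shipped file).
from paddle.trainer_config_helpers import *

dict_file = "data/dict.txt"          # assumed dictionary location, as in the data provider section
word_dict = dict()
with open(dict_file, 'r') as f:
    for i, line in enumerate(f):
        word_dict[line.strip().split()[0]] = i

define_py_data_sources2(train_list='data/train.list',
                        test_list='data/test.list',
                        module="dataprovider_bow",
                        obj="process",
                        args={"dictionary": word_dict})

# Optimizer settings copied from the Optimization Algorithm section below.
settings(batch_size=128, learning_rate=2e-3, learning_method=AdamOptimizer())

voc_dim = len(word_dict)   # input dimension = dictionary size (assumption)
label_dim = 2              # two classes: positive / negative (assumption)

word = data_layer(name="word", size=voc_dim)
label = data_layer(name="label", size=label_dim)
output = fc_layer(input=word, size=label_dim, act_type=SoftmaxActivation())
outputs(classification_cost(input=output, label=label))
```

A configuration like this would then be trained with `paddle train --config=<config file> ...`, as described in the Training Model section below.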


### Word Embedding Model
In order to use the word embedding model, you need to change the data provider slightly so that the input is a sequence of word IDs. The revised data provider is listed below. You only need to change initializer() so that the type of the first input becomes a sequence of integers instead of sparse_binary_vector; process() remains the same. This data provider can also be used for the later sequence models. (A small illustration of the two input representations is given after the performance table below.)

```python
def initializer(settings, dictionary, **kwargs):
    # Put the word dictionary into settings
    settings.word_dict = dictionary
    settings.input_types = [
        # Define the type of the first input as a sequence of integers.
        integer_value_sequence(len(dictionary)),
        # Define the second input for the label id.
        integer_value(2)]

@provider(init_hook=initializer)
def process(settings, file_name):
    ...
    # omitted, it is the same as the data provider for the LR model
```

This model is very similar to the logistic regression framework, but it uses word embedding vectors instead of sparse vectors to represent words.
![](./NetContinuous_en.png)


- It looks up the dense word embedding vector of each word in the dictionary (the dimension of each embedding vector is `word_dim`). The input is a sequence of N words; the output is N word_dim-dimensional vectors.

```python
emb = embedding_layer(input=word, dim=word_dim)
```

- It averages all the word embeddings in a sentence to get the sentence representation.

```python
avg = pooling_layer(input=emb, pooling_type=AvgPooling())
```

The other parts of the model are the same as in the logistic regression network.

The performance is summarized in the following table:

| Network name         | Number of parameters | Test error |
| -------------------- | -------------------- | ---------- |
| Word embedding model | 15 MB                | 8.484%     |

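To make the change of input format concrete, here is a small, self-contained Python illustration (not part of the demo code) of how the same sentence is represented for the bag-of-words provider versus the sequence provider, using a toy dictionary.

```python
# Toy dictionary: word -> integer id. UNK_IDX = 0 stands for out-of-vocabulary words.
word_dict = {"this": 1, "monitor": 2, "is": 3, "fantastic": 4}
UNK_IDX = 0

words = "this monitor is really fantastic".split()
ids = [word_dict.get(w, UNK_IDX) for w in words]   # [1, 2, 3, 0, 4]

# Bag-of-words provider (sparse_binary_vector): only the set of active ids matters;
# word order and repetition are discarded.
bow_input = sorted(set(ids))                        # [0, 1, 2, 3, 4]

# Sequence provider (integer_value_sequence): the ordered list of ids is kept,
# which is what the embedding, convolution, and LSTM models consume.
seq_input = ids                                     # [1, 2, 3, 0, 4]

print(bow_input, seq_input)
```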
+ +### Convolutional Neural Network Model +Convolutional neural network converts a sequence of word embeddings into a sentence representation using temporal convolutions. You will transform the fully connected layer of the word embedding model to 3 new sub-steps. +
![](./NetConv_en.png)
+ + +Text convolution has 3 steps: +1. Get K nearest neighbor context of each word in a sentence, stack them into a 2D vector representation. +2. Apply temporal convolution to this representation to produce a new hidden_dim dimensional vector. +3. Apply max-pooling to the new vectors at all the time steps in a sentence to get a sentence representation. + +```python +# context_len means convolution kernel size. +# context_start means the start of the convolution. It can be negative. In that case, zero padding is applied. +text_conv = sequence_conv_pool(input=emb, + context_start=k, + context_len=2 * k + 1) +``` + +The performance is summarized in the following table: + + +

| Network name        | Number of parameters | Test error |
| ------------------- | -------------------- | ---------- |
| Convolutional model | 16 MB                | 5.628%     |

+ +### Recurrent Model +
![](./NetRNN_en.png)


You can use a recurrent neural network as the time sequence model, including a simple RNN model, a GRU model, or an LSTM model.

- A GRU model can be specified via:

```python
gru = simple_gru(input=emb, size=gru_size)
```

- An LSTM model can be specified via:

```python
lstm = simple_lstm(input=emb, size=lstm_size)
```

You can use a single-layer LSTM model with dropout for this text classification problem (a configuration sketch follows the table below). The performance is summarized in the following table:

| Network name    | Number of parameters | Test error |
| --------------- | -------------------- | ---------- |
| Recurrent model | 16 MB                | 4.812%     |

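As referenced above, the following sketch shows one plausible way to combine the LSTM snippet with the layers introduced earlier into a classifier: the per-word hidden states are max-pooled over time and fed to a softmax output. The max-pooling step and the names `emb`, `label`, `lstm_size`, and `label_dim` are assumptions carried over from the previous sections; dropout and the exact structure used in the demo are defined in `trainer_config.lstm.py`.

```python
# Illustrative sketch of the recurrent text classifier (not the shipped config).
# `emb` is the embedding sequence from the word embedding section, and
# `label` is the label data layer defined earlier.
lstm = simple_lstm(input=emb, size=lstm_size)

# Reduce the per-word hidden states to one sentence vector by max pooling
# over time (an assumed design choice for this sketch).
lstm_max = pooling_layer(input=lstm, pooling_type=MaxPooling())

# Softmax classifier on top of the sentence vector.
output = fc_layer(input=lstm_max, size=label_dim, act_type=SoftmaxActivation())
outputs(classification_cost(input=output, label=label))
```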
+ +## Optimization Algorithm +Optimization algorithms include Momentum, RMSProp, AdaDelta, AdaGrad, Adam, and Adamax. You can use Adam optimization method here, with L2 regularization and gradient clipping, because Adam has been proved to work very well for training recurrent neural network. + +```python +settings(batch_size=128, + learning_rate=2e-3, + learning_method=AdamOptimizer(), + regularization=L2Regularization(8e-4), + gradient_clipping_threshold=25) +``` + +## Training Model +After completing data preparation and network architecture specification, you will run the training script. +
![](./PipelineTrain_en.png)
+ +Training script: our training script is in `train.sh` file. The training arguments are listed below: + +```bash +paddle train \ +--config=trainer_config.py \ +--log_period=20 \ +--save_dir=./output \ +--num_passes=15 \ +--use_gpu=false +``` + +If you want to install the remote training platform, which enables distributed training on clusters, follow the instructions here: Platform documentation. We do not provide examples on how to train on clusters. Please refer to other demos or platform training documentation for mode details on training on clusters. +## Inference +You can use the trained model to perform prediction on the dataset with no labels. You can also evaluate the model on dataset with labels to obtain its test accuracy. +
![](./PipelineTest_en.png)


The test script (test.sh) is listed below. PaddlePaddle can evaluate a model on data with labels specified in `test.list`.

```bash
paddle train \
--config=trainer_config.lstm.py \
--use_gpu=false \
--job=test \
--init_model_path=./output/pass-0000x
```

We will give an example of performing prediction using the recurrent model on a dataset with no labels. You can refer to the Python Prediction API tutorial, or to the other demos, for the prediction process using Python. You can also use the following script for inference or evaluation.

Inference script (predict.sh):

```bash
model="output/pass-00003"
paddle train \
    --config=trainer_config.lstm.py \
    --use_gpu=false \
    --job=test \
    --init_model_path=$model \
    --config_args=is_predict=1 \
    --predict_output_dir=.

mv rank-00000 result.txt
```
There are several differences between the training and inference network configurations.
- You do not need labels during inference.
- The outputs need to be set to the classification probability layer (the output of the softmax layer), or to the id of the maximum probability (the `maxid_layer`). An example that outputs both the id and the probability is given in the code snippet below.
- batch_size = 1.
- You need to specify the location of `test_list` in the test data.

```python
is_predict = get_config_arg('is_predict', bool, False)
trn = 'data/train.list' if not is_predict else None
tst = 'data/test.list' if not is_predict else 'data/pred.list'
obj = 'process' if not is_predict else 'process_pre'
batch_size = 128 if not is_predict else 1
if is_predict:
    maxid = maxid_layer(output)
    outputs([maxid, output])
else:
    label = data_layer(name="label", size=2)
    cls = classification_cost(input=output, label=label)
    outputs(cls)
```

## Summary
The data download scripts, network configurations, and training scripts are in `/demo/quick_start`. The following table summarizes the performance of our network architectures on the Amazon-Elec dataset (25k):

| Network name                    | Number of parameters | Error rate | Configuration file name |
| ------------------------------- | -------------------- | ---------- | ----------------------- |
| Logistic regression model (BOW) | 252 KB               | 8.652%     | trainer_config.lr.py    |
| Word embedding model            | 15 MB                | 8.484%     | trainer_config.bow.py   |
| Convolution model               | 16 MB                | 5.628%     | trainer_config.cnn.py   |
| Time sequence model             | 16 MB                | 4.812%     | trainer_config.lstm.py  |



## Appendix
### Command Line Arguments

* --config: network configuration file path.
* --save_dir: directory in which models are saved.
* --log_period: the logging period, in batches.
* --num_passes: number of training passes. One pass means the training goes over the whole training dataset once.
* --config_args: other configuration arguments.
* --init_model_path: the path of the initial model parameters.

By default, the trainer saves a model after every pass. You can also specify `saving_period_by_batches` to save a model every given number of batches. You can use `show_parameter_stats_period` to print statistics of the parameters, which are very useful for tuning. Other command line arguments can be found in the command line argument documentation.

### Log

```
TrainerInternal.cpp:160] Batch=20 samples=2560 AvgCost=0.628761 CurrentCost=0.628761 Eval: classification_error_evaluator=0.304297 CurrentEval: classification_error_evaluator=0.304297
```
During model training, you will see log lines like the example above. The fields are explained in the following table:

| Name                                        | Explanation                                                                  |
| ------------------------------------------- | ---------------------------------------------------------------------------- |
| Batch=20                                    | You have trained 20 batches.                                                 |
| samples=2560                                | You have trained 2560 examples.                                              |
| AvgCost                                     | The average cost from the first batch to the current batch.                 |
| CurrentCost                                 | The average cost of the last log_period batches.                             |
| Eval: classification_error_evaluator        | The average classification error from the first batch to the current batch. |
| CurrentEval: classification_error_evaluator | The average error rate of the last log_period batches.                       |

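Because these log lines are plain text, it is easy to track training progress programmatically. The following is an illustrative Python sketch (not part of the demo) that pulls `Batch`, `samples`, `AvgCost`, and the running classification error out of a log file such as `train.log`.

```python
import re

# Matches the batch, sample count, average cost, and the first (running average)
# classification_error_evaluator value in a PaddlePaddle training log line.
LINE_RE = re.compile(
    r"Batch=(\d+)\s+samples=(\d+)\s+AvgCost=([\d.]+).*?"
    r"classification_error_evaluator=([\d.]+)")

def parse_log(path):
    """Yield (batch, samples, avg_cost, error) tuples from a training log."""
    with open(path) as f:
        for line in f:
            m = LINE_RE.search(line)
            if m:
                batch, samples, cost, err = m.groups()
                yield int(batch), int(samples), float(cost), float(err)

if __name__ == "__main__":
    for batch, samples, cost, err in parse_log("train.log"):
        print("batch %d: avg cost %.4f, error %.4f" % (batch, cost, err))
```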
diff --git a/doc/_sources/demo/rec/ml_dataset.txt b/doc/_sources/demo/rec/ml_dataset.txt new file mode 100644 index 0000000000000000000000000000000000000000..c93a4585e4027b1912da8a77c2562d1ee69c5366 --- /dev/null +++ b/doc/_sources/demo/rec/ml_dataset.txt @@ -0,0 +1,107 @@ +# MovieLens Dataset + +The [MovieLens Dataset](http://grouplens.org/datasets/movielens/) was collected by GroupLens Research. +The data set contains some user information, movie information, and many movie ratings from \[1-5\]. +The data sets have many version depending on the size of set. +We use [MovieLens 1M Dataset](http://files.grouplens.org/datasets/movielens/ml-1m.zip) as a demo dataset, which contains +1 million ratings from 6000 users on 4000 movies. Released 2/2003. + +## Dataset Features + +In [ml-1m Dataset](http://files.grouplens.org/datasets/movielens/ml-1m.zip), there are many features in these dataset. +The data files (which have ".dat" extension) in [ml-1m Dataset](http://files.grouplens.org/datasets/movielens/ml-1m.zip) +is basically CSV file that delimiter is "::". The description in README we quote here. + +### RATINGS FILE DESCRIPTION(ratings.dat) + + +All ratings are contained in the file "ratings.dat" and are in the +following format: + +UserID::MovieID::Rating::Timestamp + +- UserIDs range between 1 and 6040 +- MovieIDs range between 1 and 3952 +- Ratings are made on a 5-star scale (whole-star ratings only) +- Timestamp is represented in seconds since the epoch as returned by time(2) +- Each user has at least 20 ratings + +### USERS FILE DESCRIPTION(users.dat) + +User information is in the file "users.dat" and is in the following +format: + +UserID::Gender::Age::Occupation::Zip-code + +All demographic information is provided voluntarily by the users and is +not checked for accuracy. Only users who have provided some demographic +information are included in this data set. 
+ +- Gender is denoted by a "M" for male and "F" for female +- Age is chosen from the following ranges: + + * 1: "Under 18" + * 18: "18-24" + * 25: "25-34" + * 35: "35-44" + * 45: "45-49" + * 50: "50-55" + * 56: "56+" + +- Occupation is chosen from the following choices: + + * 0: "other" or not specified + * 1: "academic/educator" + * 2: "artist" + * 3: "clerical/admin" + * 4: "college/grad student" + * 5: "customer service" + * 6: "doctor/health care" + * 7: "executive/managerial" + * 8: "farmer" + * 9: "homemaker" + * 10: "K-12 student" + * 11: "lawyer" + * 12: "programmer" + * 13: "retired" + * 14: "sales/marketing" + * 15: "scientist" + * 16: "self-employed" + * 17: "technician/engineer" + * 18: "tradesman/craftsman" + * 19: "unemployed" + * 20: "writer" + +### MOVIES FILE DESCRIPTION(movies.dat) + +Movie information is in the file "movies.dat" and is in the following +format: + +MovieID::Title::Genres + +- Titles are identical to titles provided by the IMDB (including +year of release) +- Genres are pipe-separated and are selected from the following genres: + + * Action + * Adventure + * Animation + * Children's + * Comedy + * Crime + * Documentary + * Drama + * Fantasy + * Film-Noir + * Horror + * Musical + * Mystery + * Romance + * Sci-Fi + * Thriller + * War + * Western + +- Some MovieIDs do not correspond to a movie due to accidental duplicate +entries and/or test entries +- Movies are mostly entered by hand, so errors and inconsistencies may exist diff --git a/doc/_sources/demo/rec/ml_regression.txt b/doc/_sources/demo/rec/ml_regression.txt new file mode 100644 index 0000000000000000000000000000000000000000..eb952c8e7a3d0fe3645079a3904cd55143f3051d --- /dev/null +++ b/doc/_sources/demo/rec/ml_regression.txt @@ -0,0 +1,361 @@ +Regression MovieLens Ratting +============================ + +Here we demonstrate a **Cosine Similarity Regression** job in movie lens dataset. +This demo will show how paddle does (word) embedding job, +handles the similarity regression, +the character-level convolutional networks for text, and how does paddle handle +multiple types of inputs. +Note that the model structure is not fine-tuned and just a demo to show how paddle works. + + +YOU ARE WELCOME TO BUILD A BETTER DEMO +BY USING PADDLEPADDLE, AND LET US KNOW TO MAKE THIS DEMO BETTER. + +Data Preparation +```````````````` +Download and extract dataset +'''''''''''''''''''''''''''' +We use `movielens 1m dataset `_ here. +To download and unzip the dataset, simply run the following commands. + +.. code-block:: bash + + cd demo/recommendation/data + ./ml_data.sh + +And the directory structure of :code:`demo/recommendation/data/ml-1m` is: + +.. code-block:: text + + +--ml-1m + +--- movies.dat # movie features + +--- ratings.dat # ratings + +--- users.dat # user features + +--- README # dataset description + +Field config file +''''''''''''''''' +**Field config file** is used to specific the fields dataset and file format, +i.e, specific **WHAT** type it is in each feature file. + +The field config file of ml-1m shows in :code:`demo/recommendation/data/config.json`. +It specifics the field types and file names: 1) there are four types of field for user file\: id, gender, age and occupation; +2) the filename is "users.dat", and the delimiter of file is "::". + +.. include:: ../../../demo/recommendation/data/config.json + :code: json + :literal: + +Preprocess Data +``````````````` +You need to install python 3rd party libraries. +IT IS HIGHLY RECOMMEND TO USE VIRTUALENV MAKE A CLEAN PYTHON ENVIRONMENT. + +.. 
code-block:: bash + + pip install -r requirements.txt + +The general command for preprocessing the dataset is: + +.. code-block:: bash + + cd demo/recommendation + ./preprocess.sh + +And the detail steps are introduced as follows. + +Extract Movie/User features to python object +''''''''''''''''''''''''''''''''''''''''''''' + +There are many features in movie or user in movielens 1m dataset. +Each line of rating file just provides a Movie/User id to refer each movie or user. +We process the movie/user feature file first, and pickle the feature (**Meta**) object as a file. + +Meta config file +................ + +**Meta config file** is used to specific **HOW** to parse each field in dataset. +It could be translated from field config file, or written by hand. +Its file format could be either json or yaml syntax file. Parser will automatically choose the file format by extension name. + +To convert Field config file to meta config file, just run: + +.. code-block:: bash + + cd demo/recommendation/data + python config_generator.py config.json > meta_config.json + +The meta config file shows below: + +.. include:: ../../../demo/recommendation/data/meta_config.json + :code: json + :literal: + +There are two kinds of features in meta\: movie and user. + +* in movie file, whose name is movies.dat + * we just split each line by "::" + * pos 0 is id. + * pos 1 feature: + * name is title. + * it uses regex to parse this feature. + * it is a char based word embedding feature. + * it is a sequence. + * pos 2 feature: + * name is genres. + * type is one hot dense vector. + * dictionary is auto generated by parsing, each key is split by '|' +* in user file, whose name is users.dat + * we just split each line by "::" + * pos 0 is id. + * pos 1 feature: + * name is gender + * just simple char based embedding. + * pos 2 feature: + * name is age + * just whole word embedding. + * embedding id will be sort by word. + * pos 3 feature: + * name is occupation. + * just simple whole word embedding. + + +Meta file +''''''''' + +After having meta config file, we can generate **Meta file**, a python pickle object which stores movie/user information. +The following commands could be run to generate it. + +.. code-block:: bash + + python meta_generator.py ml-1m meta.bin --config=meta_config.json + +And the structure of the meta file :code:`meta.bin` is: + +.. code-block:: text + + +--+ movie + | +--+ __meta__ + | | +--+ raw_meta # each feature meta config. list + | | | + + | | | | # ID Field, we use id as key + | | | +--+ {'count': 3883, 'max': 3952, 'is_key': True, 'type': 'id', 'min': 1} + | | | | + | | | | # Titile field, the dictionary list of embedding. + | | | +--+ {'dict': [ ... ], 'type': 'embedding', 'name': 'title', 'seq': 'sequence'} + | | | | + | | | | # Genres field, the genres dictionary + | | | +--+ {'dict': [ ... ], 'type': 'one_hot_dense', 'name': 'genres'} + | | | + | | +--+ feature_map [1, 2] # a list for raw_meta index for feature field. + | | # it means there are 2 features for each key. + | | # * 0 offset of feature is raw_meta[1], Title. + | | # * 1 offset of feature is raw_meta[2], Genres. + | | + | +--+ 1 # movie 1 features + | | + + | | +---+ [[...], [...]] # title ids, genres dense vector + | | + | +--+ 2 + | | + | +--+ ... 
+ | + +--- user + +--+ __meta__ + | + + | +--+ raw_meta + | | + + | | +--+ id field as user + | | | + | | +--+ {'dict': ['F', 'M'], 'type': 'embedding', 'name': 'gender', 'seq': 'no_sequence'} + | | | + | | +--+ {'dict': ['1', '18', '25', '35', '45', '50', '56'], 'type': 'embedding', 'name': 'age', 'seq': 'no_sequence'} + | | | + | | +--+ {'dict': [...], 'type': 'embedding', 'name': 'occupation', 'seq': 'no_sequence'} + | | + | +--+ feature_map [1, 2, 3] + | + +--+ 1 # user 1 features + | + +--+ 2 + +--+ ... + + +Split Training/Testing files +'''''''''''''''''''''''''''' + +We split :code:`ml-1m/ratings.dat` into a training and testing file. The way to split file is for each user, we split the +rating by two parts. So each user in testing file will have some rating information in training file. + +Use separate.py to separate the training and testing file. + +.. code-block:: bash + + python split.py ml-1m/ratings.dat --delimiter="::" --test_ratio=0.1 + +Then two files will be generated\: :code:`ml-1m/ratings.dat.train` and :code:`ml-1m/rating.data.test`. +Move them to workspace :code:`data`, shuffle the train file, and prepare the file list for paddle train. + +.. code-block:: bash + + shuf ml-1m/ratings.dat.train > ratings.dat.train + cp ml-1m/ratings.dat.test . + echo "./data/ratings.dat.train" > train.list + echo "./data/ratings.dat.test" > test.list + + +Neural Network Configuration +```````````````````````````` + +Trainer Config File +''''''''''''''''''' + +The network structure shows below. + +.. image:: rec_regression_network.png + :align: center + :alt: rec_regression_network + +The demo's neural network config file "trainer_config.py" show as below. + +.. include:: ../../../demo/recommendation/trainer_config.py + :code: python + :literal: + +In this :code:`trainer_config.py`, we just map each feature type to +a feature vector, following shows how to map each feature to a vector shows below. + +* :code:`id`\: Just simple embedding, and then add to fully connected layer. +* :code:`embedding`\: + - if is_sequence, get the embedding and do a text convolutional operation, + get the average pooling result. + - if not sequence, get the embedding and add to fully connected layer. +* :code:`one_host_dense`\: + - just two fully connected layer. + +Then we combine each features of movie into one movie feature by a +:code:`fc_layer` with multiple inputs, and do the same thing to user features, +get one user feature. Then we calculate the cosine similarity of these two +features. + +In these network, we use several api in `trainer_config_helpers +<../../ui/api/trainer_config_helpers/index.html>`_. 
There are + +* Data Layer, `data_layer + <../../ui/api/trainer_config_helpers/layers.html#id1>`_ +* Fully Connected Layer, `fc_layer + <../../ui/api/trainer_config_helpers/layers.html#fc-layer>`_ +* Embedding Layer, `embedding_layer + <../../ui/api/trainer_config_helpers/layers.html#embedding-layer>`_ +* Context Projection Layer, `context_projection + <../../ui/api/trainer_config_helpers/layers.html#context-projection>`_ +* Pooling Layer, `pooling_layer + <../../ui/api/trainer_config_helpers/layers.html#pooling-layer>`_ +* Cosine Similarity Layer, `cos_sim + <../../ui/api/trainer_config_helpers/layers.html#cos-sim>`_ +* Text Convolution Pooling Layer, `text_conv_pool + <../../ui/api/trainer_config_helpers/networks.html + #trainer_config_helpers.networks.text_conv_pool>`_ +* Declare Python Data Sources, `define_py_data_sources + <../../ui/api/trainer_config_helpers/data_sources.html>`_ + +Data Provider +''''''''''''' + +.. include:: ../../../demo/recommendation/dataprovider.py + :code: python + :literal: + +The data provider just read the meta.bin and rating file, yield each sample for training. +In this :code:`dataprovider.py`, we should set\: + +* obj.slots\: The feature types and dimension. +* use_seq\: Whether this :code:`dataprovider.py` in sequence mode or not. +* process\: Return each sample of data to :code:`paddle`. + +The data provider details document see `there <../../ui/DataProvider.html>`_. + +Train +````` + +After prepare data, config network, writting data provider, now we can run paddle training. + +The run.sh is shown as follow: + +.. include:: ../../../demo/recommendation/run.sh + :code: bash + :literal: + +It just start a paddle training process, write the log to `log.txt`, +then print it on screen. + +Each command line argument in :code:`run.sh`, please refer to the `command line +arguments `_ page. The short description of these arguments is shown as follow. + +* config\: Tell paddle which file is neural network configuration. +* save_dir\: Tell paddle save model into './output' +* use_gpu\: Use gpu or not. Default is false. +* trainer_count\: The compute thread in one machine. +* test_all_data_in_one_period\: Test All Data during one test period. Otherwise, + will test a :code:`batch_size` data in one test period. +* log_period\: Print log after train :code:`log_period` batches. +* dot_period\: Print a :code:`.` after train :code:`dot_period` batches. +* num_passes\: Train at most :code:`num_passes`. + + + +If training process starts successfully, the output likes follow: + +.. 
code-block:: text + + I0601 08:07:22.832059 10549 TrainerInternal.cpp:157] Batch=100 samples=160000 AvgCost=4.13494 CurrentCost=4.13494 Eval: CurrentEval: + + I0601 08:07:50.672627 10549 TrainerInternal.cpp:157] Batch=200 samples=320000 AvgCost=3.80957 CurrentCost=3.48421 Eval: CurrentEval: + + I0601 08:08:18.877369 10549 TrainerInternal.cpp:157] Batch=300 samples=480000 AvgCost=3.68145 CurrentCost=3.42519 Eval: CurrentEval: + + I0601 08:08:46.863963 10549 TrainerInternal.cpp:157] Batch=400 samples=640000 AvgCost=3.6007 CurrentCost=3.35847 Eval: CurrentEval: + + I0601 08:09:15.413025 10549 TrainerInternal.cpp:157] Batch=500 samples=800000 AvgCost=3.54811 CurrentCost=3.33773 Eval: CurrentEval: + I0601 08:09:36.058670 10549 TrainerInternal.cpp:181] Pass=0 Batch=565 samples=902826 AvgCost=3.52368 Eval: + I0601 08:09:46.215489 10549 Tester.cpp:101] Test samples=97383 cost=3.32155 Eval: + I0601 08:09:46.215966 10549 GradientMachine.cpp:132] Saving parameters to ./output/model/pass-00000 + I0601 08:09:46.233397 10549 ParamUtil.cpp:99] save dir ./output/model/pass-00000 + I0601 08:09:46.233438 10549 Util.cpp:209] copy trainer_config.py to ./output/model/pass-00000 + I0601 08:09:46.233541 10549 ParamUtil.cpp:147] fileName trainer_config.py + +The model is saved in :code:`output/` directory. You can use :code:`Ctrl-C` to stop training whenever you want. + +Evaluate and Predict +```````````````````` + +After training several passes, you can evalute them and get the best pass. Just run + +.. code-block:: bash + + ./evalute.sh + +You will see messages like this: + +.. code-block:: text + + Best pass is 00009, error is 3.06949, which means predict get error as 0.875998002281 + evaluating from pass output/pass-00009 + +Then, you can predict what any user will rate a movie. Just run + +.. code-block:: bash + + python prediction.py 'output/pass-00009/' + +Predictor will read user input, and predict scores. It has a command-line user interface as follows: + +.. code-block:: text + + Input movie_id: 9 + Input user_id: 4 + Prediction Score is 2.56 + Input movie_id: 8 + Input user_id: 2 + Prediction Score is 3.13 diff --git a/doc/_sources/demo/semantic_role_labeling/index.txt b/doc/_sources/demo/semantic_role_labeling/index.txt new file mode 100644 index 0000000000000000000000000000000000000000..53c817a485b3cc6d8942729eb0f1fff3d7b01adc --- /dev/null +++ b/doc/_sources/demo/semantic_role_labeling/index.txt @@ -0,0 +1,195 @@ +# Semantic Role Labelling Tutorial +Semantic role labeling (SRL) is a form of shallow semantic parsing whose goal is to discover the predicate-argument structure of each predicate in a given input sentence. SRL is useful as an intermediate step in a wide range of natural language processing tasks, such as information extraction. automatic document categorization and question answering. An instance is as following [1]: + + + [ A0 He ] [ AM-MOD would ][ AM-NEG n’t ] [ V accept] [ A1 anything of value ] from [A2 those he was writing about ]. + +- V: verb +- A0: acceptor +- A1: thing accepted +- A2: accepted-from +- A3: Attribute +- AM-MOD: modal +- AM-NEG: negation + + +Given the verb "accept", the chunks in sentence would play certain semantic roles. Here, the label scheme is from Penn Proposition Bank. + +To this date, most of the successful SRL systems are built on top of some form of parsing results where pre-defined feature templates over the syntactic structure are used. 
This tutorial will present an end-to-end system using deep bidirectional long short-term memory (DB-LSTM)[2] for solving the SRL task, which largely outperforms the previous state-of-the-art systems. The system regards SRL task as the sequence labelling problem. + + +## Data Description +The relevant paper[2] takes the data set in CoNLL-2005&2012 Shared Task for training and testing. Accordingto data license, the demo adopts the test data set of CoNLL-2005, which can be reached on website. + +To download and process the original data, user just need to execute the following command: + +```bash +cd data +./get_data.sh +``` +Several new files appear in the `data `directory as follows. +```bash +conll05st-release:the test data set of CoNll-2005 shared task +test.wsj.words:the Wall Street Journal data sentences +test.wsj.props: the propositional arguments +src.dict:the dictionary of words in sentences +tgt.dict:the labels dictionary +feature: the extracted features from data set +``` + + +## Training +### DB-LSTM +Please refer to the Sentiment Analysis demo to learn more about the long short-term memory unit. + +Unlike Bidirectional-LSTM that used in Sentiment Analysis demo, the DB-LSTM adopts another way to stack LSTM layer. First a standard LSTM processes the sequence in forward direction. The input and output of this LSTM layer are taken by the next LSTM layer as input, processed in reversed direction. These two standard LSTM layers compose a pair of LSTM. Then we stack LSTM layers pair after pair to obtain the deep LSTM model. + +The following figure shows a temporal expanded 2-layer DB-LSTM network. +
+![pic](./network_arch.png) +
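To make the pair-wise stacking described above more concrete, here is a rough configuration sketch using the `trainer_config_helpers` interface. It is only an illustration, not the demo's `db_lstm.py`: the `hidden_dim` and `depth` values, the use of `simple_lstm`, and the `reverse=True` argument for the backward member of each pair are all assumptions made for this sketch, and the real demo also feeds each pair's original input forward, which is omitted here.

```python
# Simplified sketch of stacking LSTM layers pair after pair (assumptions noted above).
from paddle.trainer_config_helpers import *

def stacked_db_lstm(feature, depth=4, hidden_dim=128):
    """Stack `depth` forward/backward LSTM pairs on top of `feature`."""
    for i in range(depth):
        # Forward-direction LSTM of the i-th pair reads the current features.
        forward = simple_lstm(input=feature, size=hidden_dim)
        # Backward member of the pair: processes the forward output in reverse.
        # (reverse=True is assumed here; the demo configures this differently.)
        backward = simple_lstm(input=forward, size=hidden_dim, reverse=True)
        # The backward output becomes the input of the next pair.
        feature = backward
    return feature
```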
+ + + +### Features +Two input features play an essential role in this pipeline: predicate (pred) and argument (argu). Two other features: predicate context (ctx-p) and region mark (mr) are also adopted. Because a single predicate word can not exactly describe the predicate information, especially when the same words appear more than one times in a sentence. With the predicate context, the ambiguity can be largely eliminated. Similarly, we use region mark mr = 1 to denote the argument position if it locates in the predicate context region, or mr = 0 if does not. These four simple features are all we need for our SRL system. Features of one sample with context size set to 1 is showed as following[2]: +
+![pic](./feature.jpg) +
+ +In this sample, the coresponding labelled sentence is: + +[ A1 A record date ] has [ AM-NEG n't ] been [ V set ] . + +In the demo, we adopt the feature template as above, consists of : `argument`, `predicate`, `ctx-p (p=-1,0,1)`, `mark` and use `B/I/O` scheme to label each argument. These features and labels are stored in `feature` file, and separated by `\t`. + +### Data Provider + +`dataprovider.py` is the python file to wrap data. `hook()` function is to define the data slots for network. The Six features and label are all IndexSlots. +``` +def hook(settings, word_dict, label_dict, **kwargs): + settings.word_dict = word_dict + settings.label_dict = label_dict + #all inputs are integral and sequential type + settings.slots = [ + integer_value(len(word_dict), seq_type=SequenceType.SEQUENCE), + integer_value(len(word_dict), seq_type=SequenceType.SEQUENCE), + integer_value(len(word_dict), seq_type=SequenceType.SEQUENCE), + integer_value(len(word_dict), seq_type=SequenceType.SEQUENCE), + integer_value(len(word_dict), seq_type=SequenceType.SEQUENCE), + integer_value(2, seq_type=SequenceType.SEQUENCE), + integer_value(len(label_dict), seq_type=SequenceType.SEQUENCE)]``` + +``` +The corresponding data iterator is as following: +``` +@provider(use_seq=True, init_hook=hook) +def process(obj, file_name): + with open(file_name, 'r') as fdata: + for line in fdata: + sentence, predicate, ctx_n1, ctx_0, ctx_p1, mark, label = line.strip().split('\t') + words = sentence.split() + sen_len = len(words) + word_slot = [obj.word_dict.get(w, UNK_IDX) for w in words] + + predicate_slot = [obj.word_dict.get(predicate, UNK_IDX)] * sen_len + ctx_n1_slot = [obj.word_dict.get(ctx_n1, UNK_IDX) ] * sen_len + ctx_0_slot = [obj.word_dict.get(ctx_0, UNK_IDX) ] * sen_len + ctx_p1_slot = [obj.word_dict.get(ctx_p1, UNK_IDX) ] * sen_len + + marks = mark.split() + mark_slot = [int(w) for w in marks] + + label_list = label.split() + label_slot = [obj.label_dict.get(w) for w in label_list] + + yield word_slot, predicate_slot, ctx_n1_slot, ctx_0_slot, ctx_p1_slot, mark_slot, label_slot +``` +The `process`function yield 7 lists which are six features and labels. + +### Neural Network Config +`db_lstm.py` is the neural network config file to load the dictionaries and define the data provider module and network architecture during the training procedure. + +Seven `data_layer` load instances from data provider. Six features are transformed into embedddings respectively, and mixed by `mixed_layer` . Deep bidirectional LSTM layers extract features for the softmax layer. The objective function is cross entropy of labels. + +### Run Training +The script for training is `train.sh`, user just need to execute: +```bash + ./train.sh +``` +The content in `train.sh`: +``` +paddle train \ + --config=./db_lstm.py \ + --save_dir=./output \ + --trainer_count=4 \ + --log_period=10 \ + --num_passes=500 \ + --use_gpu=false \ + --show_parameter_stats_period=10 \ + --test_all_data_in_one_period=1 \ +2>&1 | tee 'train.log' +``` + + +- \--config=./db_lstm.py : network config file. +- \--save_di=./output: output path to save models. +- \--trainer_count=4 : set thread number (or GPU count). +- \--log_period=10 : print log every 20 batches. +- \--num_passes=500: set pass number, one pass in PaddlePaddle means training all samples in dataset one time. +- \--use_gpu=false: use CPU to train, set true, if you install GPU version of PaddlePaddle and want to use GPU to train. +- \--show_parameter_stats_period=10: show parameter statistic every 100 batches. 
+- \--test_all_data_in_one_period=1: test all data in every testing. + + +After training, the models will be saved in directory `output`. + +### Run testing +The script for testing is `test.sh`, user just need to execute: +```bash + ./test.sh +``` +The main part in `tesh.sh` +``` +paddle train \ + --config=./db_lstm.py \ + --model_list=$model_list \ + --job=test \ + --config_args=is_test=1 \ +``` + + - \--config=./db_lstm.py: network config file + - \--model_list=$model_list.list: model list file + - \--job=test: indicate the test job + - \--config_args=is_test=1: flag to indicate test + + +### Run prediction +The script for prediction is `predict.sh`, user just need to execute: +```bash + ./predict.sh + +``` +In `predict.sh`, user should offer the network config file, model path, label file, word dictionary file, feature file +``` +python predict.py + -c $config_file + -w $model_path + -l $label_file + -d $dict_file + -i $input_file +``` + +`predict.py` is the main executable python script, which includes functions: load model, load data, data prediction. The network model will output the probability distribution of labels. In the demo, we take the label with maximum probability as result. User can also implement the beam search or viterbi decoding upon the probability distribution matrix. + +After prediction, the result is saved in `predict.res`. + + + + + +## Reference +[1] Martha Palmer, Dan Gildea, and Paul Kingsbury. The Proposition Bank: An Annotated Corpus of Semantic Roles , Computational Linguistics, 31(1), 2005. + +[2] Zhou, Jie, and Wei Xu. "End-to-end learning of semantic role labeling using recurrent neural networks." Proceedings of the Annual Meeting of the Association for Computational Linguistics. 2015. + diff --git a/doc/_sources/demo/sentiment_analysis/index.txt b/doc/_sources/demo/sentiment_analysis/index.txt new file mode 100644 index 0000000000000000000000000000000000000000..9ee6d3a177c19de9fabf7b7e86c7c371bc094736 --- /dev/null +++ b/doc/_sources/demo/sentiment_analysis/index.txt @@ -0,0 +1,9 @@ +Sentiment Analasis Tutorial +=========================== + +.. toctree:: + :maxdepth: 3 + :glob: + + Training Locally + internal/cluster_train.md diff --git a/doc/_sources/demo/sentiment_analysis/sentiment_analysis.txt b/doc/_sources/demo/sentiment_analysis/sentiment_analysis.txt new file mode 100644 index 0000000000000000000000000000000000000000..957e85869820a8e8c2dc70f6d95d7602f305b40c --- /dev/null +++ b/doc/_sources/demo/sentiment_analysis/sentiment_analysis.txt @@ -0,0 +1,320 @@ +# Sentiment Analysis Tutorial + +Sentiment analysis has many applications. A basic task in sentiment analysis is classifying the polarity of a given text at the document, sentence or feature/aspect level. One simple example is to classify the customer reviews in a shopping website, a tourism website, and group buying websites like Amazon, TaoBao, Tmall etc. + +Sentiment analysis is also used to monitor social media based on large amount of reviews or blogs. For example, the researchers analyzed several surveys on consumer confidence and political opinion, found they correlate to sentiment word frequencies in contemporaneous Twitter messages [1]. Another example is to forecast stock movements through analyzing the text content of a daily Twitter blog [2]. + +On the other hand, grabbing the user comments of products and analyzing their sentiment are useful to understand user preferences for companies, products, even competing products. 
+ +This tutorial will guide you through the process of training a Long Short Term Memory (LSTM) Network to classify the sentiment of sentences from [Large Movie Review Dataset](http://ai.stanford.edu/~amaas/data/sentiment/), sometimes known as the [Internet Movie Database (IMDB)](http://ai.stanford.edu/~amaas/papers/wvSent_acl2011.pdf). This dataset contains movie reviews along with their associated binary sentiment polarity labels, namely positive and negative. So randomly guessing yields 50% accuracy. + +## Data Preparation + +### IMDB Data Introduction + +Before training models, we need to preprocess the data and build a dictionary. First, you can use following script to download IMDB dataset and [Moses](http://www.statmt.org/moses/) tool, which is a statistical machine translation system. We provide a data preprocessing script, which is capable of handling not only IMDB data, but also other user-defined data. In order to use the pre-written script, it needs to move labeled train and test samples to another path, which has been done in `get_imdb.sh`. + +``` +cd demo/sentiment/data +./get_imdb.sh +``` +If the data is obtained successfuly, you will see the following files at ```./demo/sentiment/data```: + +``` +aclImdb get_imdb.sh imdb mosesdecoder-master +``` + +* aclImdb: raw dataset downloaded from website. +* imdb: only contains train and test data. +* mosesdecoder-master: Moses tool. + +IMDB dataset contains 25,000 highly polar movie reviews for training, and 25,000 for testing. A negative review has a score ≤ 4 out of 10, and a positive review has a score ≥ 7 out of 10. After running `./get_imdb.sh`, we can find the dataset has the following structure in `aclImdb`. + +``` +imdbEr.txt imdb.vocab README test train +``` +* train: train sets. +* test : test sets. +* imdb.vocab: dictionary. +* imdbEr.txt: expected rating for each token in imdb.vocab. +* README: data documentation. + +Both train and test set directory contains: + +``` +labeledBow.feat neg pos unsup unsupBow.feat urls_neg.txt urls_pos.txt urls_unsup.txt +``` + +* pos: positive samples, contains 12,500 txt files, each file is one movie review. +* neg: negative samples, contains 12,500 txt files, each file is one movie review. +* unsup: unlabeled samples, contains 50,000 txt files. +* urls_xx.txt: urls of each reviews. +* xxBow.feat: already-tokenized bag of words (BoW) features. + +### IMDB Data Preparation + +In this demo, we only use labled train and test set and not use imdb.vocab as dictionary. By default, dictionary is builded on train set. Train set is shuffled and test set is not. `tokenizer.perl` in Moses tool is used to tokenize the words and punctuation. Simply execute the following command to preprcess data. + +``` +cd demo/sentiment/ +./preprocess.sh +``` +preprocess.sh: + +``` +data_dir="./data/imdb" +python preprocess.py -i data_dir +``` + +* data_dir: input data directory. +* preprocess.py: preprocess script. + +If running successfully, you will see `demo/sentiment/data/pre-imdb` directory as follows: + +``` +dict.txt labels.list test.list test_part_000 train.list train_part_000 +``` +* test\_part\_000 and train\_part\_000: all labeled test and train sets. Train sets have be shuffled. +* train.list and test.list: train and test file lists. +* dict.txt: dictionary generated on train sets by default. +* labels.txt: neg 0, pos 1, means label 0 is negative review, label 1 is positive review. + +### User-defined Data Preparation + +If you perform other sentiment classifcation task, you can prepare data as follows. 
We have provided the scripts to build dictionary and preprocess data. So just organize data as follows. + +``` +dataset +|----train +| |----class1 +| | |----text_files +| |----class2 +| | |----text_files +| | ... +|----test +| |----class1 +| | |----text_files +| |----class2 +| | |----text_files +| | ... +``` +* dataset: 1st directory. +* train, test: 2nd directory. +* class1,class2,...: 3rd directory. +* text_files: samples with text file format. + +All samples with text files format under the same folder are same category. Each text file contains one or more samples and each line is one sample. In order to shuffle fully, the preprocessing is a little different for data with multiple lines in one text file, which needs to set `-m True` in `preprocess.sh`. And tokenizer.perl is used by default. If you don't need it, only set `-t False` in `preprocess.sh'. + +## Training + +In this task, we use Recurrent Neural Network (RNN) of LSTM architecure to train sentiment analysis model. LSTM model was introduced primarily in order to overcome the problem of vanishing gradients. LSTM network resembles a standard recurrent neural network with a hidden layer, but each ordinary node in the hidden layer is replaced by a memory cell. Each memory cell contains four main elements: an input gate, a neuron with a self-recurrent connection, a forget gate and an output gate. More details can be found in the literature [4]. The biggest advantage of the LSTM architecture is that it learns to memorize information over long time intervals without the loss of short time memory. At each time step with a new coming word, historical information stored in the memory block is updated to iteratively learn the sequence representation. + +
![LSTM](./lstm.png)
+
Figure 1. LSTM [3]
+ +Sentiment analysis is among the most typical problems in natural language understanding. It aims at predicting the attitude expressed in a sequence. Usually, only some key words, like adjectives and adverbs words, play a major role in predicting the sentiment of sequences or paragraphs. However, some review or comment contexts are very long, such as IMDB dataset. We use LSTM to perform this task for its improved design with the gate mechanism. First, it is able to summarize the representation from word level to context level with variable context length which is adapted by the gate values. Second, it can utilize the expanded context at the sentence level, while most methods are good at utilizing n-gram level knowledge. Third, it learns the paragraph representation directly rather than combining the context level information. This results in this end-to-end framework. + +In this demo we provide two network, namely bidirectional-LSTM and three layers of stacked-LSTM. + +#### Bidirectional-LSTM + +One is a bidirectional LSTM network, connected by fully connected layer and softmax, as shown in Figure 2. + +
![BiLSTM](./bi_lstm.jpg)
+
Figure 2. Bidirectional-LSTM
+ +#### Stacked-LSTM +Another is three-layer LSTM structure in Figure 3. The bottom of the figure is word embedding. Next, three LSTM-Hidden layers are connected and the second LSTM is reversed. Then extract the maximum hidden vectors of all time step of hidden and LSTM layer as the representation for the entire sequence. Finally, a fully connected feed forward layer with softmax activation is used to perform the classification task. This network is refered to paper [5]. + +
![StackedLSTM](./stacked_lstm.jpg)
+
Figure 3. Stacked-LSTM for sentiment analysis
+ +**Config** + +Switch into `demo/sentiment` directory, `trainer_config.py` file is an example of the config, containing algorithm and newtork configure. The first line imports predefined networks from `sentiment_net.py`. + +trainer_config.py: + +```python +from sentiment_net import * + +data_dir = "./data/pre-imdb" +# whether this config is used for test +is_test = get_config_arg('is_test', bool, False) +# whether this config is used for prediction +is_predict = get_config_arg('is_predict', bool, False) +dict_dim, class_dim = sentiment_data(data_dir, is_test, is_predict) + +################## Algorithm Config ##################### + +settings( + batch_size=128, + learning_rate=2e-3, + learning_method=AdamOptimizer(), + regularization=L2Regularization(8e-4), + gradient_clipping_threshold=25 +) + +#################### Network Config ###################### +stacked_lstm_net(dict_dim, class_dim=class_dim, + stacked_num=3, is_predict=is_predict) +#bidirectional_lstm_net(dict_dim, class_dim=class_dim, is_predict=is_predict) +``` + +* **Data Definition**: + * get\_config\_arg(): get arguments setted by `--config_args=xx` in commandline argument. + * Define TrainData and TestData provider, here using Python interface (PyDataProviderWrapper) of PaddlePaddle to load data. For details, you can refer to the document of PyDataProvider. + +* **Algorithm Configuration**: + * use sgd algorithm. + * use adam optimization. + * set batch size of 128. + * set average sgd window. + * set global learning rate. +* **Network Configuration**: + * dict_dim: get dictionary dimension. + * class_dim: set category number, IMDB has two label, namely positive and negative label. + * `stacked_lstm_net`: predefined network as shown in Figure 3, use this network by default. + * `bidirectional_lstm_net`: predefined network as shown in Figure 2. + +**Training** + +Install PaddlePaddle first if necessary. Then you can use script `train.sh` as follows to launch local training. + +``` +cd demo/sentiment/ +./train.sh +``` + +train.sh: + +``` +config=trainer_config.py +output=./model_output +paddle train --config=$config \ + --save_dir=$output \ + --job=train \ + --use_gpu=false \ + --trainer_count=4 \ + --num_passes=10 \ + --log_period=20 \ + --dot_period=20 \ + --show_parameter_stats_period=100 \ + --test_all_data_in_one_period=1 \ + 2>&1 | tee 'train.log' +``` + +* --config=$config: set network config. +* --save\_dir=$output: set output path to save models. +* --job=train: set job mode to train. +* --use\_gpu=false: use CPU to train, set true, if you install GPU version of PaddlePaddle and want to use GPU to train. +* --trainer\_count=4: set thread number (or GPU count). +* --num\_passes=15: set pass number, one pass in PaddlePaddle means training all samples in dataset one time. +* --log\_period=20: print log every 20 batches. +* --show\_parameter\_stats\_period=100: show parameter statistic every 100 batches. +* --test\_all_data\_in\_one\_period=1: test all data every testing. + +If the run succeeds, the output log is saved in path of `demo/sentiment/train.log` and model is saved in path of `demo/sentiment/model_output/`. The output log is explained as follows. + +``` +Batch=20 samples=2560 AvgCost=0.681644 CurrentCost=0.681644 Eval: classification_error_evaluator=0.36875 CurrentEval: classification_error_evaluator=0.36875 +... 
Pass=0 Batch=196 samples=25000 AvgCost=0.418964 Eval: classification_error_evaluator=0.1922
Test samples=24999 cost=0.39297 Eval: classification_error_evaluator=0.149406
```
- Batch=xx: xx batches have been passed.
- samples=xx: xx samples have been passed.
- AvgCost=xx: the average cost from the 0-th batch to the current batch.
- CurrentCost=xx: the cost of the latest log_period batches.
- Eval: classification\_error\_evaluator=xx: the classification error from the 0-th batch to the current batch.
- CurrentEval: classification\_error\_evaluator: the classification error of the latest log_period batches.
- Pass=0: going through the whole training set once is called one pass; 0 means the training set is being traversed for the first time.

By default, we use the `stacked_lstm_net` network, which converges faster than `bidirectional_lstm_net` on the same number of samples. If you want to use the bidirectional LSTM instead, uncomment the last line of the network configuration and comment out `stacked_lstm_net`.

## Testing

Testing means evaluating the trained model on a labeled validation set.

```
cd demo/sentiment
./test.sh
```

test.sh:

```bash
function get_best_pass() {
  cat $1 | grep -Pzo 'Test .*\n.*pass-.*' | \
  sed -r 'N;s/Test.* error=([0-9]+\.[0-9]+).*\n.*pass-([0-9]+)/\1 \2/g' | \
  sort | head -n 1
}

log=train.log
LOG=`get_best_pass $log`
LOG=(${LOG})
evaluate_pass="model_output/pass-${LOG[1]}"

echo 'evaluating from pass '$evaluate_pass

model_list=./model.list
touch $model_list | echo $evaluate_pass > $model_list
net_conf=trainer_config.py
paddle train --config=$net_conf \
             --model_list=$model_list \
             --job=test \
             --use_gpu=false \
             --trainer_count=4 \
             --config_args=is_test=1 \
             2>&1 | tee 'test.log'
```

The function `get_best_pass` selects the model with the lowest classification error rate for testing. In this example, the IMDB test dataset is used as the validation set by default. Unlike training, testing needs to specify `--job=test` and the model path via `--model_list=$model_list`. If it runs successfully, the log is saved to `demo/sentiment/test.log`. For example, in our test the best model is `model_output/pass-00002`, whose classification error is 0.115645, as shown below.

```
Pass=0 samples=24999 AvgCost=0.280471 Eval: classification_error_evaluator=0.115645
```

## Prediction

`predict.py` provides a prediction interface. You should install the Python API of PaddlePaddle before using it. The following example predicts the label of an unlabeled IMDB review. Simply run:

```
cd demo/sentiment
./predict.sh
```
predict.sh:

```
config=trainer_config.py
model=model_output/pass-00002/
label=data/pre-imdb/labels.list
python predict.py \
    -n $config \
    -w $model \
    -b $label \
    -d data/pre-imdb/dict.txt \
    -i data/aclImdb/test/pos/10014_7.txt
```

* `predict.py`: the prediction interface.
* -n $config: set the network config.
* -w $model: set the model path.
* -b $label: set the label file that maps integer labels to string labels.
* -d data/pre-imdb/dict.txt: set the word dictionary.
* -i data/aclImdb/test/pos/10014_7.txt: set one example file to predict.

The prediction result of this example:

```
Loading parameters from model_output/pass-00002/
./data/aclImdb/test/pos/10014_7.txt: predicting label is pos
```
We sincerely appreciate your interest and welcome your contributions.

## Reference
[1] Brendan O'Connor, Ramnath Balasubramanyan, Bryan R. Routledge, and Noah A. Smith. 2010.
[From Tweets to Polls: Linking Text Sentiment to Public Opinion Time Series](http://homes.cs.washington.edu/~nasmith/papers/oconnor+balasubramanyan+routledge+smith.icwsm10.pdf). In ICWSM-2010.
[2] Johan Bollen, Huina Mao, and Xiaojun Zeng. 2011. [Twitter mood predicts the stock market](http://arxiv.org/abs/1010.3003). Journal of Computational Science.
[3] Alex Graves, Marcus Liwicki, Santiago Fernández, Roman Bertolami, Horst Bunke, and Jürgen Schmidhuber. 2009. [A novel connectionist system for unconstrained handwriting recognition](http://www.cs.toronto.edu/~graves/tpami_2009.pdf). IEEE Transactions on Pattern Analysis and Machine Intelligence, 31(5):855–868.
[4] Zachary C. Lipton. [A Critical Review of Recurrent Neural Networks for Sequence Learning](http://arxiv.org/abs/1506.00019v1). arXiv:1506.00019.
[5] Jie Zhou and Wei Xu. [End-to-end Learning of Semantic Role Labeling Using Recurrent Neural Networks](http://www.aclweb.org/anthology/P/P15/P15-1109.pdf). ACL-IJCNLP 2015.
diff --git a/doc/_sources/demo/text_generation/index.txt b/doc/_sources/demo/text_generation/index.txt new file mode 100644 index 0000000000000000000000000000000000000000..82da5524197ac8d4652f0e30f446b5a88bf1629d --- /dev/null +++ b/doc/_sources/demo/text_generation/index.txt @@ -0,0 +1,9 @@ +Text Generation Tutorial +======================== + +.. toctree:: + :maxdepth: 3 + :glob: + + Training Locally + internal/cluster_train.md diff --git a/doc/_sources/demo/text_generation/text_generation.txt b/doc/_sources/demo/text_generation/text_generation.txt new file mode 100644 index 0000000000000000000000000000000000000000..ee97139dd8f796aa7660efa30616a3715b280f24 --- /dev/null +++ b/doc/_sources/demo/text_generation/text_generation.txt @@ -0,0 +1,337 @@ +# Text generation Tutorial # + +Sequence to sequence has been proven to be a powerful model for language generation. It can be used for machine translation, query rewriting, image captioning, etc. + +This tutorial guides you through training a sequence to sequence model for neural machine translation (NMT) network that translates French to English. + +We follow the paper [Neural Machine Translation by Jointly Learning to Align and Translate](http://arxiv.org/abs/1409.0473) , which details the model architecture and training procedure for good performance on WMT-14 dataset. This tutorial reproduces this result in PaddlePaddle. + +We thank @caoying for the pull request that defines the model architecture and solver configurations. + +## Data Preparation ## +### Download and Extract ### +Download the WMT-14 dataset from [http://www-lium.univ-lemans.fr/~schwenk/cslm\_joint\_paper/](http://www-lium.univ-lemans.fr/~schwenk/cslm_joint_paper/), extract it, and divide Develop and Test data into separate folder. + +- **Train data**: [bitexts (after selection)](http://www-lium.univ-lemans.fr/~schwenk/cslm_joint_paper/data/bitexts.tgz) +- **Develop and Test data**: [dev+test data](http://www-lium.univ-lemans.fr/~schwenk/cslm_joint_paper/data/dev+test.tgz) + +To do this, simply run the following commands in linux, otherwise, you need to download, extract, divide, and rename the file suffix respectively. + +```bash +cd demo/seqToseq/data +./wmt14_data.sh +``` + +We should find that the dataset `wmt14` has three folders as shown in the following table. + ++++++ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
| folder name | French-English parallel corpora file | number of total file | size |
| --- | --- | --- | --- |
| train_data | ccb2_pc30.src, ccb2_pc30.trg, etc | twelve | 3.55G |
| test_data | ntst1213.src, ntst1213.trg | two | 1636k |
| gen_data | ntst14.src, ntst14.trg | two | 864k |

- Each folder has French-English parallel corpora.
- **XXX.src** are source French files; **XXX.trg** are target English files.
- The number of lines of **XXX.src** and **XXX.trg** should be the same.
- Each line is a French/English sentence.
- There is a one-to-one correspondence between the sentence at the i-th line of **XXX.src** and **XXX.trg**.

### User Defined Dataset ###

If you need to do other sequence-to-sequence tasks, such as paraphrasing, you only need to organize the data as follows and place it in `demo/seqToseq/data`:

    dataset
      train
        file1.src file1.trg
        file2.src file2.trg
        ......
      test
        file1.src file1.trg
        file2.src file2.trg
        ......
      gen
        file1.src file1.trg
        file2.src file2.trg
        ......

- 1st directory: the dataset folder name
- 2nd directory: the train, test, and gen folders. The names of these three folders are fixed.
- 3rd file: the Source-Target parallel corpora files.
  - **XXX.src** are source files, **XXX.trg** are target files.
  - Each line of the file must be a sequence.
  - There should be a one-to-one correspondence between the i-th sequence of **XXX.src** and **XXX.trg**.

## Data Preprocess ##
### Preprocessing Workflow ###
- Concatenate each Source-Target parallel corpus into one file:
  - concatenate each **XXX.src** and **XXX.trg** into **XXX**.
  - the i-th line of **XXX** = the i-th line of **XXX.src** + '\t' + the i-th line of **XXX.trg**
- Build the source and target dictionaries of the train data, each dictionary having DICTSIZE words:
  - the most frequent (DICTSIZE - 3) words
  - 3 special tokens:
    - `<s>`: the start of a sequence
    - `<e>`: the end of a sequence
    - `<unk>`: a word not included in the dictionary

### Preprocessing Command and Result
The general command for preprocessing the dataset is:

```bash
cd demo/seqToseq/
python preprocess.py -i INPUT [-d DICTSIZE] [-m]
```

- `-i INPUT`: the path of the input original dataset
- `-d DICTSIZE`: the specified word count of the dictionary; if not set, the dictionary will contain all the words in the input dataset
- `-m --mergeDict`: merge the source and target dictionaries, so that the two dictionaries have the same content

And you will see messages like this:

    concat parallel corpora for dataset
    build source dictionary for train data
    build target dictionary for train data
    dictionary size is XXX

Here, you can simply run the command:

```bash
python preprocess.py -i data/wmt14 -d 30000
```

It will take several minutes and store the preprocessed dataset in `demo/seqToseq/data/pre-wmt14`, a directory with the following structure.

    train test gen train.list test.list gen.list src.dict trg.dict

- **train, test, gen**: folders containing the French-English parallel corpora of the train, test, and gen data respectively. Each line of a file in these folders has two parts: the former is a French sequence and the latter is the corresponding English sequence.
- **train.list, test.list, gen.list**: text files listing the files in the train, test, and gen folders respectively.
- **src.dict, trg.dict**: the source (French) / target (English) dictionaries; each dictionary has 30000 words: the most frequent 29997 words and the 3 special tokens.
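To make the concatenation and dictionary steps concrete, the following is a small, hypothetical Python sketch of the workflow above. It is illustrative only and is not the actual `preprocess.py` implementation; the function names and example paths are assumptions.

```python
from collections import Counter

def concat_parallel(src_path, trg_path, out_path):
    """Join the i-th French line and the i-th English line with a tab."""
    with open(src_path) as src, open(trg_path) as trg, open(out_path, "w") as out:
        for src_line, trg_line in zip(src, trg):
            out.write(src_line.rstrip("\n") + "\t" + trg_line.rstrip("\n") + "\n")

def build_dict(paths, dict_size):
    """Keep the (dict_size - 3) most frequent words plus the 3 special tokens."""
    counter = Counter()
    for path in paths:
        with open(path) as f:
            for line in f:
                counter.update(line.split())
    return ["<s>", "<e>", "<unk>"] + [w for w, _ in counter.most_common(dict_size - 3)]

# Hypothetical usage:
# concat_parallel("train/file1.src", "train/file1.trg", "train/file1")
# src_vocab = build_dict(["train/file1.src"], 30000)
```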
## Model Training ##
### Introduction ###

Neural machine translation (NMT) aims at building a single neural network that can be jointly tuned to maximize translation performance. Recently proposed NMT models often belong to the family of encoder-decoder models. Encoder-decoder models encode a source sentence into a fixed-length vector from which a decoder generates a target sentence.

In this task, we use an extension of the encoder-decoder model which learns to align and translate jointly. Each time the model generates a word of the translation, it searches for a set of positions in the source sentence where the most relevant information is concentrated. The decoder then predicts the target word based on the context vectors associated with these source positions and all the previously generated target words. For a more detailed explanation, readers can refer to the paper [Neural Machine Translation by Jointly Learning to Align and Translate](http://arxiv.org/abs/1409.0473).

The most distinguishing feature of this model is that it does not encode an input sentence into a single fixed-length vector. Instead, it encodes the input sentence into a sequence of vectors, where one vector corresponds to each input element. A subset of these vectors is chosen adaptively while decoding the translated sentence. This frees an NMT model from having to squash all the information of a source sentence, regardless of its length, into a fixed-length vector. The improvement of this model is more apparent for longer sentences, but it can be observed for sentences of any length.
![](./encoder-decoder-attention-model.png)
+
Figure 1. Encoder-Decoder-Attention-Model
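For reference, the alignment mechanism sketched in Figure 1 can be written compactly, following the notation of the cited paper, where the h_j are the encoder annotations, s_{i-1} is the previous decoder state, and a(.) is a small feed-forward alignment model:

```latex
% Context vector and attention weights from Bahdanau et al. (2014)
c_i = \sum_{j=1}^{T_x} \alpha_{ij} h_j, \qquad
\alpha_{ij} = \frac{\exp(e_{ij})}{\sum_{k=1}^{T_x} \exp(e_{ik})}, \qquad
e_{ij} = a(s_{i-1}, h_j)
```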
### Training Model in PaddlePaddle ###
We need to create a model config file before training. Here is an example, `demo/seqToseq/translation/train.conf`. The first lines import the Python functions for defining the network and set whether the config is used for generation.

```python
from seqToseq_net import *
is_generating = False

### Data Definition
train_conf = seq_to_seq_data(data_dir = "./data/pre-wmt14",
                             is_generating = is_generating)

### Algorithm Configuration
settings(
    learning_method = AdamOptimizer(),
    batch_size = 50,
    learning_rate = 5e-4)

### Network Architecture
gru_encoder_decoder(train_conf, is_generating)
```

1. **Data Definition**: We define SeqToSeq train and test data in our example. It returns train_conf as the configuration; its input arguments are:
   - data\_dir: directory of the train data and test data
   - is\_generating: whether this config is used for generating; here it is false
2. **Algorithm Configuration**: We use the SGD training algorithm (default) with the Adam learning method, a batch size of 50, and a learning rate of 5e-4.
3. **Network Architecture**: We use an attention version of the GRU encoder-decoder network. It consists of a bidirectional GRU as the encoder and a decoder that emulates searching through a source sentence while decoding a translation.

### Training Command and Result ###
After writing the model config, we can train the model by running the command:

```bash
cd demo/seqToseq/translation
./train.sh
```

The `train.sh` is shown as follows:

```bash
paddle train \
--config='translation/train.conf' \
--save_dir='translation/model' \
--use_gpu=false \
--num_passes=16 \
--show_parameter_stats_period=100 \
--trainer_count=4 \
--log_period=10 \
--dot_period=5 \
2>&1 | tee 'translation/train.log'
```
- config: set the config of the neural network
- save_dir: set the output path to save models
- use_gpu: whether to use GPU to train; here we use CPU
- num_passes: set the number of passes; one pass in PaddlePaddle means training on all samples in the dataset once
- show_parameter_stats_period: show parameter statistics every 100 batches
- trainer_count: set the number of CPU threads or GPU devices
- log_period: print a log every 10 batches
- dot_period: print a '.' every 5 batches

The training loss is printed every 10 batches by default, and you will see messages like this:

    I0719 19:16:45.952062 15563 TrainerInternal.cpp:160] Batch=10 samples=500 AvgCost=198.475 CurrentCost=198.475 Eval: classification_error_evaluator=0.737155 CurrentEval: classification_error_evaluator=0.737155
    I0719 19:17:56.707319 15563 TrainerInternal.cpp:160] Batch=20 samples=1000 AvgCost=157.479 CurrentCost=116.483 Eval: classification_error_evaluator=0.698392 CurrentEval: classification_error_evaluator=0.659065
    .....
- AvgCost: the average cost from the 0th batch to the current batch
- CurrentCost: the cost of the current batch
- classification\_error\_evaluator (Eval): the per-word false prediction rate from the 0th evaluation to the current one
- classification\_error\_evaluator (CurrentEval): the per-word false prediction rate in the current evaluation

When the classification\_error\_evaluator drops below 0.35, the model is trained successfully.

## Text Generation ##
### Introduction ###

Generally speaking, the NMT model is conditioned on the encodings of the source sentence, and predicts the next target word given the current target word.
In the training process, the current word is always known from the ground truth; in the generating process, by contrast, the current word is the output of the decoder at the previous time step, which is accessed through a memory in PaddlePaddle.

Besides, we use beam search to generate sequences. Beam search uses breadth-first search to build its search tree. At each level of the tree, it generates all successors of the states at the current level, sorted in increasing order of heuristic cost. However, it only stores a predetermined number of best states at each level, called the beam size.
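The following is a minimal, hypothetical Python sketch of such a beam-search loop, purely for illustration; the `step` scoring function, token ids, and length limit are assumptions, and this is not how PaddlePaddle implements generation internally.

```python
import heapq

def beam_search(step, start_id, end_id, beam_size=3, max_len=50):
    """step(prefix) -> list of (log_prob, next_id) candidates for extending a prefix."""
    beams = [(0.0, [start_id])]                 # (cumulative log-prob, token ids)
    finished = []
    for _ in range(max_len):
        candidates = []
        for score, prefix in beams:
            if prefix[-1] == end_id:            # this hypothesis already ended
                finished.append((score, prefix))
                continue
            for log_prob, token in step(prefix):
                candidates.append((score + log_prob, prefix + [token]))
        if not candidates:
            break
        # Keep only the beam_size highest-scoring partial translations.
        beams = heapq.nlargest(beam_size, candidates, key=lambda c: c[0])
    return sorted(finished + beams, key=lambda c: -c[0])[:beam_size]
```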
### Pretrained model ###
We trained the model on a cluster with 50 nodes, each node having two 6-core CPUs. We trained 16 passes in 5 days, where each pass takes 7 hours. The model_dir has 16 sub-folders, each of which contains the whole set of model parameters, 202MB in size. We find that the pass-00012 model has the highest BLEU score, 27.77 (see the paper [BLEU: a Method for Automatic Evaluation of Machine Translation](http://www.aclweb.org/anthology/P02-1040.pdf)). To download and extract this model, simply run the following commands in Linux.

```bash
cd demo/seqToseq/data
./wmt14_model.sh
```

### Generating Model in PaddlePaddle ###
We need to create a model config file before translating French sequences. Here is an example, `demo/seqToseq/translation/gen.conf`. The first lines import the Python functions for defining the network and set the generation mode.

```python
from seqToseq_net import *
is_generating = True

################## Data Definition #####################
gen_conf = seq_to_seq_data(data_dir = "./data/pre-wmt14",
                           is_generating = is_generating,
                           gen_result = "./translation/gen_result")

############## Algorithm Configuration ##################
settings(
    learning_method = AdamOptimizer(),
    batch_size = 1,
    learning_rate = 0)

################# Network configure #####################
gru_encoder_decoder(gen_conf, is_generating)
```

1. **Data Definition**: We define SeqToSeq gen data in our example. It returns gen_conf as the configuration; its input arguments are:
   - data\_dir: directory of the gen data
   - is\_generating: whether this config is used for generating; here it is true
   - gen\_result: file to store the generation result
2. **Algorithm Configuration**: We use the SGD training algorithm in generation, with a batch size of 1 (generating one sequence at a time) and a learning rate of 0.
3. **Network Architecture**: Essentially the same as the training model.

### Generating Command and Result ###
After writing the model config, we can translate French into English by running the command:

```bash
cd demo/seqToseq/translation
./gen.sh
```

The `gen.sh` is shown as follows. Unlike training, there are some different arguments to specify:

```bash
paddle train \
--job=test \
--config='translation/gen.conf' \
--save_dir='data/wmt14_model' \
--use_gpu=true \
--num_passes=13 \
--test_pass=12 \
--trainer_count=1 \
2>&1 | tee 'translation/gen.log'
```
- job: set the job mode to test
- num_passes and test_pass: load model parameters from pass test_pass to (num_passes - 1); here only `data/wmt14_model/pass-00012` is loaded

You will see messages like this:

    I0706 14:48:31.178915 31441 GradientMachine.cpp:143] Loading parameters from data/wmt14_model/pass-00012
    I0706 14:48:40.012039 31441 Tester.cpp:125] Batch=100 samples=100 AvgCost=0
    I0706 14:48:48.898632 31441 Tester.cpp:125] Batch=200 samples=200 AvgCost=0
    ...

And the generating result in `demo/seqToseq/translation/gen_result` looks like:

    0
    0 -11.1314 The about the width of the seats while large controls are at stake
    1 -11.1519 The on the width of the seats while large controls are at stake
    2 -11.5988 The about the width of the seats while large controls are at stake .

    1
    0 -24.4149 The dispute is between the major aircraft manufacturers about the width of the tourist seats on the flights , paving the way for a confrontation during the month of the Dubai .
    1 -26.9524 The dispute is between the major aircraft manufacturers about the width of the tourist seats on the flights , paving the way for a confrontation during the month of Dubai ' s .
    2 -27.9574 The dispute is between the major aircraft manufacturers about the width of the tourist seats on the flights , paving the way for a confrontation during the month of Dubai ' s Dubai .
    ...

- This is the beam search result, where the beam size is 3.
- '0' in the 1st line and '1' in the 6th line are the sequence ids in the gen data.
- The other six lines list the beam search results:
  - the 2nd column is the beam search score (from large to small)
  - the 3rd column is the generated English sequence
- There are 2 special tokens:
  - `<e>`: the end of a sequence
  - `<unk>`: a word not included in the dictionary

### BLEU Evaluation ###
Human evaluations of machine translation are extensive but expensive. The paper [BLEU: a Method for Automatic Evaluation of Machine Translation](http://www.aclweb.org/anthology/P02-1040.pdf) presents a method as an automated understudy to skilled human judges, substituting for them when there is a need for quick or frequent evaluations. [Moses](http://www.statmt.org/moses/) is a statistical machine translation system, and we use its [multi-bleu.perl](https://github.com/moses-smt/mosesdecoder/blob/master/scripts/generic/multi-bleu.perl) script for BLEU evaluation. To download this script, simply run the following command:

```bash
cd demo/seqToseq/translation
./moses_bleu.sh
```

Since the reference translation is already downloaded as `data/wmt14/gen/ntst14.trg`, we can do the BLEU evaluation by running the command:

```bash
cd demo/seqToseq/translation
./eval_bleu.sh FILE BEAMSIZE
```

- FILE: the generation result file
- BEAMSIZE: the expansion width in beam search

diff --git a/doc/_sources/index.txt b/doc/_sources/index.txt new file mode 100644 index 0000000000000000000000000000000000000000..5b5998fe054837c41af8ff98bca8cff75cb55fad --- /dev/null +++ b/doc/_sources/index.txt @@ -0,0 +1,13 @@ +PaddlePaddle Documentation +=================== + +User Guide +---------- +* [Quick Start](demo/quick_start/index_en.md) +* [Build and Installation](build/index.rst) +* [Contribute Code](build/contribute_to_paddle.md) +* [User Interface](ui/index.md) +* [Source Code Documents](source/index.md) +* [Layer Documents](layer.md) +* [Example and Demo](demo/index.md) +* [Cluster Train](cluster/index.md) diff --git a/doc/_sources/source/api/api.txt b/doc/_sources/source/api/api.txt new file mode 100644 index 0000000000000000000000000000000000000000..6fc450202df73f5ca99c2c52f257243aa37c90d4 --- /dev/null +++ b/doc/_sources/source/api/api.txt @@ -0,0 +1,5 @@ +API +======== + +.. doxygenfile:: paddle/api/PaddleAPI.h +.. 
doxygenfile:: paddle/api/Internal.h diff --git a/doc/_sources/source/cuda/cuda/cuda.txt b/doc/_sources/source/cuda/cuda/cuda.txt new file mode 100644 index 0000000000000000000000000000000000000000..52f17c2b2e48aec8e6fc8d5a7e4f443ad72d96a6 --- /dev/null +++ b/doc/_sources/source/cuda/cuda/cuda.txt @@ -0,0 +1,39 @@ +Cuda +============= + +Dynamic Link Libs +-------------------------- + +hl_dso_loader.h +`````````````````` +.. doxygenfile:: paddle/cuda/include/hl_dso_loader.h + +GPU Resources +---------------- + +hl_cuda.ph +`````````````` +.. doxygenfile:: paddle/cuda/include/hl_cuda.ph + +hl_cuda.h +`````````````` +.. doxygenfile:: paddle/cuda/include/hl_cuda.h + +CUDA Wrapper +-------------- + +hl_cuda_cublas.h +`````````````````````` +.. doxygenfile:: paddle/cuda/include/hl_cuda_cublas.h + +hl_cuda_cudnn.h +`````````````````````` +.. doxygenfile:: paddle/cuda/include/hl_cuda_cudnn.h + +hl_cuda_cudnn.h +`````````````````````` +.. doxygenfile:: paddle/cuda/include/hl_cuda_cudnn.ph + + + + diff --git a/doc/_sources/source/cuda/cuda/index.txt b/doc/_sources/source/cuda/cuda/index.txt new file mode 100644 index 0000000000000000000000000000000000000000..5fa38ff0fc8cea2b97262ea5493dea27b322dc1c --- /dev/null +++ b/doc/_sources/source/cuda/cuda/index.txt @@ -0,0 +1,7 @@ +CUDA +==================== + +.. toctree:: + :maxdepth: 3 + + cuda.rst diff --git a/doc/_sources/source/cuda/matrix/index.txt b/doc/_sources/source/cuda/matrix/index.txt new file mode 100644 index 0000000000000000000000000000000000000000..63f95eb46618fd43a1140e4d857ae7e2fc89a6ae --- /dev/null +++ b/doc/_sources/source/cuda/matrix/index.txt @@ -0,0 +1,7 @@ +Matrix +==================== + +.. toctree:: + :maxdepth: 3 + + matrix.rst diff --git a/doc/_sources/source/cuda/matrix/matrix.txt b/doc/_sources/source/cuda/matrix/matrix.txt new file mode 100644 index 0000000000000000000000000000000000000000..dd4f06599c5af29a0278617ffd1bd9f6ae6b222e --- /dev/null +++ b/doc/_sources/source/cuda/matrix/matrix.txt @@ -0,0 +1,61 @@ +Matrix +======= + +Base Matrix +------------- + +hl_matrix.h +`````````````````` +.. doxygenfile:: paddle/cuda/include/hl_matrix.h + +hl_matrix_base.h +`````````````````` +.. doxygenfile:: paddle/cuda/include/hl_matrix_base.cuh + +hl_matrix_apply.cuh +`````````````````````` +.. doxygenfile:: paddle/cuda/include/hl_matrix_apply.cuh + +hl_matrix_ops.cuh +`````````````````````` +.. doxygenfile:: paddle/cuda/include/hl_matrix_ops.cuh + +hl_matrix_type.cuh +`````````````````````` +.. doxygenfile:: paddle/cuda/include/hl_matrix_type.cuh + +hl_sse_matrix_kernel.cuh +`````````````````````````` +.. doxygenfile:: paddle/cuda/include/hl_sse_matrix_kernel.cuh + +hl_batch_transpose.h +`````````````````````````` +.. doxygenfile:: paddle/cuda/include/hl_batch_transpose.h + +Sparse Matrix +-------------- + +hl_sparse.h +`````````````````` +.. doxygenfile:: paddle/cuda/include/hl_sparse.h + +hl_sparse.ph +`````````````````````` +.. doxygenfile:: paddle/cuda/include/hl_sparse.ph + +Others +--------------- + +hl_aggregate.h +`````````````````` +.. doxygenfile:: paddle/cuda/include/hl_aggregate.h + +hl_table_apply.h +`````````````````` +.. doxygenfile:: paddle/cuda/include/hl_table_apply.h + +hl_top_k.h +`````````````````` +.. 
doxygenfile:: paddle/cuda/include/hl_top_k.h + + diff --git a/doc/_sources/source/cuda/rnn/index.txt b/doc/_sources/source/cuda/rnn/index.txt new file mode 100644 index 0000000000000000000000000000000000000000..4913e47ba1cbc1c2b93fe3e128626a8e66aedc62 --- /dev/null +++ b/doc/_sources/source/cuda/rnn/index.txt @@ -0,0 +1,7 @@ +RNN +==================== + +.. toctree:: + :maxdepth: 3 + + rnn.rst diff --git a/doc/_sources/source/cuda/rnn/rnn.txt b/doc/_sources/source/cuda/rnn/rnn.txt new file mode 100644 index 0000000000000000000000000000000000000000..ce8ed96692bcb79eec0e5e6ae52a8bf5f6573418 --- /dev/null +++ b/doc/_sources/source/cuda/rnn/rnn.txt @@ -0,0 +1,36 @@ +Neural Networks +================== + +Base +------- +.. doxygenfile:: paddle/cuda/include/hl_gpu.h +.. doxygenfile:: paddle/cuda/include/hl_cnn.h +.. doxygenfile:: paddle/cuda/include/hl_functions.h +.. doxygenfile:: paddle/cuda/include/hl_avx_functions.h +.. doxygenfile:: paddle/cuda/include/hl_device_functions.cuh +.. doxygenfile:: paddle/cuda/include/hl_gpu_functions.cuh + +Activation Functions +----------------------- +.. doxygenfile:: paddle/cuda/include/hl_activation_functions.h + +RNN Related APIs +----------------- + +.. doxygenfile:: paddle/cuda/include/hl_recurrent_apply.cuh +.. doxygenfile:: paddle/cuda/include/hl_sequence.h + +LSTM Model +`````````````` +.. doxygenfile:: paddle/cuda/include/hl_lstm.h +.. dpxygenfile:: paddle/cuda/include/hl_cpu_lstm.cuh +.. doxygenfile:: paddle/cuda/include/hl_gpu_lstm.cuh +.. doxygenfile:: paddle/cuda/include/hl_lstm_ops.cuh + +GRU Model +```````````````` +.. doxygenfile:: paddle/cuda/include/hl_gru_ops.cuh +.. doxygenfile:: paddle/cuda/include/hl_cpu_gru.cuh +.. doxygenfile:: paddle/cuda/include/hl_gpu_gru.cuh + + diff --git a/doc/_sources/source/cuda/utils/index.txt b/doc/_sources/source/cuda/utils/index.txt new file mode 100644 index 0000000000000000000000000000000000000000..7a84cbe27dd21e326add1a0a1774cbaa089e195f --- /dev/null +++ b/doc/_sources/source/cuda/utils/index.txt @@ -0,0 +1,7 @@ +Utils +==================== + +.. toctree:: + :maxdepth: 3 + + utils.rst diff --git a/doc/_sources/source/cuda/utils/utils.txt b/doc/_sources/source/cuda/utils/utils.txt new file mode 100644 index 0000000000000000000000000000000000000000..1ea3e5404aa5fc792075aa09c7fd7a1986332c79 --- /dev/null +++ b/doc/_sources/source/cuda/utils/utils.txt @@ -0,0 +1,23 @@ +Utilities +=========== + +HPPL Base +------------ + +hl_base.h +`````````````` +.. doxygenfile:: paddle/cuda/include/hl_base.h + +Timer +----------- + +hl_time.h +`````````````` +.. doxygenfile:: paddle/cuda/include/hl_time.h + +Thread Resource +----------- + +hl_thread.ph +`````````````` +.. doxygenfile:: paddle/cuda/include/hl_thread.ph diff --git a/doc/_sources/source/gserver/activations/index.txt b/doc/_sources/source/gserver/activations/index.txt new file mode 100644 index 0000000000000000000000000000000000000000..ed6200d9a6c12cce0d6edb2a749c91d860d7fc2f --- /dev/null +++ b/doc/_sources/source/gserver/activations/index.txt @@ -0,0 +1,5 @@ +Activations +============= + +.. doxygenfile:: paddle/gserver/activations/ActivationFunction.h +.. 
doxygenfile:: paddle/gserver/activations/ActivationFunction.cpp diff --git a/doc/_sources/source/gserver/dataprovider/dataproviders.txt b/doc/_sources/source/gserver/dataprovider/dataproviders.txt new file mode 100644 index 0000000000000000000000000000000000000000..2d2ace177b97a1735314ad58703498354b16dd67 --- /dev/null +++ b/doc/_sources/source/gserver/dataprovider/dataproviders.txt @@ -0,0 +1,14 @@ +Data Providers +================ + +Data Provider +--------------- +.. doxygenfile:: paddle/gserver/dataproviders/DataProvider.h +.. doxygenfile:: paddle/gserver/dataproviders/PyDataProvider2.cpp +.. doxygenfile:: paddle/gserver/dataproviders/DataProviderGroup.h +.. doxygenfile:: paddle/gserver/dataproviders/MultiDataProvider.h + +Proto Data Provider +-------------------- +.. doxygenfile:: paddle/gserver/dataproviders/ProtoDataProvider.h +.. doxygenfile:: paddle/gserver/dataproviders/ProtoReader.h diff --git a/doc/_sources/source/gserver/dataprovider/index.txt b/doc/_sources/source/gserver/dataprovider/index.txt new file mode 100644 index 0000000000000000000000000000000000000000..4f6077f1224f90f693515d3414da4d96dc652345 --- /dev/null +++ b/doc/_sources/source/gserver/dataprovider/index.txt @@ -0,0 +1,7 @@ +Data Providers Documents +========================== + +.. toctree:: + :maxdepth: 3 + + dataproviders.rst diff --git a/doc/_sources/source/gserver/evaluators/index.txt b/doc/_sources/source/gserver/evaluators/index.txt new file mode 100644 index 0000000000000000000000000000000000000000..d7f622ff826033c3689564d728c272f8d5618273 --- /dev/null +++ b/doc/_sources/source/gserver/evaluators/index.txt @@ -0,0 +1,8 @@ +Evaluators +============ + +.. doxygenfile:: paddle/gserver/evaluators/Evaluator.h +.. doxygenfile:: paddle/gserver/evaluators/ChunkEvaluator.cpp +.. doxygenfile:: paddle/gserver/evaluators/CTCErrorEvaluator.cpp + + diff --git a/doc/_sources/source/gserver/gradientmachines/gradientmachines.txt b/doc/_sources/source/gserver/gradientmachines/gradientmachines.txt new file mode 100644 index 0000000000000000000000000000000000000000..b3009f274e055d9f538cf4a8f51d50069290899d --- /dev/null +++ b/doc/_sources/source/gserver/gradientmachines/gradientmachines.txt @@ -0,0 +1,20 @@ +Gradient machines +=================== + +Networks +------------ +.. doxygenfile:: paddle/gserver/gradientmachines/MultiNetwork.h +.. doxygenfile:: paddle/gserver/gradientmachines/ParallelNeuralNetwork.h + +Gradient Machines +-------------------- +.. doxygenfile:: paddle/gserver/gradientmachines/GradientMachine.h +.. doxygenfile:: paddle/gserver/gradientmachines/MultiGradientMachine.h + +Recurrent Gradient Machines +----------------------------- +.. doxygenfile:: paddle/gserver/gradientmachines/RecurrentGradientMachine.h +.. doxygenfile:: paddle/gserver/gradientmachines/RecurrentGradientMachine.cpp + + + diff --git a/doc/_sources/source/gserver/gradientmachines/index.txt b/doc/_sources/source/gserver/gradientmachines/index.txt new file mode 100644 index 0000000000000000000000000000000000000000..997c29a102f53c165c70ff11cd9650b83bcecf44 --- /dev/null +++ b/doc/_sources/source/gserver/gradientmachines/index.txt @@ -0,0 +1,7 @@ +Gradient Machines Documents +============================= + +.. 
toctree:: + :maxdepth: 3 + + gradientmachines.rst diff --git a/doc/_sources/source/gserver/layers/index.txt b/doc/_sources/source/gserver/layers/index.txt new file mode 100644 index 0000000000000000000000000000000000000000..559c5436b10a5977ac347611639b32d43f1ed123 --- /dev/null +++ b/doc/_sources/source/gserver/layers/index.txt @@ -0,0 +1,7 @@ +Layers Documents +==================== + +.. toctree:: + :maxdepth: 3 + + layer.rst diff --git a/doc/_sources/source/gserver/layers/layer.txt b/doc/_sources/source/gserver/layers/layer.txt new file mode 100644 index 0000000000000000000000000000000000000000..a864e18b9fbae1afda254eec44ae2db8d1720de5 --- /dev/null +++ b/doc/_sources/source/gserver/layers/layer.txt @@ -0,0 +1,525 @@ +Base +====== + +Layer +----- +.. doxygenclass:: paddle::Layer + :members: + +Projection +---------- +.. doxygenclass:: paddle::Projection + :members: + +Operator +-------- +.. doxygenclass:: paddle::Operator + :members: + +Data Layer +=========== + +.. doxygenclass:: paddle::DataLayer + :members: + +Fully Connected Layers +====================== + +FullyConnectedLayer +------------------- +.. doxygenclass:: paddle::FullyConnectedLayer + :members: + +SelectiveFullyConnectedLayer +---------------------------- +.. doxygenclass:: paddle::SelectiveFullyConnectedLayer + :members: + +Conv Layers +=========== + +ConvBaseLayer +------------- +.. doxygenclass:: paddle::ConvBaseLayer + :members: + +ConvOperator +------------ +.. doxygenclass:: paddle::ConvOperator + :members: + +ConvShiftLayer +-------------- +.. doxygenclass:: paddle::ConvShiftLayer + :members: + +CudnnConvLayer +-------------- +.. doxygenclass:: paddle::CudnnConvLayer + :members: + +ExpandConvLayer +--------------- +.. doxygenclass:: paddle::ExpandConvLayer + :members: + +ContextProjection +----------------- +.. doxygenclass:: paddle::ContextProjection + :members: + +Pooling Layers +============== + +PoolLayer +--------- +.. doxygenclass:: paddle::PoolLayer + :members: + +PoolProjectionLayer +------------------- +.. doxygenclass:: paddle::PoolProjectionLayer + :members: + +CudnnPoolLayer +-------------- +.. doxygenclass:: paddle::CudnnPoolLayer + :members: + +Norm Layers +=========== + +NormLayer +--------- +.. doxygenclass:: paddle::NormLayer + :members: + +CMRProjectionNormLayer +---------------------- +.. doxygenclass:: paddle::CMRProjectionNormLayer + :members: + +DataNormLayer +------------- +.. doxygenclass:: paddle::DataNormLayer + :members: + +ResponseNormLayer +----------------- +.. doxygenclass:: paddle::ResponseNormLayer + :members: + +BatchNormBaseLayer +------------------ +.. doxygenclass:: paddle::BatchNormBaseLayer + :members: + +BatchNormalizationLayer +----------------------- +.. doxygenclass:: paddle::BatchNormalizationLayer + :members: + +CudnnBatchNormLayer +----------------------- +.. doxygenclass:: paddle::CudnnBatchNormLayer + :members: + +SumToOneNormLayer +----------------- +.. doxygenclass:: paddle::SumToOneNormLayer + :members: + +Activation Layer +================ + +ParameterReluLayer +------------------ +.. doxygenclass:: paddle::ParameterReluLayer + :members: + +Recurrent Layers +================ + +RecurrentLayer +-------------- +.. doxygenclass:: paddle::RecurrentLayer + :members: + +SequenceToBatch +--------------- +.. doxygenclass:: paddle::SequenceToBatch + :members: + +LSTM +---- +LstmLayer +````````` +.. doxygenclass:: paddle::LstmLayer + :members: + +LstmStepLayer +````````````` +.. doxygenclass:: paddle::LstmStepLayer + :members: + +LstmCompute +``````````` +.. 
doxygenclass:: paddle::LstmCompute + :members: + +MDLSTM +------ +MDLstmLayer +``````````` +.. doxygenclass:: paddle::MDLstmLayer + :members: + +CoordIterator +````````````` +.. doxygenclass:: paddle::CoordIterator + :members: + +GRU +--- +GatedRecurrentLayer +``````````````````` +.. doxygenclass:: paddle::GatedRecurrentLayer + :members: + +GruStepLayer +```````````` +.. doxygenclass:: paddle::GruStepLayer + :members: + +GruCompute +`````````` +.. doxygenclass:: paddle::GruCompute + :members: + + +Recurrent Layer Group +===================== + +AgentLayer +---------- +.. doxygenclass:: paddle::AgentLayer + :members: + +SequenceAgentLayer +------------------ +.. doxygenclass:: paddle::SequenceAgentLayer + :members: + +GatherAgentLayer +---------------- +.. doxygenclass:: paddle::GatherAgentLayer + :members: + +SequenceGatherAgentLayer +------------------------ +.. doxygenclass:: paddle::SequenceGatherAgentLayer + :members: + +ScatterAgentLayer +----------------- +.. doxygenclass:: paddle::ScatterAgentLayer + :members: + +SequenceScatterAgentLayer +------------------------- +.. doxygenclass:: paddle::SequenceScatterAgentLayer + :members: + +GetOutputLayer +-------------- +.. doxygenclass:: paddle::GetOutputLayer + :members: + +Mixed Layer +=========== +.. doxygenclass:: paddle::MixedLayer + :members: + +DotMulProjection +---------------- +.. doxygenclass:: paddle::DotMulProjection + :members: + +DotMulOperator +-------------- +.. doxygenclass:: paddle::DotMulOperator + :members: + +FullMatrixProjection +-------------------- +.. doxygenclass:: paddle::FullMatrixProjection + :members: + +IdentityProjection +------------------ +.. doxygenclass:: paddle::IdentityProjection + :members: + +IdentityOffsetProjection +------------------------ +.. doxygenclass:: paddle::IdentityOffsetProjection + :members: + +TableProjection +--------------- +.. doxygenclass:: paddle::TableProjection + :members: + +TransposedFullMatrixProjection +------------------------------ +.. doxygenclass:: paddle::TransposedFullMatrixProjection + :members: + +Aggregate Layers +================ + +Aggregate +--------- +AverageLayer +```````````` +.. doxygenclass:: paddle::AverageLayer + :members: + +MaxLayer +```````` +.. doxygenclass:: paddle::MaxLayer + :members: + +SequenceLastInstanceLayer +````````````````````````` +.. doxygenclass:: paddle::SequenceLastInstanceLayer + :members: + +Concat +------ +ConcatenateLayer +```````````````` +.. doxygenclass:: paddle::ConcatenateLayer + :members: + +ConcatenateLayer2 +````````````````` +.. doxygenclass:: paddle::ConcatenateLayer2 + :members: + +SequenceConcatLayer +``````````````````` +.. doxygenclass:: paddle::SequenceConcatLayer + :members: + +Subset +------ +SubSequenceLayer +```````````````` +.. doxygenclass:: paddle::SubSequenceLayer + :members: + +Reshaping Layers +================ + +BlockExpandLayer +---------------- +.. doxygenclass:: paddle::BlockExpandLayer + :members: + +ExpandLayer +----------- +.. doxygenclass:: paddle::ExpandLayer + :members: + +FeatureMapExpandLayer +--------------------- +.. doxygenclass:: paddle::FeatureMapExpandLayer + :members: + +ResizeLayer +----------- +.. doxygenclass:: paddle::ResizeLayer + :members: + +SequenceReshapeLayer +-------------------- +.. doxygenclass:: paddle::SequenceReshapeLayer + :members: + +Math Layers +=========== + +AddtoLayer +---------- +.. doxygenclass:: paddle::AddtoLayer + :members: + +ConvexCombinationLayer +---------------------- +.. 
doxygenclass:: paddle::ConvexCombinationLayer + :members: + +InterpolationLayer +------------------ +.. doxygenclass:: paddle::InterpolationLayer + :members: + +MultiplexLayer +-------------- +.. doxygenclass:: paddle::MultiplexLayer + :members: + +OuterProdLayer +-------------- +.. doxygenclass:: paddle::OuterProdLayer + :members: + +PowerLayer +---------- +.. doxygenclass:: paddle::PowerLayer + :members: + +ScalingLayer +------------ +.. doxygenclass:: paddle::ScalingLayer + :members: + +SlopeInterceptLayer +------------------- +.. doxygenclass:: paddle::SlopeInterceptLayer + :members: + +TensorLayer +------------ +.. doxygenclass:: paddle::TensorLayer + :members: + +TransLayer +---------- +.. doxygenclass:: paddle::TransLayer + :members: + +Sampling Layers +=============== + +MultinomialSampler +------------------ +.. doxygenclass:: paddle::MultinomialSampler + :members: + +MaxIdLayer +---------- +.. doxygenclass:: paddle::MaxIdLayer + :members: + +SamplingIdLayer +--------------- +.. doxygenclass:: paddle::SamplingIdLayer + :members: + +Cost Layers +=========== + +CostLayer +----------- +.. doxygenclass:: paddle::CostLayer + :members: + +HuberTwoClass +````````````` +.. doxygenclass:: paddle::HuberTwoClass + :members: + +LambdaCost +``````````` +.. doxygenclass:: paddle::LambdaCost + :members: + +MultiBinaryLabelCrossEntropy +```````````````````````````` +.. doxygenclass:: paddle::MultiBinaryLabelCrossEntropy + :members: + +MultiClassCrossEntropy +``````````````````````` +.. doxygenclass:: paddle::MultiClassCrossEntropy + :members: + +MultiClassCrossEntropyWithSelfNorm +`````````````````````````````````` +.. doxygenclass:: paddle::MultiClassCrossEntropyWithSelfNorm + :members: + +RankingCost +``````````` +.. doxygenclass:: paddle::RankingCost + :members: + +SoftBinaryClassCrossEntropy +``````````````````````````` +.. doxygenclass:: paddle::SoftBinaryClassCrossEntropy + :members: + +SumOfSquaresCostLayer +````````````````````` +.. doxygenclass:: paddle::SumOfSquaresCostLayer + :members: + +CosSimLayer +----------- +.. doxygenclass:: paddle::CosSimLayer + :members: + +CosSimVecMatLayer +----------------- +.. doxygenclass:: paddle::CosSimVecMatLayer + :members: + +CRFDecodingLayer +---------------- +.. doxygenclass:: paddle::CRFDecodingLayer + :members: + +CRFLayer +-------- +.. doxygenclass:: paddle::CRFLayer + :members: + +CTCLayer +-------- +.. doxygenclass:: paddle::CTCLayer + :members: + +HierarchicalSigmoidLayer +------------------------ +.. doxygenclass:: paddle::HierarchicalSigmoidLayer + :members: + +LinearChainCRF +-------------- +.. doxygenclass:: paddle::LinearChainCRF + :members: + +LinearChainCTC +-------------- +.. doxygenclass:: paddle::LinearChainCTC + :members: + +NCELayer +-------- +.. doxygenclass:: paddle::NCELayer + :members: + +ValidationLayer +--------------- +.. doxygenclass:: paddle::ValidationLayer + :members: + +Check Layers +============ + +EosIdCheckLayer +--------------- +.. 
doxygenclass:: paddle::EosIdCheckLayer + :members: diff --git a/doc/_sources/source/index.txt b/doc/_sources/source/index.txt new file mode 100644 index 0000000000000000000000000000000000000000..669362a1c3ad5ac10d9e74ffdafab9f21fd660fd --- /dev/null +++ b/doc/_sources/source/index.txt @@ -0,0 +1,53 @@ +# Source Code Documents + +## cuda + +- [CUDA](cuda/cuda/index.rst) +- [Matrix](cuda/matrix/index.rst) +- [RNN](cuda/rnn/index.rst) +- [Utils](cuda/utils/index.rst) + +## gserver + +- [Activations](gserver/activations/index.rst) +- [Data Providers](gserver/dataprovider/index.rst) +- [Evaluators](gserver/evaluators/index.rst) +- [Gradient Machines](gserver/gradientmachines/index.rst) +- [Layers](gserver/layers/index.rst) + +## math + +- [Matrix](math/matrix/index.rst) +- [Utils](math/utils/index.rst) + +## parameter + +- [Parameter](parameter/parameter/index.rst) +- [Update](parameter/update/index.rst) +- [Optimizer](parameter/optimizer/index.rst) + +## pserver + +- [Client](pserver/client/index.rst) +- [Network](pserver/network/index.rst) +- [Server](pserver/server/index.rst) + +## trainer + +- [Trainer](trainer/trainer.rst) + +## api + +- [API](api/api.rst) + +## utils + +- [CustomStackTrace](utils/customStackTrace.rst) +- [Enumeration wrapper](utils/enum.rst) +- [Lock](utils/lock.rst) +- [Queue](utils/queue.rst) +- [Thread](utils/thread.rst) + +## proto + +TBD diff --git a/doc/_sources/source/math/matrix/index.txt b/doc/_sources/source/math/matrix/index.txt new file mode 100644 index 0000000000000000000000000000000000000000..68410f2a27b68c87087f2c17de351495ac6a6cd0 --- /dev/null +++ b/doc/_sources/source/math/matrix/index.txt @@ -0,0 +1,7 @@ +Matrix Documents +==================== + +.. toctree:: + :maxdepth: 3 + + matrix.rst diff --git a/doc/_sources/source/math/matrix/matrix.txt b/doc/_sources/source/math/matrix/matrix.txt new file mode 100644 index 0000000000000000000000000000000000000000..b12e3934f4705d4a2b7d3d790873701ddfe27d9f --- /dev/null +++ b/doc/_sources/source/math/matrix/matrix.txt @@ -0,0 +1,20 @@ +Matrix +======= + +Base +-------- +.. doxygenfile:: paddle/math/BaseMatrix.h + +Sparse Matrix +---------------- +.. doxygenfile:: paddle/math/Matrix.h +.. doxygenfile:: paddle/math/Vector.h +.. doxygenfile:: paddle/math/MathUtils.h +.. doxygenfile:: paddle/math/SparseMatrix.h +.. doxygenfile:: paddle/math/SparseRowMatrix.h +.. doxygenfile:: paddle/math/CpuSparseMatrix.h + +Others +---------- +.. doxygenfile:: paddle/math/MathFunctions.h +.. doxygenfile:: paddle/math/SIMDFunctions.h diff --git a/doc/_sources/source/math/utils/index.txt b/doc/_sources/source/math/utils/index.txt new file mode 100644 index 0000000000000000000000000000000000000000..e5fe335da29b957706ed52662682d11c425e5908 --- /dev/null +++ b/doc/_sources/source/math/utils/index.txt @@ -0,0 +1,7 @@ +Utils Documents +==================== + +.. toctree:: + :maxdepth: 3 + + utils.rst diff --git a/doc/_sources/source/math/utils/utils.txt b/doc/_sources/source/math/utils/utils.txt new file mode 100644 index 0000000000000000000000000000000000000000..e00dc6229c15e1ec3a23d2a78c4094523836b030 --- /dev/null +++ b/doc/_sources/source/math/utils/utils.txt @@ -0,0 +1,13 @@ +Utils +======= + +Bits +------- +.. doxygenfile:: paddle/math/Bits.h + +Memory Handle +-------------- +.. doxygenfile:: paddle/math/MemoryHandle.h +.. doxygenfile:: paddle/math/Allocator.h +.. doxygenfile:: paddle/math/PoolAllocator.h +.. 
doxygenfile:: paddle/math/Storage.h diff --git a/doc/_sources/source/parameter/optimizer/index.txt b/doc/_sources/source/parameter/optimizer/index.txt new file mode 100644 index 0000000000000000000000000000000000000000..3338af5608a03ee853e3a5f16d2483b810215514 --- /dev/null +++ b/doc/_sources/source/parameter/optimizer/index.txt @@ -0,0 +1,7 @@ +Parameter Documents +==================== + +.. toctree:: + :maxdepth: 3 + + optimizer.rst diff --git a/doc/_sources/source/parameter/optimizer/optimizer.txt b/doc/_sources/source/parameter/optimizer/optimizer.txt new file mode 100644 index 0000000000000000000000000000000000000000..3d9e49217eb17541c14d8d64715278e62c99d2b4 --- /dev/null +++ b/doc/_sources/source/parameter/optimizer/optimizer.txt @@ -0,0 +1,7 @@ +Optimizer +============ + +.. doxygenfile:: paddle/parameter/FirstOrderOptimizer.h +.. doxygenfile:: paddle/parameter/AverageOptimizer.h +.. doxygenfile:: paddle/parameter/ParameterOptimizer.h +.. doxygenfile:: paddle/parameter/OptimizerWithRegularizer.h diff --git a/doc/_sources/source/parameter/parameter/index.txt b/doc/_sources/source/parameter/parameter/index.txt new file mode 100644 index 0000000000000000000000000000000000000000..e7ed70ec4c87b3613cd8450f1e7fca1fb974afca --- /dev/null +++ b/doc/_sources/source/parameter/parameter/index.txt @@ -0,0 +1,7 @@ +Parameter Documents +==================== + +.. toctree:: + :maxdepth: 3 + + parameter.rst diff --git a/doc/_sources/source/parameter/parameter/parameter.txt b/doc/_sources/source/parameter/parameter/parameter.txt new file mode 100644 index 0000000000000000000000000000000000000000..2b7afdb4093753598d73c686b1dc81b970d199d5 --- /dev/null +++ b/doc/_sources/source/parameter/parameter/parameter.txt @@ -0,0 +1,16 @@ +Parameter +============= + +Weight +-------- +.. doxygenfile:: paddle/parameter/Weight.h + +Regularizer +------------ +.. doxygenfile:: paddle/parameter/Regularizer.h + +Parameter +------------- +.. doxygenfile:: paddle/parameter/Argument.h +.. doxygenfile:: paddle/parameter/Parameter.h +.. doxygenfile:: paddle/parameter/ParallelParameter.h diff --git a/doc/_sources/source/parameter/update/index.txt b/doc/_sources/source/parameter/update/index.txt new file mode 100644 index 0000000000000000000000000000000000000000..1bbd73319396e7b8ea32c78e0fe3569919bacf2d --- /dev/null +++ b/doc/_sources/source/parameter/update/index.txt @@ -0,0 +1,7 @@ +Parameter Documents +==================== + +.. toctree:: + :maxdepth: 3 + + update.rst diff --git a/doc/_sources/source/parameter/update/update.txt b/doc/_sources/source/parameter/update/update.txt new file mode 100644 index 0000000000000000000000000000000000000000..c417602f0338dbd84ae2bd2ca4eb09330202a0e8 --- /dev/null +++ b/doc/_sources/source/parameter/update/update.txt @@ -0,0 +1,7 @@ +Update +========== + +.. doxygenfile:: paddle/parameter/ParameterUpdaterBase.h +.. doxygenfile:: paddle/parameter/ParameterUpdaterHook.h +.. doxygenfile:: paddle/parameter/ParameterUpdateFunctions.h + diff --git a/doc/_sources/source/pserver/client/client.txt b/doc/_sources/source/pserver/client/client.txt new file mode 100644 index 0000000000000000000000000000000000000000..fc7ed90d3dc8beb0baa30d63ccc956fbba2a4e4c --- /dev/null +++ b/doc/_sources/source/pserver/client/client.txt @@ -0,0 +1,14 @@ +Client +========= + +.. doxygenclass:: paddle::BaseClient + :members: + :protected-members: + :private-members: + :undoc-members: + +.. 
doxygenclass:: paddle::ParameterClient2 + :members: + :protected-members: + :private-members: + :undoc-members: diff --git a/doc/_sources/source/pserver/client/index.txt b/doc/_sources/source/pserver/client/index.txt new file mode 100644 index 0000000000000000000000000000000000000000..dc924c9ca8e7b9965638fd299dc2f5e78591c91b --- /dev/null +++ b/doc/_sources/source/pserver/client/index.txt @@ -0,0 +1,7 @@ +Client Documents +==================== + +.. toctree:: + :maxdepth: 3 + + client.rst diff --git a/doc/_sources/source/pserver/network/index.txt b/doc/_sources/source/pserver/network/index.txt new file mode 100644 index 0000000000000000000000000000000000000000..2fdf95e17d339d69de8e027d92cbb385e2bd51ec --- /dev/null +++ b/doc/_sources/source/pserver/network/index.txt @@ -0,0 +1,7 @@ +Network Documents +==================== + +.. toctree:: + :maxdepth: 3 + + network.rst diff --git a/doc/_sources/source/pserver/network/network.txt b/doc/_sources/source/pserver/network/network.txt new file mode 100644 index 0000000000000000000000000000000000000000..e000ff8dbbdc37e9d638d18d20a8ba53e21dd245 --- /dev/null +++ b/doc/_sources/source/pserver/network/network.txt @@ -0,0 +1,42 @@ +Network +========== + +Socket Server +---------------- +.. doxygenclass:: paddle::SocketServer + :members: + :protected-members: + :private-members: + :undoc-members: + +Socket Worker +---------------- +.. doxygenclass:: paddle::SocketWorker + :members: + :protected-members: + :private-members: + :undoc-members: + +Socket Client +---------------- +.. doxygenclass:: paddle::SocketClient + :members: + :protected-members: + :private-members: + :undoc-members: + +Socket Channel +--------------- +.. doxygenclass:: paddle::SocketChannel + :members: + :protected-members: + :private-members: + :undoc-members: + +Message Reader +--------------- +.. doxygenclass:: paddle::MsgReader + :members: + :protected-members: + :private-members: + :undoc-members: diff --git a/doc/_sources/source/pserver/server/index.txt b/doc/_sources/source/pserver/server/index.txt new file mode 100644 index 0000000000000000000000000000000000000000..09e3530bfeaf56ebbadb1694a69a036813e8970f --- /dev/null +++ b/doc/_sources/source/pserver/server/index.txt @@ -0,0 +1,7 @@ +Server Documents +==================== + +.. toctree:: + :maxdepth: 3 + + server.rst diff --git a/doc/_sources/source/pserver/server/server.txt b/doc/_sources/source/pserver/server/server.txt new file mode 100644 index 0000000000000000000000000000000000000000..f3110fdd731d246ce4211d05e32ddd98584bdbb7 --- /dev/null +++ b/doc/_sources/source/pserver/server/server.txt @@ -0,0 +1,14 @@ +Server +========== + +.. doxygenclass:: paddle::ProtoServer + :members: + :protected-members: + :private-members: + :undoc-members: + +.. doxygenclass:: paddle::ParameterServer2 + :members: + :protected-members: + :private-members: + :undoc-members: diff --git a/doc/_sources/source/trainer/trainer.txt b/doc/_sources/source/trainer/trainer.txt new file mode 100644 index 0000000000000000000000000000000000000000..12c24597e7f99cd489204602ae25a89d7b960630 --- /dev/null +++ b/doc/_sources/source/trainer/trainer.txt @@ -0,0 +1,32 @@ +Trainer +======= + +TrainerStats +------------ + +.. doxygenclass:: paddle::TrainerStats + :members: + +RemoteParameterUpdater +----------------------- + +.. doxygenclass:: paddle::RemoteParameterUpdater + :members: + +ConcurrentRemoteParameterUpdater +--------------------------------- + +.. 
doxygenclass:: paddle::ConcurrentRemoteParameterUpdater + :members: + +SparseRemoteParameterUpdater +---------------------------- + +.. doxygenclass:: paddle::SparseRemoteParameterUpdater + :members: + +SparseRemoteParameterUpdaterComposite +------------------------------------- + +.. doxygenclass:: paddle::SparseRemoteParameterUpdaterComposite + :members: diff --git a/doc/_sources/source/utils/customStackTrace.txt b/doc/_sources/source/utils/customStackTrace.txt new file mode 100644 index 0000000000000000000000000000000000000000..a4e6f05a406f33256548fc0ef32bbbf3daff1536 --- /dev/null +++ b/doc/_sources/source/utils/customStackTrace.txt @@ -0,0 +1,9 @@ +CustomStackTrace +================ + + +class CustomStackTrace +---------------------- + +.. doxygenclass:: paddle::CustomStackTrace + :members: diff --git a/doc/_sources/source/utils/enum.txt b/doc/_sources/source/utils/enum.txt new file mode 100644 index 0000000000000000000000000000000000000000..17166d35f7cfa63e51058cc5f86165b1e22bbe1e --- /dev/null +++ b/doc/_sources/source/utils/enum.txt @@ -0,0 +1,9 @@ +enumeration_wrapper +=================== + + +namespace paddle::enumeration_wrapper +------------------------------------- + +.. doxygennamespace:: paddle::enumeration_wrapper + diff --git a/doc/_sources/source/utils/lock.txt b/doc/_sources/source/utils/lock.txt new file mode 100644 index 0000000000000000000000000000000000000000..0b027e403f49fc1720904cf4b502d81e4148e1e3 --- /dev/null +++ b/doc/_sources/source/utils/lock.txt @@ -0,0 +1,37 @@ +Thread +====== + + +class Thread +------------ + +.. doxygenclass:: paddle::Thread + :members: + + +class ThreadWorker +------------------ + +.. doxygenclass:: paddle::ThreadWorker + :members: + + +class SyncThreadPool +-------------------- + +.. doxygenclass:: paddle::SyncThreadPool + :members: + + +class MultiThreadWorker +----------------------- + +.. doxygenclass:: paddle::MultiThreadWorker + :members: + + +class AsyncThreadPool +--------------------- + +.. doxygenclass:: paddle::AsyncThreadPool + :members: diff --git a/doc/_sources/source/utils/queue.txt b/doc/_sources/source/utils/queue.txt new file mode 100644 index 0000000000000000000000000000000000000000..72a464ca67288d0d0e24980d59c3bbc85f111081 --- /dev/null +++ b/doc/_sources/source/utils/queue.txt @@ -0,0 +1,16 @@ +Queue +===== + + +class Queue +------------ + +.. doxygenclass:: paddle::Queue + :members: + + +class BlockingQueue +------------------- + +.. doxygenclass:: paddle::BlockingQueue + :members: diff --git a/doc/_sources/source/utils/thread.txt b/doc/_sources/source/utils/thread.txt new file mode 100644 index 0000000000000000000000000000000000000000..2eb67dde6a945cc8e250989f7fc8cefed942950e --- /dev/null +++ b/doc/_sources/source/utils/thread.txt @@ -0,0 +1,40 @@ +Lock +==== + + +class RWLock +------------ + +.. doxygenclass:: paddle::RWLock + :members: + +class ReadLockGuard +------------------- + +.. doxygenclass:: paddle::ReadLockGuard + :members: + +class SpinLock +-------------- + +.. doxygenclass:: paddle::SpinLock + :members: + +class Semaphore +--------------- + +.. doxygenclass:: paddle::Semaphore + :members: + +class ThreadBarrier +------------------- + +.. doxygenclass:: paddle::ThreadBarrier + :members: + +class LockedCondition +--------------------- + +.. 
doxygenclass:: paddle::LockedCondition + :members: + diff --git a/doc/_sources/ui/api/py_data_provider_wrapper.txt b/doc/_sources/ui/api/py_data_provider_wrapper.txt new file mode 100644 index 0000000000000000000000000000000000000000..91222dd96819f54ef3f1af0a475549937b480476 --- /dev/null +++ b/doc/_sources/ui/api/py_data_provider_wrapper.txt @@ -0,0 +1,6 @@ +PyDataProviderWrapper API +========================= + + +.. automodule:: paddle.trainer.PyDataProviderWrapper + :members: diff --git a/doc/_sources/ui/api/trainer_config_helpers/activations.txt b/doc/_sources/ui/api/trainer_config_helpers/activations.txt new file mode 100644 index 0000000000000000000000000000000000000000..fea420f5926560d784fd2c77a25b1e41c95fac27 --- /dev/null +++ b/doc/_sources/ui/api/trainer_config_helpers/activations.txt @@ -0,0 +1,5 @@ +Activations +=========== + +.. automodule:: paddle.trainer_config_helpers.activations + :members: diff --git a/doc/_sources/ui/api/trainer_config_helpers/attrs.txt b/doc/_sources/ui/api/trainer_config_helpers/attrs.txt new file mode 100644 index 0000000000000000000000000000000000000000..44919aba90df0b9da7c311a62339052c16c44ad1 --- /dev/null +++ b/doc/_sources/ui/api/trainer_config_helpers/attrs.txt @@ -0,0 +1,5 @@ +Parameter and Extra Layer Attribute +=================================== + +.. automodule:: paddle.trainer_config_helpers.attrs + :members: diff --git a/doc/_sources/ui/api/trainer_config_helpers/data_sources.txt b/doc/_sources/ui/api/trainer_config_helpers/data_sources.txt new file mode 100644 index 0000000000000000000000000000000000000000..44ea59df43762508e86c7b867fcf136d84c8351e --- /dev/null +++ b/doc/_sources/ui/api/trainer_config_helpers/data_sources.txt @@ -0,0 +1,5 @@ +DataSources +=========== + +.. automodule:: paddle.trainer_config_helpers.data_sources + :members: diff --git a/doc/_sources/ui/api/trainer_config_helpers/evaluators.txt b/doc/_sources/ui/api/trainer_config_helpers/evaluators.txt new file mode 100644 index 0000000000000000000000000000000000000000..fdcf3d303ea49197b78fc28c64a9d732b9304241 --- /dev/null +++ b/doc/_sources/ui/api/trainer_config_helpers/evaluators.txt @@ -0,0 +1,5 @@ +Evaluators +========== + +.. automodule:: paddle.trainer_config_helpers.evaluators + :members: diff --git a/doc/_sources/ui/api/trainer_config_helpers/index.txt b/doc/_sources/ui/api/trainer_config_helpers/index.txt new file mode 100644 index 0000000000000000000000000000000000000000..a439e7a8ccabd72a7818999adcf0e7977dcd06b5 --- /dev/null +++ b/doc/_sources/ui/api/trainer_config_helpers/index.txt @@ -0,0 +1,10 @@ +# Trainer Config Helpers + +* [Optimizer](optimizers.rst) +* [Data Source](data_sources.rst) +* [Layers](layers_index.rst) +* [Activations](activations.rst) +* [Poolings](poolings.rst) +* [Networks](networks.rst) +* [Evaluators](evaluators.rst) +* [Parameter and Extra Layer Attribute](attrs.rst) diff --git a/doc/_sources/ui/api/trainer_config_helpers/layers.txt b/doc/_sources/ui/api/trainer_config_helpers/layers.txt new file mode 100644 index 0000000000000000000000000000000000000000..8051d297161568a39836e0e8d29a2a41f35acd01 --- /dev/null +++ b/doc/_sources/ui/api/trainer_config_helpers/layers.txt @@ -0,0 +1,375 @@ +Base +====== + +LayerType +--------- +.. automodule:: paddle.trainer_config_helpers.layers + :members: LayerType + :noindex: + +LayerOutput +----------- +.. automodule:: paddle.trainer_config_helpers.layers + :members: LayerOutput + :noindex: + +Data layer +=========== + +data_layer +---------- +.. 
automodule:: paddle.trainer_config_helpers.layers + :members: data_layer + :noindex: + +Fully Connected Layers +====================== + +fc_layer +-------- +.. automodule:: paddle.trainer_config_helpers.layers + :members: fc_layer + :noindex: + +selective_fc_layer +------------------ +.. automodule:: paddle.trainer_config_helpers.layers + :members: selective_fc_layer + :noindex: + +Conv Layers +=========== + +conv_operator +------------- +.. automodule:: paddle.trainer_config_helpers.layers + :members: conv_operator + :noindex: + +conv_shift_layer +------------------ +.. automodule:: paddle.trainer_config_helpers.layers + :members: conv_shift_layer + :noindex: + +img_conv_layer +-------------- +.. automodule:: paddle.trainer_config_helpers.layers + :members: img_conv_layer + :noindex: + +context_projection +------------------ +.. automodule:: paddle.trainer_config_helpers.layers + :members: context_projection + :noindex: + +Image Pooling Layer +=================== + +img_pool_layer +-------------- +.. automodule:: paddle.trainer_config_helpers.layers + :members: img_pool_layer + :noindex: + +Norm Layer +========== + +img_cmrnorm_layer +----------------- +.. automodule:: paddle.trainer_config_helpers.layers + :members: img_cmrnorm_layer + :noindex: + +img_rnorm_layer +----------------- +.. automodule:: paddle.trainer_config_helpers.layers + :members: img_rnorm_layer + :noindex: + +batch_norm_layer +--------------------- +.. automodule:: paddle.trainer_config_helpers.layers + :members: batch_norm_layer + :noindex: + +sum_to_one_norm_layer +--------------------- +.. automodule:: paddle.trainer_config_helpers.layers + :members: sum_to_one_norm_layer + :noindex: + +Recurrent Layers +================ + +recurrent_layer +----------------- +.. automodule:: paddle.trainer_config_helpers.layers + :members: recurrent_layer + :noindex: + +lstmemory +--------- +.. automodule:: paddle.trainer_config_helpers.layers + :members: lstmemory + :noindex: + +lstm_step_layer +--------------- +.. automodule:: paddle.trainer_config_helpers.layers + :members: lstm_step_layer + :noindex: + +grumemory +--------- +.. automodule:: paddle.trainer_config_helpers.layers + :members: grumemory + :noindex: + +gru_step_layer +--------------- +.. automodule:: paddle.trainer_config_helpers.layers + :members: gru_step_layer + :noindex: + +Recurrent Layer Group +===================== + +get_output_layer +----------------- +.. automodule:: paddle.trainer_config_helpers.layers + :members: get_output_layer + :noindex: + +Mixed Layer +=========== + +mixed_layer +----------- +.. automodule:: paddle.trainer_config_helpers.layers + :members: mixed_layer + :noindex: + +embedding_layer +--------------- +.. automodule:: paddle.trainer_config_helpers.layers + :members: embedding_layer + :noindex: + +dotmul_projection +----------------- +.. automodule:: paddle.trainer_config_helpers.layers + :members: dotmul_projection + :noindex: + +full_matrix_projection +---------------------- +.. automodule:: paddle.trainer_config_helpers.layers + :members: full_matrix_projection + :noindex: + +identity_projection +------------------- +.. automodule:: paddle.trainer_config_helpers.layers + :members: identity_projection + :noindex: + + +table_projection +---------------------- +.. automodule:: paddle.trainer_config_helpers.layers + :members: table_projection + :noindex: + +trans_full_matrix_projection +---------------------------- +.. 
automodule:: paddle.trainer_config_helpers.layers + :members: trans_full_matrix_projection + :noindex: + +Aggregate Layers +================ + +pooling_layer +------------- +.. automodule:: paddle.trainer_config_helpers.layers + :members: pooling_layer + :noindex: + +last_seq +-------- +.. automodule:: paddle.trainer_config_helpers.layers + :members: last_seq + :noindex: + +first_seq +--------- +.. automodule:: paddle.trainer_config_helpers.layers + :members: first_seq + :noindex: + +concat_layer +------------ +.. automodule:: paddle.trainer_config_helpers.layers + :members: concat_layer + :noindex: + +Reshaping Layers +================ + +block_expand_layer +------------------ +.. automodule:: paddle.trainer_config_helpers.layers + :members: block_expand_layer + :noindex: + +expand_layer +------------ +.. automodule:: paddle.trainer_config_helpers.layers + :members: expand_layer + :noindex: + +Math Layers +=========== + +addto_layer +----------- +.. automodule:: paddle.trainer_config_helpers.layers + :members: addto_layer + :noindex: + +convex_comb_layer +----------------- +.. automodule:: paddle.trainer_config_helpers.layers + :members: convex_comb_layer + :noindex: + +interpolation_layer +------------------- +.. automodule:: paddle.trainer_config_helpers.layers + :members: interpolation_layer + :noindex: + +power_layer +----------- +.. automodule:: paddle.trainer_config_helpers.layers + :members: power_layer + :noindex: + +scaling_layer +------------- +.. automodule:: paddle.trainer_config_helpers.layers + :members: scaling_layer + :noindex: + +slope_intercept_layer +---------------------- +.. automodule:: paddle.trainer_config_helpers.layers + :members: slope_intercept_layer + :noindex: + +tensor_layer +------------ +.. automodule:: paddle.trainer_config_helpers.layers + :members: tensor_layer + :noindex: + +trans_layer +------------ +.. automodule:: paddle.trainer_config_helpers.layers + :members: trans_layer + :noindex: + +Sampling Layers +=============== + +maxid_layer +----------- +.. automodule:: paddle.trainer_config_helpers.layers + :members: maxid_layer + :noindex: + +sampling_id_layer +----------------- +.. automodule:: paddle.trainer_config_helpers.layers + :members: sampling_id_layer + :noindex: + +Cost Layers +=========== + +cross_entropy +------------- +.. automodule:: paddle.trainer_config_helpers.layers + :members: cross_entropy + :noindex: + +cross_entropy_with_selfnorm +--------------------------- +.. automodule:: paddle.trainer_config_helpers.layers + :members: cross_entropy_with_selfnorm + :noindex: + +multi_binary_label_cross_entropy +-------------------------------- +.. automodule:: paddle.trainer_config_helpers.layers + :members: multi_binary_label_cross_entropy + :noindex: + +huber_cost +---------- +.. automodule:: paddle.trainer_config_helpers.layers + :members: huber_cost + :noindex: + +lambda_cost +----------- +.. automodule:: paddle.trainer_config_helpers.layers + :members: lambda_cost + :noindex: + +rank_cost +--------- +.. automodule:: paddle.trainer_config_helpers.layers + :members: rank_cost + :noindex: + +cos_sim +------- +.. automodule:: paddle.trainer_config_helpers.layers + :members: cos_sim + :noindex: + +crf_layer +----------------- +.. automodule:: paddle.trainer_config_helpers.layers + :members: crf_layer + :noindex: + +crf_decoding_layer +------------------- +.. automodule:: paddle.trainer_config_helpers.layers + :members: crf_decoding_layer + :noindex: + +ctc_layer +----------- +.. 
automodule:: paddle.trainer_config_helpers.layers
    :members: ctc_layer
    :noindex:

hsigmoid
---------
.. automodule:: paddle.trainer_config_helpers.layers
    :members: hsigmoid
    :noindex:

Check Layer
============

eos_layer
------------
.. automodule:: paddle.trainer_config_helpers.layers
    :members: eos_layer
    :noindex:
diff --git a/doc/_sources/ui/api/trainer_config_helpers/layers_index.txt b/doc/_sources/ui/api/trainer_config_helpers/layers_index.txt new file mode 100644 index 0000000000000000000000000000000000000000..c0daab152148ce769948f600c3101bd79f5a1013 --- /dev/null +++ b/doc/_sources/ui/api/trainer_config_helpers/layers_index.txt @@ -0,0 +1,7 @@
Layers
======

.. toctree::
    :maxdepth: 3

    layers.rst
diff --git a/doc/_sources/ui/api/trainer_config_helpers/networks.txt b/doc/_sources/ui/api/trainer_config_helpers/networks.txt new file mode 100644 index 0000000000000000000000000000000000000000..255f154ed70733e5bfc200af4de32393e3006aea --- /dev/null +++ b/doc/_sources/ui/api/trainer_config_helpers/networks.txt @@ -0,0 +1,5 @@
Networks
========

.. automodule:: paddle.trainer_config_helpers.networks
    :members:
diff --git a/doc/_sources/ui/api/trainer_config_helpers/optimizers.txt b/doc/_sources/ui/api/trainer_config_helpers/optimizers.txt new file mode 100644 index 0000000000000000000000000000000000000000..3839d932ba0a19d901ca5abd9369b03d39855730 --- /dev/null +++ b/doc/_sources/ui/api/trainer_config_helpers/optimizers.txt @@ -0,0 +1,5 @@
Optimizers
==========

.. automodule:: paddle.trainer_config_helpers.optimizers
    :members:
diff --git a/doc/_sources/ui/api/trainer_config_helpers/poolings.txt b/doc/_sources/ui/api/trainer_config_helpers/poolings.txt new file mode 100644 index 0000000000000000000000000000000000000000..4d3c5dc486b5f8fdc2ebfa007a5e6582ea47c5f2 --- /dev/null +++ b/doc/_sources/ui/api/trainer_config_helpers/poolings.txt @@ -0,0 +1,5 @@
Poolings
========

.. automodule:: paddle.trainer_config_helpers.poolings
    :members:
diff --git a/doc/_sources/ui/cmd_argument/argument_outline.txt b/doc/_sources/ui/cmd_argument/argument_outline.txt new file mode 100644 index 0000000000000000000000000000000000000000..98dadc270dcac8cb5c05f3065c98bac78671d7fa --- /dev/null +++ b/doc/_sources/ui/cmd_argument/argument_outline.txt @@ -0,0 +1,404 @@
# Argument Outline

It looks like there are a lot of arguments. However, most of them are for developers or are already set automatically in the cluster submission environment, so users do not need to care about them. Here, we divide these arguments into several classes according to the scenario in which they are used. For example, the arguments in `common` can be used in all scenarios, some arguments can only be used in certain layers, and some are only needed for multi-machine training in a cluster.
| Class | Arguments |
| --- | --- |
| common | job, use_gpu, local, config, config_args, num_passes, trainer_count, version, show_layer_stat |
| train | dot_period, test_period, saving_period, show_parameter_stats_period, init_model_path, load_missing_parameter_strategy, saving_period_by_batches, use_old_updater, enable_grad_share, grad_share_block_num, log_error_clipping, log_clipping, save_only_one, allow_inefficient_sparse_update, start_pass |
| train/test | save_dir |
| testing during training | test_all_data_in_one_period, average_test_period |
| test | model_list, test_wait, test_pass, predict_output_dir, distribute_test |
| Auc/PnpairValidation | predict_file |
| GPU | gpu_id, parallel_nn, allow_only_one_model_on_one_gpu, cudnn_dir, cuda_dir |
| RNN | beam_size, rnn_use_batch, prev_batch_state, diy_beam_search_prob_so |
| metric learning | external, data_server_port |
| PServer | start_pserver, pservers, port, port_num, ports_num_for_sparse, nics, rdma_tcp, small_messages, loadsave_parameters_in_pserver, log_period_server, pserver_num_threads, sock_send_buf_size, sock_recv_buf_size, num_gradient_servers, parameter_block_size, parameter_block_size_for_sparse |
| Async SGD | async_count, async_lagged_ratio_min, async_lagged_ratio_default |
| Performance Tuning | log_barrier_abstract, log_barrier_lowest_nodes, log_barrier_show_log, check_sparse_distribution_batches, check_sparse_distribution_ratio, check_sparse_distribution_unbalance_degree, check_sparse_distribution_in_pserver, show_check_sparse_distribution_log |
| Data Provider | memory_threshold_on_load_data |
| RandomNumber | seed, thread_local_rand_use_global_seed |
| UnitTest | checkgrad_eps |
| Matrix/Vector | enable_parallel_vector |
diff --git a/doc/_sources/ui/cmd_argument/detail_introduction.txt b/doc/_sources/ui/cmd_argument/detail_introduction.txt new file mode 100644 index 0000000000000000000000000000000000000000..0d0362d022a72b597e78e760893c91df449e5745 --- /dev/null +++ b/doc/_sources/ui/cmd_argument/detail_introduction.txt @@ -0,0 +1,336 @@
# Detail Description

## Common

* `--job`
  - Job mode, one of **train, test, checkgrad**; checkgrad is mainly for developers and ordinary users do not need to care about it.
  - type: string (default: train)

* `--config`
  - Used to specify the network configuration file.
  - type: string (default: null).

* `--use_gpu`
  - Whether to use the GPU for training; false is CPU mode and true is GPU mode.
  - type: bool (default: 1).

* `--local`
  - Whether the training is in local mode or not. True when training locally or using one node in a cluster, false when using multiple machines in a cluster.
  - type: bool (default: 1).

* `--trainer_count`
  - The number of threads used in one machine. For example, trainer_count = 4 means using 4 GPUs in GPU mode and 4 threads in CPU mode. Each thread (or GPU) is assigned 1/4 of the samples in the current batch; that is, with a batch_size of 512 in the trainer config, each thread trains on 128 samples.
  - type: int32 (default: 1).

* `--num_passes`
  - When `--job=train`, train for num_passes passes; one pass means training on all samples in the dataset once. When `--job=test`, test the models from pass test_pass to pass (num_passes - 1).
  - type: int32 (default: 100).

* `--config_args`
  - Arguments passed to the config file, in the format key1=value1,key2=value2.
  - type: string (default: null).

* `--version`
  - Whether to print version information.
  - type: bool (default: 0).

* `--show_layer_stat`
  - Whether to show the statistics of each layer **per batch**.
  - type: bool (default: 0).

## Train

* `--log_period`
  - Log progress every log_period batches.
  - type: int32 (default: 100).

* `--dot_period`
  - Print '.' every dot_period batches.
  - type: int32 (default: 1).

* `--saving_period`
  - Save parameters every saving_period passes.
  - type: int32 (default: 1).

* `--save_dir`
  - Directory for saving model parameters. It needs to be specified, but it does not need to be created in advance.
  - type: string (default: null).

* `--start_pass`
  - Start training from this pass. Parameters are loaded from the previous pass.
  - type: int32 (default: 0).

* `--show_parameter_stats_period`
  - Show parameter statistics during training every show_parameter_stats_period batches. Nothing is shown by default.
  - type: int32 (default: 0).

* `--save_only_one`
  - Save only the parameters of the last pass; parameters from previous passes are removed.
  - type: bool (default: 0).

* `--load_missing_parameter_strategy`
  - Specify what to do when a model file is missing. Three strategies are supported: fail/rand/zero.
    - `fail`: the program exits.
    - `rand`: uniform or normal distribution according to **initial\_strategy** in the network config. The uniform range is **[mean - std, mean + std]**, where mean and std are configured in the trainer config.
    - `zero`: all parameters are set to zero.
  - type: string (default: fail).

* `--init_model_path`
  - Path of the initialization model. If it is set, start\_pass is ignored. It can also be used to specify the model path in testing mode.
  - type: string (default: null).
+ +* `--saving_period_by_batches` + - Save parameters every saving_period_by_batches batches in one pass. + - type: int32 (default: 0). + +* `--log_error_clipping` + - Whether to print error clipping log when setting **error_clipping_threshold** in layer config. If it is true, log will be printed in backward propagation **per batch**. This clipping effects on **gradient of output**. + - type: bool (default: 0). + +* `--log_clipping` + - Enable print log clipping or not when setting **gradient_clipping_threshold** in trainer config. This clipping effects on **gradient w.r.t. (with respect to) weight**. + - type: bool (default: 0). + +* `--use_old_updater` + - Whether to use the old RemoteParameterUpdater. Default use ConcurrentRemoteParameterUpdater. It is mainly for deverlopers and users usually do not need to care about. + - type: bool (default: 0). + +* `--enable_grad_share` + - threshold for enable gradient parameter, which is shared for batch multi-cpu training. + - type: int32 (default: 100 \* 1024 \* 1024). + +* `--grad_share_block_num` + - block number of gradient parameter, which is shared for batch multi-cpu training. + - type: int32 (default: 64). + +## Test + +* `--test_pass` + - Load parameter from this pass to test. + - type: int32 (default: -1). + +* `--test_period` + - Run testing every test_period train batches. If not set, run testing each pass. + - type: int32 (default: 1000). + +* `--test_wait` + - Whether to wait for parameter per pass if not exist. If set test_data_path in submitting environment of cluster, it will launch one process to perfom testing, so we need to set test_wait=1. Note that in the cluster submitting environment, this argument has been set True by default. + - type: bool (default: 0). + +* `--model_list` + - File that saves the model list when testing. It was set automatically when using cluster submitting environment after setting model_path. + - type: string (default: "", null). + +* `--test_all_data_in_one_period` + - This argument is usually used in testing period during traning. If true, all data will be tested in one test period. Otherwise (batch_size * log_peroid) data will be tested. + - type: bool (default: 0). + +* `--predict_output_dir` + - Directory that saves the layer output. It is configured in Outputs() in network config. Default, this argument is null, meaning save nothing. Specify this directory if you want to save feature map of some layers in testing mode. Note that, layer outputs are values after activation function. + - type: string (default: "", null). + +* `--average_test_period` + - Do test on average parameter every `average_test_period` batches. It MUST be devided by FLAGS_log_period. Default 0 means do not test on average parameter. + - type: int32 (default: 0). + +* `--distribute_test` + - Testing in distribute environment will merge results from multiple machines. + - type: bool (default: 0). + +* `--predict_file` + - File name for saving predicted result. Default, this argument is null, meaning save nothing. Now, this argument is only used in AucValidationLayer and PnpairValidationLayer, and saves predicted result every pass. + - type: string (default: "", null). + +## GPU + +* `--gpu_id` + - Which gpu core to use. + - type: int32 (default: 0). + +* `--allow_only_one_model_on_one_gpu` + - If true, do not allow multiple models on one GPU device. + - type: bool (default: 1). + +* `--parallel_nn` + - Whether to use multi-thread to calculate one neural network or not. 
If false, use gpu_id specify which gpu core to use (the device property in trainer config will be ingored). If true, the gpu core is specified in trainer config (gpu_id will be ignored). + - type: bool (default: 0). + +* `--cudnn_dir` + - Choose path to dynamic load NVIDIA CuDNN library, for instance, /usr/local/cuda/lib64. [Default]: LD_LIBRARY_PATH + - type: string (default: "", null) + +* `--cuda_dir` + - Choose path to dynamic load NVIDIA CUDA library, for instance, /usr/local/cuda/lib64. [Default]: LD_LIBRARY_PATH + - type: string (default: "", null) + +## NLP: RNN/LSTM/GRU +* `--rnn_use_batch` + - Whether to use batch method for calculation in simple RecurrentLayer. + - type: bool (default: 0). + +* `--prev_batch_state` + - batch is continue with next batch. + - type: bool (default: 0). + +* `--beam_size` + - Beam search uses breadth-first search to build its search tree. At each level of the tree, it generates all successors of the states at the current level, sorting them in increasing order of heuristic cost. However, it only stores a predetermined number of best states at each level (called the beam size). + - type: int32 (default: 1). + +* `--diy_beam_search_prob_so` + - Specify shared dynamic library. It can be defined out of paddle by user. + - type: string (default: "", null). + +## Metric Learning +* `--external` + - Whether to use external machine for metric learning. + - type: bool (default: 0). + +* `--data_server_port` + - Listening port for dserver (data server), dserver is mainly used in metric learning. + - type: int32 (default: 21134). + +## DataProvider + +* `--memory_threshold_on_load_data` + - Stop loading data when memory is not sufficient. + - type: double (default: 1.0). + +## Unit Test + +* `--checkgrad_eps` + - parameter change size for checkgrad. + - type: double (default: 1e-05). + +## Parameter Server and Distributed Communication + +* `--start_pserver` + - Whether to start pserver (parameter server). + - type: bool (default: 0). + +* `--pservers` + - Comma separated IP addresses of pservers. It is set automatically in cluster submitting environment. + - type: string (default: "127.0.0.1"). + +* `--port` + - Listening port for pserver. + - type: int32 (default: 20134). + +* `--ports_num` + - The ports number for parameter send, increment based on default port number. + - type: int32 (default: 1). + +* `--trainer_id` + - In distributed training, each trainer must be given an unique id ranging from 0 to num_trainers-1. Trainer 0 is the master trainer. User do not need to care this flag. + - type: int32 (default: 0). + +* `--num_gradient_servers` + - Numbers of gradient servers. This arguments is set automatically in cluster submitting environment. + - type: int32 (default: 1). + +* `--small_messages` + - If message size is small, recommend set it True to enable quick ACK and no delay + - type: bool (default: 0). + +* `--sock_send_buf_size` + - Restrict socket send buffer size. It can reduce network congestion if set carefully. + - type: int32 (default: 1024 \* 1024 \* 40). + +* `--sock_recv_buf_size` + - Restrict socket recieve buffer size. + - type: int32 (default: 1024 \* 1024 \* 40). + +* `--parameter_block_size` + - Parameter block size for pserver, will automatically calculate a suitable value if it's not set. + - type: int32 (default: 0). + +* `--parameter_block_size_for_sparse` + - Parameter block size for sparse update pserver, will automatically calculate a suitable value if it's not set. + - type: int32 (default: 0). 
+ +* `--log_period_server` + - Log progress every log_period_server batches at pserver end. + - type: int32 (default: 500). + +* `--loadsave_parameters_in_pserver` + - Load and save parameters in pserver. Only work when parameter set sparse_remote_update. + - type: bool (default: 0). + +* `--pserver_num_threads` + - number of threads for sync op exec. + - type: bool (default: 1). + +* `--ports_num_for_sparse` + - The ports number for parameter send, increment based on default (port + ports_num). It is used by sparse Tranning. + - type: int32 (default: 0). + +* `--nics` + - Network device name for pservers, already set in cluster submitting environment. + - type: string (default: "xgbe0,xgbe1"). + +* `--rdma_tcp` + - Use rdma or tcp transport protocol, already set in cluster submitting environment. + - type: string (default: "tcp"). + +## Async SGD +* `--async_count` + - Defined the asynchronous training length, if 0, then use synchronized training. + - type: int32 (default: 0). + +* `--async_lagged_ratio_min` + - Control the minimize value of `config_.async_lagged_grad_discard_ratio()`. + - type: double (default: 1.0). + +* `--async_lagged_ratio_default` + - If async_lagged_grad_discard_ratio is not set in network config, use it as defalut value. + - type: double (default: 1.5). + +## Performance Tuning + +* `--log_barrier_abstract` + - If true, show abstract barrier performance information. + - type: bool (default: 1). + +* `--log_barrier_show_log` + - If true, always show barrier abstract even with little gap. + - type: bool (default: 0). + +* `--log_barrier_lowest_nodes` + - How many lowest node will be logged. + - type: int32 (default: 5). + +* `--check_sparse_distribution_in_pserver` + - Whether to check that the distribution of sparse parameter on all pservers is balanced. + - type: bool (default: 0). + +* `--show_check_sparse_distribution_log` + - show log details for sparse parameter distribution in pserver. + - type: bool (default: 0). + +* `--allow_inefficient_sparse_update` + - Whether to allow inefficient sparse update. + - type: bool (default: 0). + +* `--check_sparse_distribution_batches` + - Running sparse parameter distribution check every so many batches. + - type: int32 (default: 100). + +* `--check_sparse_distribution_ratio` + - If parameters dispatched to different pservers have an unbalanced distribution for check_sparse_distribution_ratio * check_sparse_distribution_batches times, crash program. + - type: double (default: 0.6). + +* `--check_sparse_distribution_unbalance_degree` + - The ratio of maximum data size / minimun data size for different pserver. + - type: double (default: 2). + +## Matrix/Vector/RandomNumber +* `--enable_parallel_vector` + - threshold for enable parallel vector. + - type: int32 (default: 0). + +* `--seed` + - random number seed. 0 for srand(time) + - type: int32 (default: 1) + +* `--thread_local_rand_use_global_seed` + - Whether to use global seed in rand of thread local. + - type: bool (default: 0). diff --git a/doc/_sources/ui/cmd_argument/use_case.txt b/doc/_sources/ui/cmd_argument/use_case.txt new file mode 100644 index 0000000000000000000000000000000000000000..a6bfba29af4f73055338c3a671bcafaa1456c7cf --- /dev/null +++ b/doc/_sources/ui/cmd_argument/use_case.txt @@ -0,0 +1,183 @@ +# Use Case + +## Local Training + +These command line arguments are commonly used by local training experiments, such as image classification, natural language processing, et al. 
+ +``` +paddle train \ + --use_gpu=1/0 \ #1:GPU,0:CPU(default:true) + --config=network_config \ + --save_dir=output \ + --trainer_count=COUNT \ #(default:1) + --test_period=M \ #(default:1000) + --test_all_data_in_one_period=true \ #(default:false) + --num_passes=N \ #(defalut:100) + --log_period=K \ #(default:100) + --dot_period=1000 \ #(default:1) + #[--show_parameter_stats_period=100] \ #(default:0) + #[--saving_period_by_batches=200] \ #(default:0) +``` +`show_parameter_stats_period` and `saving_period_by_batches` are optional according to your task. + +### 1) Pass Command Argument to Network config + +`config_args` is a useful parameter to pass arguments to network config. + +``` +--config_args=generating=1,beam_size=5,layer_num=10 \ +``` +And `get_config_arg` can be used to parse these arguments in network config as follows: + +``` +generating = get_config_arg('generating', bool, False) +beam_size = get_config_arg('beam_size', int, 3) +layer_num = get_config_arg('layer_num', int, 8) +``` + +`get_config_arg`: + +``` +get_config_arg(name, type, default_value) +``` +- name: the name specified in the `--config_args` +- type: value type, bool, int, str, float etc. +- default_value: default value if not set. + +### 2) Use Model to Initialize Network + +add argument: + +``` +--init_model_path=model_path +--load_missing_parameter_strategy=rand +``` + +## Local Testing + +Method 1: + +``` +paddle train --job=test \ + --use_gpu=1/0 \ + --config=network_config \ + --trainer_count=COUNT \ + --init_model_path=model_path \ +``` +- use init\_model\_path to specify test model. +- only can test one model. + +Method 2: + +``` +paddle train --job=test \ + --use_gpu=1/0 \ + --config=network_config \ + --trainer_count=COUNT \ + --model_list=model.list \ +``` +- use model_list to specify test models +- can test several models, where model.list likes: + +``` +./alexnet_pass1 +./alexnet_pass2 +``` + +Method 3: + +``` +paddle train --job=test \ + --use_gpu=1/0 \ + --config=network_config \ + --trainer_count=COUNT \ + --save_dir=model \ + --test_pass=M \ + --num_passes=N \ +``` +This way must use model path saved by Paddle like this: `model/pass-%5d`. Testing model is from M-th pass to (N-1)-th pass. For example: M=12 and N=14 will test `model/pass-00012` and `model/pass-00013`. + +## Sparse Training + +Sparse training is usually used to accelerate calculation when input is sparse data with highly dimension. For example, dictionary dimension of input data is 1 million, but one sample just have several words. In paddle, sparse matrix multiplication is used in forward propagation and sparse updating is perfomed on weight updating after backward propagation. + +### 1) Local training + +You need to set **sparse\_update=True** in network config. Check the network config documentation for more details. + +### 2) cluster training + +Add the following argument for cluster training of a sparse model. At the same time you need to set **sparse\_remote\_update=True** in network config. Check the network config documentation for more details. + +``` +--ports_num_for_sparse=1 #(default: 0) +``` + +## parallel_nn +`parallel_nn` can be set to mixed use of GPUs and CPUs to compute layers. That is to say, you can deploy network to use a GPU to compute some layers and use a CPU to compute other layers. The other way is to split layers into different GPUs, which can **reduce GPU memory** or **use parallel computation to accelerate some layers**. 
If you want to use these characteristics, you need to specify a device ID in the network config (denoted deviceId below) and add the command line argument:

```
--parallel_nn=true
```
### Case 1: Mixed Use of GPU and CPU
Consider the following example:

```
#command line:
paddle train --use_gpu=true --parallel_nn=true --trainer_count=COUNT

default_device(0)

fc1=fc_layer(...)
fc2=fc_layer(...)
fc3=fc_layer(...,layer_attr=ExtraAttr(device=-1))

```
- default_device(0): set the default device ID to 0. This means that, except for layers with device=-1, all layers use a GPU; the specific GPU used for each layer depends on trainer\_count and gpu\_id (0 by default). Here, layers fc1 and fc2 are computed on the GPU.

- device=-1: use the CPU for layer fc3.

- trainer_count:
  - trainer_count=1: if gpu\_id is not set, use the first GPU to compute layers fc1 and fc2; otherwise use the GPU with gpu\_id.

  - trainer_count>1: use trainer\_count GPUs to compute one layer with data parallelism. For example, trainer\_count=2 means that GPUs 0 and 1 compute layers fc1 and fc2 with data parallelism.

### Case 2: Specify Layers in Different Devices

```
#command line:
paddle train --use_gpu=true --parallel_nn=true --trainer_count=COUNT

#network:
fc2=fc_layer(input=l1, layer_attr=ExtraAttr(device=0), ...)
fc3=fc_layer(input=l1, layer_attr=ExtraAttr(device=1), ...)
fc4=fc_layer(input=fc2, layer_attr=ExtraAttr(device=-1), ...)
```
In this case, we assume that there are 4 GPUs in one machine.

- trainer_count=1:
  - Use GPU 0 to compute layer fc2.
  - Use GPU 1 to compute layer fc3.
  - Use the CPU to compute layer fc4.

- trainer_count=2:
  - Use GPUs 0 and 1 to compute layer fc2.
  - Use GPUs 2 and 3 to compute layer fc3.
  - Use the CPU to compute fc4 in two threads.

- trainer_count=4:
  - This fails (recall that we assumed 4 GPUs in the machine), because the argument `allow_only_one_model_on_one_gpu` is true by default.

**Allocation of device ID when `device!=-1`**:

```
(deviceId + gpu_id + threadId * numLogicalDevices_) % numDevices_

deviceId: specified in the layer.
gpu_id: 0 by default.
threadId: thread ID, in the range 0,1,..., trainer_count-1.
numDevices_: number of (GPU) devices in the machine.
numLogicalDevices_: min(max(deviceId + 1), numDevices_)
```
diff --git a/doc/_sources/ui/data_provider/index.txt b/doc/_sources/ui/data_provider/index.txt new file mode 100644 index 0000000000000000000000000000000000000000..49f6e8fbc87edd6fba0c04323140122448277b02 --- /dev/null +++ b/doc/_sources/ui/data_provider/index.txt @@ -0,0 +1,55 @@
# DataProvider Tutorial #

DataProvider is responsible for data management in PaddlePaddle, corresponding to the Data Layer.

## Input Data Format ##
PaddlePaddle uses **Slot** to describe the data layer of a neural network. One slot describes one data layer. Each slot stores a series of samples, and each sample contains a set of features. A slot has three attributes:
+ **Dimension**: the dimension of the features
+ **SlotType**: there are 5 different slot types in PaddlePaddle; the following table compares the four commonly used ones.
| SlotType | Feature Description | Vector Description |
| --- | --- | --- |
| DenseSlot | Continuous Features | Dense Vector |
| SparseNonValueSlot | Discrete Features without weights | Sparse Vector with all non-zero elements equal to 1 |
| SparseValueSlot | Discrete Features with weights | Sparse Vector |
| IndexSlot | mostly the same as SparseNonValueSlot, but especially for a single label | Sparse Vector with only one value in each time step |
+ +And the remained one is **StringSlot**. It stores Character String, and can be used for debug or to describe data Id for prediction, etc. ++ **SeqType**: a **sequence** is a sample whose features are expanded in time scale. And a **sub-sequence** is a continous ordered subset of a sequence. For example, (a1, a2) and (a3, a4, a5) are two sub-sequences of one sequence (a1, a2, a3, a4, a5). Following are 3 different sequence types in PaddlePaddle: + - **NonSeq**: input sample is not sequence + - **Seq**: input sample is a sequence without sub-sequence + - **SubSeq**: input sample is a sequence with sub-sequence + +## Python DataProvider + +PyDataProviderWrapper is a python decorator in PaddlePaddle, used to read custom python DataProvider class. It currently supports all SlotTypes and SeqTypes of input data. User should only concern how to read samples from file. Feel easy with its [Use Case](python_case.md) and API Reference. diff --git a/doc/_sources/ui/data_provider/python_case.txt b/doc/_sources/ui/data_provider/python_case.txt new file mode 100644 index 0000000000000000000000000000000000000000..b3ce70522318a249f8063808f47555ca75d5d338 --- /dev/null +++ b/doc/_sources/ui/data_provider/python_case.txt @@ -0,0 +1,112 @@ +# Python Use Case # + +This tutorial guides you into using python script that converts user input data into PaddlePaddle Data Format. + +## Quick Start ## + +We use a custom data to show the quick usage. This data consists of two parts with semicolon-delimited `';'`: a) label with 2 dimensions, b) continuous features with 9 dimensions: + + 1;0 0 0 0 0.192157 0.070588 0.215686 0.533333 0 + 0;0 0 0 0.988235 0.913725 0.329412 0.376471 0 0 + +The `simple_provider.py` defines a python data provider: + +```python +from trainer.PyDataProviderWrapper import DenseSlot, IndexSlot, provider + +@provider([DenseSlot(9), IndexSlot(2)]) +def process(obj, file_name): + with open(file_name, 'r') as f: + for line in f: + line = line.split(";") + label = int(line[0]) + image = [float(x) for x in line[1].split()[1:]] + yield label, image +``` + +- `@provider`: specify the SlotType and its dimension. Here, we have 2 Slots, DenseSlot(9) stores continuous features with 9 dimensions, and IndexSlot(2) stores label with 2 dimensions. +- `process`: a generator using **yield** keyword to return results one by one. Here, the return format is 1 Discrete Feature and a list of 9 float Continuous Features. + +The corresponding python **Train** data source `define_py_data_source` is: + +```python +define_py_data_sources('train.list', None, 'simple_provider', 'process') +``` +See here for detail API reference of `define_py_data_sources`. + +## Sequence Example ## + +In some tasks such as Natural Language Processing (NLP), the dimension of Slot is related to the dictionary size, and the dictionary should be dynamically loaded during training or generating. PyDataProviderWrapper can satisfy all these demands easily. + +### Sequence has no sub-sequence ### +Following is an example of data provider when using LSTM network to do sentiment analysis (If you want to understand the whole details of this task, please refer to [Sentiment Analysis Tutorial](../demo/sentiment_analysis/index.md)). + +The input data consists of two parts with two-tabs-delimited: a) label with 2 dimensions, b) sequence with dictionary length dimensions: + + 0 I saw this movie at the AFI Dallas festival . It all takes place at a lake house and it looks wonderful . + 1 This documentary makes you travel all around the globe . 
It contains rare and stunning sequels from the wilderness . + ... + +The `dataprovider.py` in `demo/sentiment` is: + +```python +from trainer.PyDataProviderWrapper import * + +@init_hook_wrapper +def hook(obj, dictionary, **kwargs): + obj.word_dict = dictionary + obj.slots = [IndexSlot(len(obj.word_dict)), IndexSlot(2)] + obj.logger.info('dict len : %d' % (len(obj.word_dict))) + +@provider(use_seq=True, init_hook=hook) +# @provider(use_seq=True, init_hook=hook, pool_size=PoolSize(5000)) +def process(obj, file_name): + with open(file_name, 'r') as fdata: + for line_count, line in enumerate(fdata): + label, comment = line.strip().split('\t\t') + label = int(''.join(label.split(' '))) + words = comment.split() + word_slot = [obj.word_dict[w] for w in words if w in obj.word_dict] + yield word_slot, [label] +``` + +- `hook`: Initialization hook of data provider. Here, it reads the dictionary, sets the obj.slots based on the dictionary length, and uses obj.logger to output some logs. +- `process`: Here, as the Sequence Mode of input is **Seq** and SlotType is IndexSlot, use_seq is set to True, and the yield format is `[int, int, ....]`. +- `PoolSize`: If there are a lot of data, you may need this argument to increase loading speed and reduce memory footprint. Here, PoolSize(5000) means read at most 5000 samples to memory. + +The corresponding python **Train/Test** data sources `define_py_data_sources` is: + +```python +train_list = train_list if not is_test else None +word_dict = dict() +with open(dict_file, 'r') as f: + for i, line in enumerate(open(dict_file, 'r')): + word_dict[line.split('\t')[0]] = i + +define_py_data_sources(train_list, test_list, module = "dataprovider", obj = "processData", + args = {'dictionary': word_dict}, train_async = True) +``` + +### Sequence has sub-sequence ### + +If the sequence of above input data is considered as several sub-sequences joint by dot `'.'`, quesion mark `'?'` or exclamation mark `'!'`, see `processData2` in `demo/sentiment/dataprovider.py` as follows: + +```python +import re + +@provider(use_seq=True, init_hook=hook) +def process2(obj, file_name): + with open(file_name, 'r') as fdata: + pat = re.compile(r'[^.?!]+[.?!]') + for line_count, line in enumerate(fdata): + label, comment = line.strip().split('\t\t') + label = int(''.join(label.split(' '))) + words_list = pat.findall(comment) + word_slot_list = [[obj.word_dict[w] for w in words.split() \ + if w in obj.word_dict] for words in words_list] + yield word_slot_list, [[label]] +``` + +- `hook`: the same as above. Note that as **SubSeq Slot must put before Seq Slot** in PaddlePaddle, we could not reverse the yield order in this case. +- `process2`: Here, as the Sequence Mode of input is **SubSeq**, and the SlotType is IndexSlot, use_seq is set to True, and the yield format is `[[int, int, ...], [int, int, ...], ... ]`. +- `define_py_data_sources`: the same as above. 
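To make the nested yield format concrete, here is a small self-contained sketch (plain Python, no PaddlePaddle imports; the toy dictionary and sample comment are made up for illustration and are not part of the demo data) of how one comment is split into sub-sequences and converted into the `[[int, int, ...], [int, int, ...], ...]` structure that `process2` yields:

```python
import re

# Toy dictionary and comment, for illustration only.
word_dict = {'it': 0, 'is': 1, 'great': 2, 'i': 3, 'loved': 4}
comment = "it is great ! i loved it ."
label = 1

# Same pattern as in process2: split the comment into sub-sequences at . ? !
pat = re.compile(r'[^.?!]+[.?!]')
words_list = pat.findall(comment)        # ['it is great !', ' i loved it .']

# One inner list of word indices per sub-sequence.
word_slot_list = [[word_dict[w] for w in words.split() if w in word_dict]
                  for words in words_list]

print(word_slot_list)  # [[0, 1, 2], [3, 4, 0]]
print([[label]])       # [[1]] -- the label wrapped to match the SubSeq yield format
```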
diff --git a/doc/_sources/ui/index.txt b/doc/_sources/ui/index.txt new file mode 100644 index 0000000000000000000000000000000000000000..976d3382444ad3c63df28996e1a57fd9eb9c2aa1 --- /dev/null +++ b/doc/_sources/ui/index.txt @@ -0,0 +1,21 @@
# User Interface

## Data Provider

* [Introduction](data_provider/index.md)
* [Python Use Case](data_provider/python_case.md)

## API Reference

* [PyDataProviderWrapper](api/py_data_provider_wrapper.rst)
* [Trainer Config Helpers](api/trainer_config_helpers/index.md)

## Command Line Argument

* [Use Case](cmd_argument/use_case.md)
* [Argument Outline](cmd_argument/argument_outline.md)
* [Detail Description](cmd_argument/detail_introduction.md)

## Predict

* [Python Prediction API](predict/swig_py_paddle_en.rst)
diff --git a/doc/_sources/ui/predict/swig_py_paddle_en.txt b/doc/_sources/ui/predict/swig_py_paddle_en.txt new file mode 100644 index 0000000000000000000000000000000000000000..e22d0bff338d95f611f5c336adb1092b893d7329 --- /dev/null +++ b/doc/_sources/ui/predict/swig_py_paddle_en.txt @@ -0,0 +1,51 @@
Python Prediction API
=====================

PaddlePaddle offers a set of clean prediction interfaces for Python with the help of
SWIG. The main steps for predicting values in Python are:

* Parse the training configuration
* Construct a GradientMachine
* Prepare the data
* Predict

Here is a sample Python script that shows the typical prediction process for the
MNIST classification problem.

.. literalinclude:: ./predict_sample.py
   :language: python
   :linenos:

The module that does most of the work is py_paddle.swig_paddle. It is
generated by SWIG and has complete documentation; for more details you can use
Python's :code:`help()` function. Let's walk through the above Python script:

* At the beginning, initialize PaddlePaddle with the command line arguments (line 90).
* Parse the configuration file that was used in training (line 93).
* Create a neural network at line 95 according to the parsed configuration, then
  load the trained parameters from the model at line 97.
* A utility class for data transformation is created at line 98.
  - Note: As swig_paddle can only accept C++ matrices, we offer a utility
    class DataProviderWrapperConverter that accepts the same input data as
    PyDataProviderWrapper; for more information please refer to the documentation
    of `PyDataProviderWrapper <../py_data_provider_wrapper_api.html>`_.
* Do the prediction and output the result at line 100; forwardTest is another
  utility that directly returns the activations of the output layer.

Here is a typical output:

.. code-block:: text

    [{'id': None, 'value': array([[ 5.53018653e-09, 1.12194102e-05, 1.96644767e-09,
              1.43630644e-02, 1.51111044e-13, 9.85625684e-01,
              2.08823112e-10, 2.32777140e-08, 2.00186201e-09,
              1.15501715e-08],
            [ 9.99982715e-01, 1.27787406e-10, 1.72296313e-05,
              1.49316648e-09, 1.36540484e-11, 6.93137714e-10,
              2.70634608e-08, 3.48565123e-08, 5.25639710e-09,
              4.48684503e-08]], dtype=float32)}]

:code:`value` is the output of the output layer: each row is the result for the
corresponding row in the input data, and each element is the activation of the
corresponding neuron in the output layer.
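Since each row of :code:`value` holds one activation per output neuron, a common
follow-up step is to take the argmax of every row to obtain the predicted class
labels. The snippet below is a small sketch of that step using NumPy; it assumes
the list shown above is stored in a variable named :code:`result` (the actual
variable name depends on your script).

.. code-block:: python

    import numpy as np

    # 'value' has shape (num_samples, num_output_neurons).
    value = result[0]['value']
    predicted_labels = np.argmax(value, axis=1)
    print(predicted_labels)  # [5 0] for the two sample rows shown above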
+ diff --git a/doc/_static/ajax-loader.gif b/doc/_static/ajax-loader.gif new file mode 100644 index 0000000000000000000000000000000000000000..61faf8cab23993bd3e1560bff0668bd628642330 Binary files /dev/null and b/doc/_static/ajax-loader.gif differ diff --git a/doc/_static/basic.css b/doc/_static/basic.css new file mode 100644 index 0000000000000000000000000000000000000000..c89fc7e920b41365e5fb89c104b35fbd18179b89 --- /dev/null +++ b/doc/_static/basic.css @@ -0,0 +1,599 @@ +/* + * basic.css + * ~~~~~~~~~ + * + * Sphinx stylesheet -- basic theme. + * + * :copyright: Copyright 2007-2016 by the Sphinx team, see AUTHORS. + * :license: BSD, see LICENSE for details. + * + */ + +/* -- main layout ----------------------------------------------------------- */ + +div.clearer { + clear: both; +} + +/* -- relbar ---------------------------------------------------------------- */ + +div.related { + width: 100%; + font-size: 90%; +} + +div.related h3 { + display: none; +} + +div.related ul { + margin: 0; + padding: 0 0 0 10px; + list-style: none; +} + +div.related li { + display: inline; +} + +div.related li.right { + float: right; + margin-right: 5px; +} + +/* -- sidebar --------------------------------------------------------------- */ + +div.sphinxsidebarwrapper { + padding: 10px 5px 0 10px; +} + +div.sphinxsidebar { + float: left; + width: 230px; + margin-left: -100%; + font-size: 90%; +} + +div.sphinxsidebar ul { + list-style: none; +} + +div.sphinxsidebar ul ul, +div.sphinxsidebar ul.want-points { + margin-left: 20px; + list-style: square; +} + +div.sphinxsidebar ul ul { + margin-top: 0; + margin-bottom: 0; +} + +div.sphinxsidebar form { + margin-top: 10px; +} + +div.sphinxsidebar input { + border: 1px solid #98dbcc; + font-family: sans-serif; + font-size: 1em; +} + +div.sphinxsidebar #searchbox input[type="text"] { + width: 170px; +} + +div.sphinxsidebar #searchbox input[type="submit"] { + width: 30px; +} + +img { + border: 0; + max-width: 100%; +} + +/* -- search page ----------------------------------------------------------- */ + +ul.search { + margin: 10px 0 0 20px; + padding: 0; +} + +ul.search li { + padding: 5px 0 5px 20px; + background-image: url(file.png); + background-repeat: no-repeat; + background-position: 0 7px; +} + +ul.search li a { + font-weight: bold; +} + +ul.search li div.context { + color: #888; + margin: 2px 0 0 30px; + text-align: left; +} + +ul.keywordmatches li.goodmatch a { + font-weight: bold; +} + +/* -- index page ------------------------------------------------------------ */ + +table.contentstable { + width: 90%; +} + +table.contentstable p.biglink { + line-height: 150%; +} + +a.biglink { + font-size: 1.3em; +} + +span.linkdescr { + font-style: italic; + padding-top: 5px; + font-size: 90%; +} + +/* -- general index --------------------------------------------------------- */ + +table.indextable { + width: 100%; +} + +table.indextable td { + text-align: left; + vertical-align: top; +} + +table.indextable dl, table.indextable dd { + margin-top: 0; + margin-bottom: 0; +} + +table.indextable tr.pcap { + height: 10px; +} + +table.indextable tr.cap { + margin-top: 10px; + background-color: #f2f2f2; +} + +img.toggler { + margin-right: 3px; + margin-top: 3px; + cursor: pointer; +} + +div.modindex-jumpbox { + border-top: 1px solid #ddd; + border-bottom: 1px solid #ddd; + margin: 1em 0 1em 0; + padding: 0.4em; +} + +div.genindex-jumpbox { + border-top: 1px solid #ddd; + border-bottom: 1px solid #ddd; + margin: 1em 0 1em 0; + padding: 0.4em; +} + +/* -- general body styles 
--------------------------------------------------- */ + +a.headerlink { + visibility: hidden; +} + +h1:hover > a.headerlink, +h2:hover > a.headerlink, +h3:hover > a.headerlink, +h4:hover > a.headerlink, +h5:hover > a.headerlink, +h6:hover > a.headerlink, +dt:hover > a.headerlink, +caption:hover > a.headerlink, +p.caption:hover > a.headerlink, +div.code-block-caption:hover > a.headerlink { + visibility: visible; +} + +div.body p.caption { + text-align: inherit; +} + +div.body td { + text-align: left; +} + +.field-list ul { + padding-left: 1em; +} + +.first { + margin-top: 0 !important; +} + +p.rubric { + margin-top: 30px; + font-weight: bold; +} + +img.align-left, .figure.align-left, object.align-left { + clear: left; + float: left; + margin-right: 1em; +} + +img.align-right, .figure.align-right, object.align-right { + clear: right; + float: right; + margin-left: 1em; +} + +img.align-center, .figure.align-center, object.align-center { + display: block; + margin-left: auto; + margin-right: auto; +} + +.align-left { + text-align: left; +} + +.align-center { + text-align: center; +} + +.align-right { + text-align: right; +} + +/* -- sidebars -------------------------------------------------------------- */ + +div.sidebar { + margin: 0 0 0.5em 1em; + border: 1px solid #ddb; + padding: 7px 7px 0 7px; + background-color: #ffe; + width: 40%; + float: right; +} + +p.sidebar-title { + font-weight: bold; +} + +/* -- topics ---------------------------------------------------------------- */ + +div.topic { + border: 1px solid #ccc; + padding: 7px 7px 0 7px; + margin: 10px 0 10px 0; +} + +p.topic-title { + font-size: 1.1em; + font-weight: bold; + margin-top: 10px; +} + +/* -- admonitions ----------------------------------------------------------- */ + +div.admonition { + margin-top: 10px; + margin-bottom: 10px; + padding: 7px; +} + +div.admonition dt { + font-weight: bold; +} + +div.admonition dl { + margin-bottom: 0; +} + +p.admonition-title { + margin: 0px 10px 5px 0px; + font-weight: bold; +} + +div.body p.centered { + text-align: center; + margin-top: 25px; +} + +/* -- tables ---------------------------------------------------------------- */ + +table.docutils { + border: 0; + border-collapse: collapse; +} + +table caption span.caption-number { + font-style: italic; +} + +table caption span.caption-text { +} + +table.docutils td, table.docutils th { + padding: 1px 8px 1px 5px; + border-top: 0; + border-left: 0; + border-right: 0; + border-bottom: 1px solid #aaa; +} + +table.field-list td, table.field-list th { + border: 0 !important; +} + +table.footnote td, table.footnote th { + border: 0 !important; +} + +th { + text-align: left; + padding-right: 5px; +} + +table.citation { + border-left: solid 1px gray; + margin-left: 1px; +} + +table.citation td { + border-bottom: none; +} + +/* -- figures --------------------------------------------------------------- */ + +div.figure { + margin: 0.5em; + padding: 0.5em; +} + +div.figure p.caption { + padding: 0.3em; +} + +div.figure p.caption span.caption-number { + font-style: italic; +} + +div.figure p.caption span.caption-text { +} + + +/* -- other body styles ----------------------------------------------------- */ + +ol.arabic { + list-style: decimal; +} + +ol.loweralpha { + list-style: lower-alpha; +} + +ol.upperalpha { + list-style: upper-alpha; +} + +ol.lowerroman { + list-style: lower-roman; +} + +ol.upperroman { + list-style: upper-roman; +} + +dl { + margin-bottom: 15px; +} + +dd p { + margin-top: 0px; +} + +dd ul, dd table { + margin-bottom: 10px; 
+} + +dd { + margin-top: 3px; + margin-bottom: 10px; + margin-left: 30px; +} + +dt:target, .highlighted { + background-color: #fbe54e; +} + +dl.glossary dt { + font-weight: bold; + font-size: 1.1em; +} + +.field-list ul { + margin: 0; + padding-left: 1em; +} + +.field-list p { + margin: 0; +} + +.optional { + font-size: 1.3em; +} + +.sig-paren { + font-size: larger; +} + +.versionmodified { + font-style: italic; +} + +.system-message { + background-color: #fda; + padding: 5px; + border: 3px solid red; +} + +.footnote:target { + background-color: #ffa; +} + +.line-block { + display: block; + margin-top: 1em; + margin-bottom: 1em; +} + +.line-block .line-block { + margin-top: 0; + margin-bottom: 0; + margin-left: 1.5em; +} + +.guilabel, .menuselection { + font-family: sans-serif; +} + +.accelerator { + text-decoration: underline; +} + +.classifier { + font-style: oblique; +} + +abbr, acronym { + border-bottom: dotted 1px; + cursor: help; +} + +/* -- code displays --------------------------------------------------------- */ + +pre { + overflow: auto; + overflow-y: hidden; /* fixes display issues on Chrome browsers */ +} + +td.linenos pre { + padding: 5px 0px; + border: 0; + background-color: transparent; + color: #aaa; +} + +table.highlighttable { + margin-left: 0.5em; +} + +table.highlighttable td { + padding: 0 0.5em 0 0.5em; +} + +div.code-block-caption { + padding: 2px 5px; + font-size: small; +} + +div.code-block-caption code { + background-color: transparent; +} + +div.code-block-caption + div > div.highlight > pre { + margin-top: 0; +} + +div.code-block-caption span.caption-number { + padding: 0.1em 0.3em; + font-style: italic; +} + +div.code-block-caption span.caption-text { +} + +div.literal-block-wrapper { + padding: 1em 1em 0; +} + +div.literal-block-wrapper div.highlight { + margin: 0; +} + +code.descname { + background-color: transparent; + font-weight: bold; + font-size: 1.2em; +} + +code.descclassname { + background-color: transparent; +} + +code.xref, a code { + background-color: transparent; + font-weight: bold; +} + +h1 code, h2 code, h3 code, h4 code, h5 code, h6 code { + background-color: transparent; +} + +.viewcode-link { + float: right; +} + +.viewcode-back { + float: right; + font-family: sans-serif; +} + +div.viewcode-block:target { + margin: -1px -10px; + padding: 0 10px; +} + +/* -- math display ---------------------------------------------------------- */ + +img.math { + vertical-align: middle; +} + +div.body div.math p { + text-align: center; +} + +span.eqno { + float: right; +} + +/* -- printout stylesheet --------------------------------------------------- */ + +@media print { + div.document, + div.documentwrapper, + div.bodywrapper { + margin: 0 !important; + width: 100%; + } + + div.sphinxsidebar, + div.related, + div.footer, + #top-link { + display: none; + } +} \ No newline at end of file diff --git a/doc/_static/classic.css b/doc/_static/classic.css new file mode 100644 index 0000000000000000000000000000000000000000..d98894b3f666966797bc7444b3c43bfcfa4e1bc1 --- /dev/null +++ b/doc/_static/classic.css @@ -0,0 +1,261 @@ +/* + * default.css_t + * ~~~~~~~~~~~~~ + * + * Sphinx stylesheet -- default theme. + * + * :copyright: Copyright 2007-2016 by the Sphinx team, see AUTHORS. + * :license: BSD, see LICENSE for details. 
+ * + */ + +@import url("basic.css"); + +/* -- page layout ----------------------------------------------------------- */ + +body { + font-family: sans-serif; + font-size: 100%; + background-color: #11303d; + color: #000; + margin: 0; + padding: 0; +} + +div.document { + background-color: #1c4e63; +} + +div.documentwrapper { + float: left; + width: 100%; +} + +div.bodywrapper { + margin: 0 0 0 230px; +} + +div.body { + background-color: #ffffff; + color: #000000; + padding: 0 20px 30px 20px; +} + +div.footer { + color: #ffffff; + width: 100%; + padding: 9px 0 9px 0; + text-align: center; + font-size: 75%; +} + +div.footer a { + color: #ffffff; + text-decoration: underline; +} + +div.related { + background-color: #133f52; + line-height: 30px; + color: #ffffff; +} + +div.related a { + color: #ffffff; +} + +div.sphinxsidebar { +} + +div.sphinxsidebar h3 { + font-family: 'Trebuchet MS', sans-serif; + color: #ffffff; + font-size: 1.4em; + font-weight: normal; + margin: 0; + padding: 0; +} + +div.sphinxsidebar h3 a { + color: #ffffff; +} + +div.sphinxsidebar h4 { + font-family: 'Trebuchet MS', sans-serif; + color: #ffffff; + font-size: 1.3em; + font-weight: normal; + margin: 5px 0 0 0; + padding: 0; +} + +div.sphinxsidebar p { + color: #ffffff; +} + +div.sphinxsidebar p.topless { + margin: 5px 10px 10px 10px; +} + +div.sphinxsidebar ul { + margin: 10px; + padding: 0; + color: #ffffff; +} + +div.sphinxsidebar a { + color: #98dbcc; +} + +div.sphinxsidebar input { + border: 1px solid #98dbcc; + font-family: sans-serif; + font-size: 1em; +} + + + +/* -- hyperlink styles ------------------------------------------------------ */ + +a { + color: #355f7c; + text-decoration: none; +} + +a:visited { + color: #355f7c; + text-decoration: none; +} + +a:hover { + text-decoration: underline; +} + + + +/* -- body styles ----------------------------------------------------------- */ + +div.body h1, +div.body h2, +div.body h3, +div.body h4, +div.body h5, +div.body h6 { + font-family: 'Trebuchet MS', sans-serif; + background-color: #f2f2f2; + font-weight: normal; + color: #20435c; + border-bottom: 1px solid #ccc; + margin: 20px -20px 10px -20px; + padding: 3px 0 3px 10px; +} + +div.body h1 { margin-top: 0; font-size: 200%; } +div.body h2 { font-size: 160%; } +div.body h3 { font-size: 140%; } +div.body h4 { font-size: 120%; } +div.body h5 { font-size: 110%; } +div.body h6 { font-size: 100%; } + +a.headerlink { + color: #c60f0f; + font-size: 0.8em; + padding: 0 4px 0 4px; + text-decoration: none; +} + +a.headerlink:hover { + background-color: #c60f0f; + color: white; +} + +div.body p, div.body dd, div.body li, div.body blockquote { + text-align: justify; + line-height: 130%; +} + +div.admonition p.admonition-title + p { + display: inline; +} + +div.admonition p { + margin-bottom: 5px; +} + +div.admonition pre { + margin-bottom: 5px; +} + +div.admonition ul, div.admonition ol { + margin-bottom: 5px; +} + +div.note { + background-color: #eee; + border: 1px solid #ccc; +} + +div.seealso { + background-color: #ffc; + border: 1px solid #ff6; +} + +div.topic { + background-color: #eee; +} + +div.warning { + background-color: #ffe4e4; + border: 1px solid #f66; +} + +p.admonition-title { + display: inline; +} + +p.admonition-title:after { + content: ":"; +} + +pre { + padding: 5px; + background-color: #eeffcc; + color: #333333; + line-height: 120%; + border: 1px solid #ac9; + border-left: none; + border-right: none; +} + +code { + background-color: #ecf0f3; + padding: 0 1px 0 1px; + font-size: 0.95em; +} + +th { + 
background-color: #ede; +} + +.warning code { + background: #efc2c2; +} + +.note code { + background: #d6d6d6; +} + +.viewcode-back { + font-family: sans-serif; +} + +div.viewcode-block:target { + background-color: #f4debf; + border-top: 1px solid #ac9; + border-bottom: 1px solid #ac9; +} + +div.code-block-caption { + color: #efefef; + background-color: #1c4e63; +} \ No newline at end of file diff --git a/doc/_static/comment-bright.png b/doc/_static/comment-bright.png new file mode 100644 index 0000000000000000000000000000000000000000..551517b8c83b76f734ff791f847829a760ad1903 Binary files /dev/null and b/doc/_static/comment-bright.png differ diff --git a/doc/_static/comment-close.png b/doc/_static/comment-close.png new file mode 100644 index 0000000000000000000000000000000000000000..09b54be46da3f0d4a5061da289dc91d8a2cdbc9c Binary files /dev/null and b/doc/_static/comment-close.png differ diff --git a/doc/_static/comment.png b/doc/_static/comment.png new file mode 100644 index 0000000000000000000000000000000000000000..92feb52b8824c6b0f59b658b1196c61de9162a95 Binary files /dev/null and b/doc/_static/comment.png differ diff --git a/doc/_static/doctools.js b/doc/_static/doctools.js new file mode 100644 index 0000000000000000000000000000000000000000..e2e70cc287e06e4af0b8ce70f28a65b44079cdae --- /dev/null +++ b/doc/_static/doctools.js @@ -0,0 +1,263 @@ +/* + * doctools.js + * ~~~~~~~~~~~ + * + * Sphinx JavaScript utilities for all documentation. + * + * :copyright: Copyright 2007-2016 by the Sphinx team, see AUTHORS. + * :license: BSD, see LICENSE for details. + * + */ + +/** + * select a different prefix for underscore + */ +$u = _.noConflict(); + +/** + * make the code below compatible with browsers without + * an installed firebug like debugger +if (!window.console || !console.firebug) { + var names = ["log", "debug", "info", "warn", "error", "assert", "dir", + "dirxml", "group", "groupEnd", "time", "timeEnd", "count", "trace", + "profile", "profileEnd"]; + window.console = {}; + for (var i = 0; i < names.length; ++i) + window.console[names[i]] = function() {}; +} + */ + +/** + * small helper function to urldecode strings + */ +jQuery.urldecode = function(x) { + return decodeURIComponent(x).replace(/\+/g, ' '); +}; + +/** + * small helper function to urlencode strings + */ +jQuery.urlencode = encodeURIComponent; + +/** + * This function returns the parsed url parameters of the + * current request. Multiple values per key are supported, + * it will always return arrays of strings for the value parts. + */ +jQuery.getQueryParameters = function(s) { + if (typeof s == 'undefined') + s = document.location.search; + var parts = s.substr(s.indexOf('?') + 1).split('&'); + var result = {}; + for (var i = 0; i < parts.length; i++) { + var tmp = parts[i].split('=', 2); + var key = jQuery.urldecode(tmp[0]); + var value = jQuery.urldecode(tmp[1]); + if (key in result) + result[key].push(value); + else + result[key] = [value]; + } + return result; +}; + +/** + * highlight a given string on a jquery object by wrapping it in + * span elements with the given class name. 
+ */ +jQuery.fn.highlightText = function(text, className) { + function highlight(node) { + if (node.nodeType == 3) { + var val = node.nodeValue; + var pos = val.toLowerCase().indexOf(text); + if (pos >= 0 && !jQuery(node.parentNode).hasClass(className)) { + var span = document.createElement("span"); + span.className = className; + span.appendChild(document.createTextNode(val.substr(pos, text.length))); + node.parentNode.insertBefore(span, node.parentNode.insertBefore( + document.createTextNode(val.substr(pos + text.length)), + node.nextSibling)); + node.nodeValue = val.substr(0, pos); + } + } + else if (!jQuery(node).is("button, select, textarea")) { + jQuery.each(node.childNodes, function() { + highlight(this); + }); + } + } + return this.each(function() { + highlight(this); + }); +}; + +/* + * backward compatibility for jQuery.browser + * This will be supported until firefox bug is fixed. + */ +if (!jQuery.browser) { + jQuery.uaMatch = function(ua) { + ua = ua.toLowerCase(); + + var match = /(chrome)[ \/]([\w.]+)/.exec(ua) || + /(webkit)[ \/]([\w.]+)/.exec(ua) || + /(opera)(?:.*version|)[ \/]([\w.]+)/.exec(ua) || + /(msie) ([\w.]+)/.exec(ua) || + ua.indexOf("compatible") < 0 && /(mozilla)(?:.*? rv:([\w.]+)|)/.exec(ua) || + []; + + return { + browser: match[ 1 ] || "", + version: match[ 2 ] || "0" + }; + }; + jQuery.browser = {}; + jQuery.browser[jQuery.uaMatch(navigator.userAgent).browser] = true; +} + +/** + * Small JavaScript module for the documentation. + */ +var Documentation = { + + init : function() { + this.fixFirefoxAnchorBug(); + this.highlightSearchWords(); + this.initIndexTable(); + }, + + /** + * i18n support + */ + TRANSLATIONS : {}, + PLURAL_EXPR : function(n) { return n == 1 ? 0 : 1; }, + LOCALE : 'unknown', + + // gettext and ngettext don't access this so that the functions + // can safely bound to a different name (_ = Documentation.gettext) + gettext : function(string) { + var translated = Documentation.TRANSLATIONS[string]; + if (typeof translated == 'undefined') + return string; + return (typeof translated == 'string') ? translated : translated[0]; + }, + + ngettext : function(singular, plural, n) { + var translated = Documentation.TRANSLATIONS[singular]; + if (typeof translated == 'undefined') + return (n == 1) ? singular : plural; + return translated[Documentation.PLURALEXPR(n)]; + }, + + addTranslations : function(catalog) { + for (var key in catalog.messages) + this.TRANSLATIONS[key] = catalog.messages[key]; + this.PLURAL_EXPR = new Function('n', 'return +(' + catalog.plural_expr + ')'); + this.LOCALE = catalog.locale; + }, + + /** + * add context elements like header anchor links + */ + addContextElements : function() { + $('div[id] > :header:first').each(function() { + $('\u00B6'). + attr('href', '#' + this.id). + attr('title', _('Permalink to this headline')). + appendTo(this); + }); + $('dt[id]').each(function() { + $('\u00B6'). + attr('href', '#' + this.id). + attr('title', _('Permalink to this definition')). + appendTo(this); + }); + }, + + /** + * workaround a firefox stupidity + * see: https://bugzilla.mozilla.org/show_bug.cgi?id=645075 + */ + fixFirefoxAnchorBug : function() { + if (document.location.hash) + window.setTimeout(function() { + document.location.href += ''; + }, 10); + }, + + /** + * highlight the search words provided in the url in the text + */ + highlightSearchWords : function() { + var params = $.getQueryParameters(); + var terms = (params.highlight) ? 
params.highlight[0].split(/\s+/) : []; + if (terms.length) { + var body = $('div.body'); + if (!body.length) { + body = $('body'); + } + window.setTimeout(function() { + $.each(terms, function() { + body.highlightText(this.toLowerCase(), 'highlighted'); + }); + }, 10); + $('') + .appendTo($('#searchbox')); + } + }, + + /** + * init the domain index toggle buttons + */ + initIndexTable : function() { + var togglers = $('img.toggler').click(function() { + var src = $(this).attr('src'); + var idnum = $(this).attr('id').substr(7); + $('tr.cg-' + idnum).toggle(); + if (src.substr(-9) == 'minus.png') + $(this).attr('src', src.substr(0, src.length-9) + 'plus.png'); + else + $(this).attr('src', src.substr(0, src.length-8) + 'minus.png'); + }).css('display', ''); + if (DOCUMENTATION_OPTIONS.COLLAPSE_INDEX) { + togglers.click(); + } + }, + + /** + * helper function to hide the search marks again + */ + hideSearchWords : function() { + $('#searchbox .highlight-link').fadeOut(300); + $('span.highlighted').removeClass('highlighted'); + }, + + /** + * make the url absolute + */ + makeURL : function(relativeURL) { + return DOCUMENTATION_OPTIONS.URL_ROOT + '/' + relativeURL; + }, + + /** + * get the current relative url + */ + getCurrentURL : function() { + var path = document.location.pathname; + var parts = path.split(/\//); + $.each(DOCUMENTATION_OPTIONS.URL_ROOT.split(/\//), function() { + if (this == '..') + parts.pop(); + }); + var url = parts.join('/'); + return path.substring(url.lastIndexOf('/') + 1, path.length - 1); + } +}; + +// quick alias for translations +_ = Documentation.gettext; + +$(document).ready(function() { + Documentation.init(); +}); diff --git a/doc/_static/down-pressed.png b/doc/_static/down-pressed.png new file mode 100644 index 0000000000000000000000000000000000000000..7c30d004b71b32bb2fc06b3bd4dc8278baab0946 Binary files /dev/null and b/doc/_static/down-pressed.png differ diff --git a/doc/_static/down.png b/doc/_static/down.png new file mode 100644 index 0000000000000000000000000000000000000000..f48098a43b0c36342db9e1a9a7372e79b2484a59 Binary files /dev/null and b/doc/_static/down.png differ diff --git a/doc/_static/file.png b/doc/_static/file.png new file mode 100644 index 0000000000000000000000000000000000000000..254c60bfbe2715ae2edca48ebccfd074deb8031d Binary files /dev/null and b/doc/_static/file.png differ diff --git a/doc/_static/jquery-1.11.1.js b/doc/_static/jquery-1.11.1.js new file mode 100644 index 0000000000000000000000000000000000000000..d4b67f7e6c1a94df167f31657769717a71581066 --- /dev/null +++ b/doc/_static/jquery-1.11.1.js @@ -0,0 +1,10308 @@ +/*! + * jQuery JavaScript Library v1.11.1 + * http://jquery.com/ + * + * Includes Sizzle.js + * http://sizzlejs.com/ + * + * Copyright 2005, 2014 jQuery Foundation, Inc. and other contributors + * Released under the MIT license + * http://jquery.org/license + * + * Date: 2014-05-01T17:42Z + */ + +(function( global, factory ) { + + if ( typeof module === "object" && typeof module.exports === "object" ) { + // For CommonJS and CommonJS-like environments where a proper window is present, + // execute the factory and get jQuery + // For environments that do not inherently posses a window with a document + // (such as Node.js), expose a jQuery-making factory as module.exports + // This accentuates the need for the creation of a real window + // e.g. var jQuery = require("jquery")(window); + // See ticket #14549 for more info + module.exports = global.document ? 
+ factory( global, true ) : + function( w ) { + if ( !w.document ) { + throw new Error( "jQuery requires a window with a document" ); + } + return factory( w ); + }; + } else { + factory( global ); + } + +// Pass this if window is not defined yet +}(typeof window !== "undefined" ? window : this, function( window, noGlobal ) { + +// Can't do this because several apps including ASP.NET trace +// the stack via arguments.caller.callee and Firefox dies if +// you try to trace through "use strict" call chains. (#13335) +// Support: Firefox 18+ +// + +var deletedIds = []; + +var slice = deletedIds.slice; + +var concat = deletedIds.concat; + +var push = deletedIds.push; + +var indexOf = deletedIds.indexOf; + +var class2type = {}; + +var toString = class2type.toString; + +var hasOwn = class2type.hasOwnProperty; + +var support = {}; + + + +var + version = "1.11.1", + + // Define a local copy of jQuery + jQuery = function( selector, context ) { + // The jQuery object is actually just the init constructor 'enhanced' + // Need init if jQuery is called (just allow error to be thrown if not included) + return new jQuery.fn.init( selector, context ); + }, + + // Support: Android<4.1, IE<9 + // Make sure we trim BOM and NBSP + rtrim = /^[\s\uFEFF\xA0]+|[\s\uFEFF\xA0]+$/g, + + // Matches dashed string for camelizing + rmsPrefix = /^-ms-/, + rdashAlpha = /-([\da-z])/gi, + + // Used by jQuery.camelCase as callback to replace() + fcamelCase = function( all, letter ) { + return letter.toUpperCase(); + }; + +jQuery.fn = jQuery.prototype = { + // The current version of jQuery being used + jquery: version, + + constructor: jQuery, + + // Start with an empty selector + selector: "", + + // The default length of a jQuery object is 0 + length: 0, + + toArray: function() { + return slice.call( this ); + }, + + // Get the Nth element in the matched element set OR + // Get the whole matched element set as a clean array + get: function( num ) { + return num != null ? + + // Return just the one element from the set + ( num < 0 ? this[ num + this.length ] : this[ num ] ) : + + // Return all the elements in a clean array + slice.call( this ); + }, + + // Take an array of elements and push it onto the stack + // (returning the new matched element set) + pushStack: function( elems ) { + + // Build a new jQuery matched element set + var ret = jQuery.merge( this.constructor(), elems ); + + // Add the old object onto the stack (as a reference) + ret.prevObject = this; + ret.context = this.context; + + // Return the newly-formed element set + return ret; + }, + + // Execute a callback for every element in the matched set. + // (You can seed the arguments with an array of args, but this is + // only used internally.) + each: function( callback, args ) { + return jQuery.each( this, callback, args ); + }, + + map: function( callback ) { + return this.pushStack( jQuery.map(this, function( elem, i ) { + return callback.call( elem, i, elem ); + })); + }, + + slice: function() { + return this.pushStack( slice.apply( this, arguments ) ); + }, + + first: function() { + return this.eq( 0 ); + }, + + last: function() { + return this.eq( -1 ); + }, + + eq: function( i ) { + var len = this.length, + j = +i + ( i < 0 ? len : 0 ); + return this.pushStack( j >= 0 && j < len ? [ this[j] ] : [] ); + }, + + end: function() { + return this.prevObject || this.constructor(null); + }, + + // For internal use only. + // Behaves like an Array's method, not like a jQuery method. 
+ push: push, + sort: deletedIds.sort, + splice: deletedIds.splice +}; + +jQuery.extend = jQuery.fn.extend = function() { + var src, copyIsArray, copy, name, options, clone, + target = arguments[0] || {}, + i = 1, + length = arguments.length, + deep = false; + + // Handle a deep copy situation + if ( typeof target === "boolean" ) { + deep = target; + + // skip the boolean and the target + target = arguments[ i ] || {}; + i++; + } + + // Handle case when target is a string or something (possible in deep copy) + if ( typeof target !== "object" && !jQuery.isFunction(target) ) { + target = {}; + } + + // extend jQuery itself if only one argument is passed + if ( i === length ) { + target = this; + i--; + } + + for ( ; i < length; i++ ) { + // Only deal with non-null/undefined values + if ( (options = arguments[ i ]) != null ) { + // Extend the base object + for ( name in options ) { + src = target[ name ]; + copy = options[ name ]; + + // Prevent never-ending loop + if ( target === copy ) { + continue; + } + + // Recurse if we're merging plain objects or arrays + if ( deep && copy && ( jQuery.isPlainObject(copy) || (copyIsArray = jQuery.isArray(copy)) ) ) { + if ( copyIsArray ) { + copyIsArray = false; + clone = src && jQuery.isArray(src) ? src : []; + + } else { + clone = src && jQuery.isPlainObject(src) ? src : {}; + } + + // Never move original objects, clone them + target[ name ] = jQuery.extend( deep, clone, copy ); + + // Don't bring in undefined values + } else if ( copy !== undefined ) { + target[ name ] = copy; + } + } + } + } + + // Return the modified object + return target; +}; + +jQuery.extend({ + // Unique for each copy of jQuery on the page + expando: "jQuery" + ( version + Math.random() ).replace( /\D/g, "" ), + + // Assume jQuery is ready without the ready module + isReady: true, + + error: function( msg ) { + throw new Error( msg ); + }, + + noop: function() {}, + + // See test/unit/core.js for details concerning isFunction. + // Since version 1.3, DOM methods and functions like alert + // aren't supported. They return false on IE (#2968). + isFunction: function( obj ) { + return jQuery.type(obj) === "function"; + }, + + isArray: Array.isArray || function( obj ) { + return jQuery.type(obj) === "array"; + }, + + isWindow: function( obj ) { + /* jshint eqeqeq: false */ + return obj != null && obj == obj.window; + }, + + isNumeric: function( obj ) { + // parseFloat NaNs numeric-cast false positives (null|true|false|"") + // ...but misinterprets leading-number strings, particularly hex literals ("0x...") + // subtraction forces infinities to NaN + return !jQuery.isArray( obj ) && obj - parseFloat( obj ) >= 0; + }, + + isEmptyObject: function( obj ) { + var name; + for ( name in obj ) { + return false; + } + return true; + }, + + isPlainObject: function( obj ) { + var key; + + // Must be an Object. + // Because of IE, we also have to check the presence of the constructor property. + // Make sure that DOM nodes and window objects don't pass through, as well + if ( !obj || jQuery.type(obj) !== "object" || obj.nodeType || jQuery.isWindow( obj ) ) { + return false; + } + + try { + // Not own constructor property must be Object + if ( obj.constructor && + !hasOwn.call(obj, "constructor") && + !hasOwn.call(obj.constructor.prototype, "isPrototypeOf") ) { + return false; + } + } catch ( e ) { + // IE8,9 Will throw exceptions on certain host objects #9897 + return false; + } + + // Support: IE<9 + // Handle iteration over inherited properties before own properties. 
+ if ( support.ownLast ) { + for ( key in obj ) { + return hasOwn.call( obj, key ); + } + } + + // Own properties are enumerated firstly, so to speed up, + // if last one is own, then all properties are own. + for ( key in obj ) {} + + return key === undefined || hasOwn.call( obj, key ); + }, + + type: function( obj ) { + if ( obj == null ) { + return obj + ""; + } + return typeof obj === "object" || typeof obj === "function" ? + class2type[ toString.call(obj) ] || "object" : + typeof obj; + }, + + // Evaluates a script in a global context + // Workarounds based on findings by Jim Driscoll + // http://weblogs.java.net/blog/driscoll/archive/2009/09/08/eval-javascript-global-context + globalEval: function( data ) { + if ( data && jQuery.trim( data ) ) { + // We use execScript on Internet Explorer + // We use an anonymous function so that context is window + // rather than jQuery in Firefox + ( window.execScript || function( data ) { + window[ "eval" ].call( window, data ); + } )( data ); + } + }, + + // Convert dashed to camelCase; used by the css and data modules + // Microsoft forgot to hump their vendor prefix (#9572) + camelCase: function( string ) { + return string.replace( rmsPrefix, "ms-" ).replace( rdashAlpha, fcamelCase ); + }, + + nodeName: function( elem, name ) { + return elem.nodeName && elem.nodeName.toLowerCase() === name.toLowerCase(); + }, + + // args is for internal usage only + each: function( obj, callback, args ) { + var value, + i = 0, + length = obj.length, + isArray = isArraylike( obj ); + + if ( args ) { + if ( isArray ) { + for ( ; i < length; i++ ) { + value = callback.apply( obj[ i ], args ); + + if ( value === false ) { + break; + } + } + } else { + for ( i in obj ) { + value = callback.apply( obj[ i ], args ); + + if ( value === false ) { + break; + } + } + } + + // A special, fast, case for the most common use of each + } else { + if ( isArray ) { + for ( ; i < length; i++ ) { + value = callback.call( obj[ i ], i, obj[ i ] ); + + if ( value === false ) { + break; + } + } + } else { + for ( i in obj ) { + value = callback.call( obj[ i ], i, obj[ i ] ); + + if ( value === false ) { + break; + } + } + } + } + + return obj; + }, + + // Support: Android<4.1, IE<9 + trim: function( text ) { + return text == null ? + "" : + ( text + "" ).replace( rtrim, "" ); + }, + + // results is for internal usage only + makeArray: function( arr, results ) { + var ret = results || []; + + if ( arr != null ) { + if ( isArraylike( Object(arr) ) ) { + jQuery.merge( ret, + typeof arr === "string" ? + [ arr ] : arr + ); + } else { + push.call( ret, arr ); + } + } + + return ret; + }, + + inArray: function( elem, arr, i ) { + var len; + + if ( arr ) { + if ( indexOf ) { + return indexOf.call( arr, elem, i ); + } + + len = arr.length; + i = i ? i < 0 ? 
Math.max( 0, len + i ) : i : 0; + + for ( ; i < len; i++ ) { + // Skip accessing in sparse arrays + if ( i in arr && arr[ i ] === elem ) { + return i; + } + } + } + + return -1; + }, + + merge: function( first, second ) { + var len = +second.length, + j = 0, + i = first.length; + + while ( j < len ) { + first[ i++ ] = second[ j++ ]; + } + + // Support: IE<9 + // Workaround casting of .length to NaN on otherwise arraylike objects (e.g., NodeLists) + if ( len !== len ) { + while ( second[j] !== undefined ) { + first[ i++ ] = second[ j++ ]; + } + } + + first.length = i; + + return first; + }, + + grep: function( elems, callback, invert ) { + var callbackInverse, + matches = [], + i = 0, + length = elems.length, + callbackExpect = !invert; + + // Go through the array, only saving the items + // that pass the validator function + for ( ; i < length; i++ ) { + callbackInverse = !callback( elems[ i ], i ); + if ( callbackInverse !== callbackExpect ) { + matches.push( elems[ i ] ); + } + } + + return matches; + }, + + // arg is for internal usage only + map: function( elems, callback, arg ) { + var value, + i = 0, + length = elems.length, + isArray = isArraylike( elems ), + ret = []; + + // Go through the array, translating each of the items to their new values + if ( isArray ) { + for ( ; i < length; i++ ) { + value = callback( elems[ i ], i, arg ); + + if ( value != null ) { + ret.push( value ); + } + } + + // Go through every key on the object, + } else { + for ( i in elems ) { + value = callback( elems[ i ], i, arg ); + + if ( value != null ) { + ret.push( value ); + } + } + } + + // Flatten any nested arrays + return concat.apply( [], ret ); + }, + + // A global GUID counter for objects + guid: 1, + + // Bind a function to a context, optionally partially applying any + // arguments. + proxy: function( fn, context ) { + var args, proxy, tmp; + + if ( typeof context === "string" ) { + tmp = fn[ context ]; + context = fn; + fn = tmp; + } + + // Quick check to determine if target is callable, in the spec + // this throws a TypeError, but we will just return undefined. + if ( !jQuery.isFunction( fn ) ) { + return undefined; + } + + // Simulated bind + args = slice.call( arguments, 2 ); + proxy = function() { + return fn.apply( context || this, args.concat( slice.call( arguments ) ) ); + }; + + // Set the guid of unique handler to the same of original handler, so it can be removed + proxy.guid = fn.guid = fn.guid || jQuery.guid++; + + return proxy; + }, + + now: function() { + return +( new Date() ); + }, + + // jQuery.support is not used in Core but other projects attach their + // properties to it so it needs to exist. + support: support +}); + +// Populate the class2type map +jQuery.each("Boolean Number String Function Array Date RegExp Object Error".split(" "), function(i, name) { + class2type[ "[object " + name + "]" ] = name.toLowerCase(); +}); + +function isArraylike( obj ) { + var length = obj.length, + type = jQuery.type( obj ); + + if ( type === "function" || jQuery.isWindow( obj ) ) { + return false; + } + + if ( obj.nodeType === 1 && length ) { + return true; + } + + return type === "array" || length === 0 || + typeof length === "number" && length > 0 && ( length - 1 ) in obj; +} +var Sizzle = +/*! + * Sizzle CSS Selector Engine v1.10.19 + * http://sizzlejs.com/ + * + * Copyright 2013 jQuery Foundation, Inc. 
and other contributors + * Released under the MIT license + * http://jquery.org/license + * + * Date: 2014-04-18 + */ +(function( window ) { + +var i, + support, + Expr, + getText, + isXML, + tokenize, + compile, + select, + outermostContext, + sortInput, + hasDuplicate, + + // Local document vars + setDocument, + document, + docElem, + documentIsHTML, + rbuggyQSA, + rbuggyMatches, + matches, + contains, + + // Instance-specific data + expando = "sizzle" + -(new Date()), + preferredDoc = window.document, + dirruns = 0, + done = 0, + classCache = createCache(), + tokenCache = createCache(), + compilerCache = createCache(), + sortOrder = function( a, b ) { + if ( a === b ) { + hasDuplicate = true; + } + return 0; + }, + + // General-purpose constants + strundefined = typeof undefined, + MAX_NEGATIVE = 1 << 31, + + // Instance methods + hasOwn = ({}).hasOwnProperty, + arr = [], + pop = arr.pop, + push_native = arr.push, + push = arr.push, + slice = arr.slice, + // Use a stripped-down indexOf if we can't use a native one + indexOf = arr.indexOf || function( elem ) { + var i = 0, + len = this.length; + for ( ; i < len; i++ ) { + if ( this[i] === elem ) { + return i; + } + } + return -1; + }, + + booleans = "checked|selected|async|autofocus|autoplay|controls|defer|disabled|hidden|ismap|loop|multiple|open|readonly|required|scoped", + + // Regular expressions + + // Whitespace characters http://www.w3.org/TR/css3-selectors/#whitespace + whitespace = "[\\x20\\t\\r\\n\\f]", + // http://www.w3.org/TR/css3-syntax/#characters + characterEncoding = "(?:\\\\.|[\\w-]|[^\\x00-\\xa0])+", + + // Loosely modeled on CSS identifier characters + // An unquoted value should be a CSS identifier http://www.w3.org/TR/css3-selectors/#attribute-selectors + // Proper syntax: http://www.w3.org/TR/CSS21/syndata.html#value-def-identifier + identifier = characterEncoding.replace( "w", "w#" ), + + // Attribute selectors: http://www.w3.org/TR/selectors/#attribute-selectors + attributes = "\\[" + whitespace + "*(" + characterEncoding + ")(?:" + whitespace + + // Operator (capture 2) + "*([*^$|!~]?=)" + whitespace + + // "Attribute values must be CSS identifiers [capture 5] or strings [capture 3 or capture 4]" + "*(?:'((?:\\\\.|[^\\\\'])*)'|\"((?:\\\\.|[^\\\\\"])*)\"|(" + identifier + "))|)" + whitespace + + "*\\]", + + pseudos = ":(" + characterEncoding + ")(?:\\((" + + // To reduce the number of selectors needing tokenize in the preFilter, prefer arguments: + // 1. quoted (capture 3; capture 4 or capture 5) + "('((?:\\\\.|[^\\\\'])*)'|\"((?:\\\\.|[^\\\\\"])*)\")|" + + // 2. simple (capture 6) + "((?:\\\\.|[^\\\\()[\\]]|" + attributes + ")*)|" + + // 3. 
anything else (capture 2) + ".*" + + ")\\)|)", + + // Leading and non-escaped trailing whitespace, capturing some non-whitespace characters preceding the latter + rtrim = new RegExp( "^" + whitespace + "+|((?:^|[^\\\\])(?:\\\\.)*)" + whitespace + "+$", "g" ), + + rcomma = new RegExp( "^" + whitespace + "*," + whitespace + "*" ), + rcombinators = new RegExp( "^" + whitespace + "*([>+~]|" + whitespace + ")" + whitespace + "*" ), + + rattributeQuotes = new RegExp( "=" + whitespace + "*([^\\]'\"]*?)" + whitespace + "*\\]", "g" ), + + rpseudo = new RegExp( pseudos ), + ridentifier = new RegExp( "^" + identifier + "$" ), + + matchExpr = { + "ID": new RegExp( "^#(" + characterEncoding + ")" ), + "CLASS": new RegExp( "^\\.(" + characterEncoding + ")" ), + "TAG": new RegExp( "^(" + characterEncoding.replace( "w", "w*" ) + ")" ), + "ATTR": new RegExp( "^" + attributes ), + "PSEUDO": new RegExp( "^" + pseudos ), + "CHILD": new RegExp( "^:(only|first|last|nth|nth-last)-(child|of-type)(?:\\(" + whitespace + + "*(even|odd|(([+-]|)(\\d*)n|)" + whitespace + "*(?:([+-]|)" + whitespace + + "*(\\d+)|))" + whitespace + "*\\)|)", "i" ), + "bool": new RegExp( "^(?:" + booleans + ")$", "i" ), + // For use in libraries implementing .is() + // We use this for POS matching in `select` + "needsContext": new RegExp( "^" + whitespace + "*[>+~]|:(even|odd|eq|gt|lt|nth|first|last)(?:\\(" + + whitespace + "*((?:-\\d)?\\d*)" + whitespace + "*\\)|)(?=[^-]|$)", "i" ) + }, + + rinputs = /^(?:input|select|textarea|button)$/i, + rheader = /^h\d$/i, + + rnative = /^[^{]+\{\s*\[native \w/, + + // Easily-parseable/retrievable ID or TAG or CLASS selectors + rquickExpr = /^(?:#([\w-]+)|(\w+)|\.([\w-]+))$/, + + rsibling = /[+~]/, + rescape = /'|\\/g, + + // CSS escapes http://www.w3.org/TR/CSS21/syndata.html#escaped-characters + runescape = new RegExp( "\\\\([\\da-f]{1,6}" + whitespace + "?|(" + whitespace + ")|.)", "ig" ), + funescape = function( _, escaped, escapedWhitespace ) { + var high = "0x" + escaped - 0x10000; + // NaN means non-codepoint + // Support: Firefox<24 + // Workaround erroneous numeric interpretation of +"0x" + return high !== high || escapedWhitespace ? + escaped : + high < 0 ? + // BMP codepoint + String.fromCharCode( high + 0x10000 ) : + // Supplemental Plane codepoint (surrogate pair) + String.fromCharCode( high >> 10 | 0xD800, high & 0x3FF | 0xDC00 ); + }; + +// Optimize for push.apply( _, NodeList ) +try { + push.apply( + (arr = slice.call( preferredDoc.childNodes )), + preferredDoc.childNodes + ); + // Support: Android<4.0 + // Detect silently failing push.apply + arr[ preferredDoc.childNodes.length ].nodeType; +} catch ( e ) { + push = { apply: arr.length ? + + // Leverage slice if possible + function( target, els ) { + push_native.apply( target, slice.call(els) ); + } : + + // Support: IE<9 + // Otherwise append directly + function( target, els ) { + var j = target.length, + i = 0; + // Can't trust NodeList.length + while ( (target[j++] = els[i++]) ) {} + target.length = j - 1; + } + }; +} + +function Sizzle( selector, context, results, seed ) { + var match, elem, m, nodeType, + // QSA vars + i, groups, old, nid, newContext, newSelector; + + if ( ( context ? 
context.ownerDocument || context : preferredDoc ) !== document ) { + setDocument( context ); + } + + context = context || document; + results = results || []; + + if ( !selector || typeof selector !== "string" ) { + return results; + } + + if ( (nodeType = context.nodeType) !== 1 && nodeType !== 9 ) { + return []; + } + + if ( documentIsHTML && !seed ) { + + // Shortcuts + if ( (match = rquickExpr.exec( selector )) ) { + // Speed-up: Sizzle("#ID") + if ( (m = match[1]) ) { + if ( nodeType === 9 ) { + elem = context.getElementById( m ); + // Check parentNode to catch when Blackberry 4.6 returns + // nodes that are no longer in the document (jQuery #6963) + if ( elem && elem.parentNode ) { + // Handle the case where IE, Opera, and Webkit return items + // by name instead of ID + if ( elem.id === m ) { + results.push( elem ); + return results; + } + } else { + return results; + } + } else { + // Context is not a document + if ( context.ownerDocument && (elem = context.ownerDocument.getElementById( m )) && + contains( context, elem ) && elem.id === m ) { + results.push( elem ); + return results; + } + } + + // Speed-up: Sizzle("TAG") + } else if ( match[2] ) { + push.apply( results, context.getElementsByTagName( selector ) ); + return results; + + // Speed-up: Sizzle(".CLASS") + } else if ( (m = match[3]) && support.getElementsByClassName && context.getElementsByClassName ) { + push.apply( results, context.getElementsByClassName( m ) ); + return results; + } + } + + // QSA path + if ( support.qsa && (!rbuggyQSA || !rbuggyQSA.test( selector )) ) { + nid = old = expando; + newContext = context; + newSelector = nodeType === 9 && selector; + + // qSA works strangely on Element-rooted queries + // We can work around this by specifying an extra ID on the root + // and working up from there (Thanks to Andrew Dupont for the technique) + // IE 8 doesn't work on object elements + if ( nodeType === 1 && context.nodeName.toLowerCase() !== "object" ) { + groups = tokenize( selector ); + + if ( (old = context.getAttribute("id")) ) { + nid = old.replace( rescape, "\\$&" ); + } else { + context.setAttribute( "id", nid ); + } + nid = "[id='" + nid + "'] "; + + i = groups.length; + while ( i-- ) { + groups[i] = nid + toSelector( groups[i] ); + } + newContext = rsibling.test( selector ) && testContext( context.parentNode ) || context; + newSelector = groups.join(","); + } + + if ( newSelector ) { + try { + push.apply( results, + newContext.querySelectorAll( newSelector ) + ); + return results; + } catch(qsaError) { + } finally { + if ( !old ) { + context.removeAttribute("id"); + } + } + } + } + } + + // All others + return select( selector.replace( rtrim, "$1" ), context, results, seed ); +} + +/** + * Create key-value caches of limited size + * @returns {Function(string, Object)} Returns the Object data after storing it on itself with + * property name the (space-suffixed) string and (if the cache is larger than Expr.cacheLength) + * deleting the oldest entry + */ +function createCache() { + var keys = []; + + function cache( key, value ) { + // Use (key + " ") to avoid collision with native prototype properties (see Issue #157) + if ( keys.push( key + " " ) > Expr.cacheLength ) { + // Only keep the most recent entries + delete cache[ keys.shift() ]; + } + return (cache[ key + " " ] = value); + } + return cache; +} + +/** + * Mark a function for special use by Sizzle + * @param {Function} fn The function to mark + */ +function markFunction( fn ) { + fn[ expando ] = true; + return fn; +} + +/** + * Support 
testing using an element + * @param {Function} fn Passed the created div and expects a boolean result + */ +function assert( fn ) { + var div = document.createElement("div"); + + try { + return !!fn( div ); + } catch (e) { + return false; + } finally { + // Remove from its parent by default + if ( div.parentNode ) { + div.parentNode.removeChild( div ); + } + // release memory in IE + div = null; + } +} + +/** + * Adds the same handler for all of the specified attrs + * @param {String} attrs Pipe-separated list of attributes + * @param {Function} handler The method that will be applied + */ +function addHandle( attrs, handler ) { + var arr = attrs.split("|"), + i = attrs.length; + + while ( i-- ) { + Expr.attrHandle[ arr[i] ] = handler; + } +} + +/** + * Checks document order of two siblings + * @param {Element} a + * @param {Element} b + * @returns {Number} Returns less than 0 if a precedes b, greater than 0 if a follows b + */ +function siblingCheck( a, b ) { + var cur = b && a, + diff = cur && a.nodeType === 1 && b.nodeType === 1 && + ( ~b.sourceIndex || MAX_NEGATIVE ) - + ( ~a.sourceIndex || MAX_NEGATIVE ); + + // Use IE sourceIndex if available on both nodes + if ( diff ) { + return diff; + } + + // Check if b follows a + if ( cur ) { + while ( (cur = cur.nextSibling) ) { + if ( cur === b ) { + return -1; + } + } + } + + return a ? 1 : -1; +} + +/** + * Returns a function to use in pseudos for input types + * @param {String} type + */ +function createInputPseudo( type ) { + return function( elem ) { + var name = elem.nodeName.toLowerCase(); + return name === "input" && elem.type === type; + }; +} + +/** + * Returns a function to use in pseudos for buttons + * @param {String} type + */ +function createButtonPseudo( type ) { + return function( elem ) { + var name = elem.nodeName.toLowerCase(); + return (name === "input" || name === "button") && elem.type === type; + }; +} + +/** + * Returns a function to use in pseudos for positionals + * @param {Function} fn + */ +function createPositionalPseudo( fn ) { + return markFunction(function( argument ) { + argument = +argument; + return markFunction(function( seed, matches ) { + var j, + matchIndexes = fn( [], seed.length, argument ), + i = matchIndexes.length; + + // Match elements found at the specified indexes + while ( i-- ) { + if ( seed[ (j = matchIndexes[i]) ] ) { + seed[j] = !(matches[j] = seed[j]); + } + } + }); + }); +} + +/** + * Checks a node for validity as a Sizzle context + * @param {Element|Object=} context + * @returns {Element|Object|Boolean} The input node if acceptable, otherwise a falsy value + */ +function testContext( context ) { + return context && typeof context.getElementsByTagName !== strundefined && context; +} + +// Expose support vars for convenience +support = Sizzle.support = {}; + +/** + * Detects XML nodes + * @param {Element|Object} elem An element or a document + * @returns {Boolean} True iff elem is a non-HTML XML node + */ +isXML = Sizzle.isXML = function( elem ) { + // documentElement is verified for cases where it doesn't yet exist + // (such as loading iframes in IE - #4833) + var documentElement = elem && (elem.ownerDocument || elem).documentElement; + return documentElement ? 
documentElement.nodeName !== "HTML" : false; +}; + +/** + * Sets document-related variables once based on the current document + * @param {Element|Object} [doc] An element or document object to use to set the document + * @returns {Object} Returns the current document + */ +setDocument = Sizzle.setDocument = function( node ) { + var hasCompare, + doc = node ? node.ownerDocument || node : preferredDoc, + parent = doc.defaultView; + + // If no document and documentElement is available, return + if ( doc === document || doc.nodeType !== 9 || !doc.documentElement ) { + return document; + } + + // Set our document + document = doc; + docElem = doc.documentElement; + + // Support tests + documentIsHTML = !isXML( doc ); + + // Support: IE>8 + // If iframe document is assigned to "document" variable and if iframe has been reloaded, + // IE will throw "permission denied" error when accessing "document" variable, see jQuery #13936 + // IE6-8 do not support the defaultView property so parent will be undefined + if ( parent && parent !== parent.top ) { + // IE11 does not have attachEvent, so all must suffer + if ( parent.addEventListener ) { + parent.addEventListener( "unload", function() { + setDocument(); + }, false ); + } else if ( parent.attachEvent ) { + parent.attachEvent( "onunload", function() { + setDocument(); + }); + } + } + + /* Attributes + ---------------------------------------------------------------------- */ + + // Support: IE<8 + // Verify that getAttribute really returns attributes and not properties (excepting IE8 booleans) + support.attributes = assert(function( div ) { + div.className = "i"; + return !div.getAttribute("className"); + }); + + /* getElement(s)By* + ---------------------------------------------------------------------- */ + + // Check if getElementsByTagName("*") returns only elements + support.getElementsByTagName = assert(function( div ) { + div.appendChild( doc.createComment("") ); + return !div.getElementsByTagName("*").length; + }); + + // Check if getElementsByClassName can be trusted + support.getElementsByClassName = rnative.test( doc.getElementsByClassName ) && assert(function( div ) { + div.innerHTML = "
"; + + // Support: Safari<4 + // Catch class over-caching + div.firstChild.className = "i"; + // Support: Opera<10 + // Catch gEBCN failure to find non-leading classes + return div.getElementsByClassName("i").length === 2; + }); + + // Support: IE<10 + // Check if getElementById returns elements by name + // The broken getElementById methods don't pick up programatically-set names, + // so use a roundabout getElementsByName test + support.getById = assert(function( div ) { + docElem.appendChild( div ).id = expando; + return !doc.getElementsByName || !doc.getElementsByName( expando ).length; + }); + + // ID find and filter + if ( support.getById ) { + Expr.find["ID"] = function( id, context ) { + if ( typeof context.getElementById !== strundefined && documentIsHTML ) { + var m = context.getElementById( id ); + // Check parentNode to catch when Blackberry 4.6 returns + // nodes that are no longer in the document #6963 + return m && m.parentNode ? [ m ] : []; + } + }; + Expr.filter["ID"] = function( id ) { + var attrId = id.replace( runescape, funescape ); + return function( elem ) { + return elem.getAttribute("id") === attrId; + }; + }; + } else { + // Support: IE6/7 + // getElementById is not reliable as a find shortcut + delete Expr.find["ID"]; + + Expr.filter["ID"] = function( id ) { + var attrId = id.replace( runescape, funescape ); + return function( elem ) { + var node = typeof elem.getAttributeNode !== strundefined && elem.getAttributeNode("id"); + return node && node.value === attrId; + }; + }; + } + + // Tag + Expr.find["TAG"] = support.getElementsByTagName ? + function( tag, context ) { + if ( typeof context.getElementsByTagName !== strundefined ) { + return context.getElementsByTagName( tag ); + } + } : + function( tag, context ) { + var elem, + tmp = [], + i = 0, + results = context.getElementsByTagName( tag ); + + // Filter out possible comments + if ( tag === "*" ) { + while ( (elem = results[i++]) ) { + if ( elem.nodeType === 1 ) { + tmp.push( elem ); + } + } + + return tmp; + } + return results; + }; + + // Class + Expr.find["CLASS"] = support.getElementsByClassName && function( className, context ) { + if ( typeof context.getElementsByClassName !== strundefined && documentIsHTML ) { + return context.getElementsByClassName( className ); + } + }; + + /* QSA/matchesSelector + ---------------------------------------------------------------------- */ + + // QSA and matchesSelector support + + // matchesSelector(:active) reports false when true (IE9/Opera 11.5) + rbuggyMatches = []; + + // qSa(:focus) reports false when true (Chrome 21) + // We allow this because of a bug in IE8/9 that throws an error + // whenever `document.activeElement` is accessed on an iframe + // So, we allow :focus to pass through QSA all the time to avoid the IE error + // See http://bugs.jquery.com/ticket/13378 + rbuggyQSA = []; + + if ( (support.qsa = rnative.test( doc.querySelectorAll )) ) { + // Build QSA regex + // Regex strategy adopted from Diego Perini + assert(function( div ) { + // Select is set to empty string on purpose + // This is to test IE's treatment of not explicitly + // setting a boolean content attribute, + // since its presence should be enough + // http://bugs.jquery.com/ticket/12359 + div.innerHTML = ""; + + // Support: IE8, Opera 11-12.16 + // Nothing should be selected when empty strings follow ^= or $= or *= + // The test attribute must be unknown in Opera but "safe" for WinRT + // http://msdn.microsoft.com/en-us/library/ie/hh465388.aspx#attribute_section + if ( 
div.querySelectorAll("[msallowclip^='']").length ) { + rbuggyQSA.push( "[*^$]=" + whitespace + "*(?:''|\"\")" ); + } + + // Support: IE8 + // Boolean attributes and "value" are not treated correctly + if ( !div.querySelectorAll("[selected]").length ) { + rbuggyQSA.push( "\\[" + whitespace + "*(?:value|" + booleans + ")" ); + } + + // Webkit/Opera - :checked should return selected option elements + // http://www.w3.org/TR/2011/REC-css3-selectors-20110929/#checked + // IE8 throws error here and will not see later tests + if ( !div.querySelectorAll(":checked").length ) { + rbuggyQSA.push(":checked"); + } + }); + + assert(function( div ) { + // Support: Windows 8 Native Apps + // The type and name attributes are restricted during .innerHTML assignment + var input = doc.createElement("input"); + input.setAttribute( "type", "hidden" ); + div.appendChild( input ).setAttribute( "name", "D" ); + + // Support: IE8 + // Enforce case-sensitivity of name attribute + if ( div.querySelectorAll("[name=d]").length ) { + rbuggyQSA.push( "name" + whitespace + "*[*^$|!~]?=" ); + } + + // FF 3.5 - :enabled/:disabled and hidden elements (hidden elements are still enabled) + // IE8 throws error here and will not see later tests + if ( !div.querySelectorAll(":enabled").length ) { + rbuggyQSA.push( ":enabled", ":disabled" ); + } + + // Opera 10-11 does not throw on post-comma invalid pseudos + div.querySelectorAll("*,:x"); + rbuggyQSA.push(",.*:"); + }); + } + + if ( (support.matchesSelector = rnative.test( (matches = docElem.matches || + docElem.webkitMatchesSelector || + docElem.mozMatchesSelector || + docElem.oMatchesSelector || + docElem.msMatchesSelector) )) ) { + + assert(function( div ) { + // Check to see if it's possible to do matchesSelector + // on a disconnected node (IE 9) + support.disconnectedMatch = matches.call( div, "div" ); + + // This should fail with an exception + // Gecko does not error, returns false instead + matches.call( div, "[s!='']:x" ); + rbuggyMatches.push( "!=", pseudos ); + }); + } + + rbuggyQSA = rbuggyQSA.length && new RegExp( rbuggyQSA.join("|") ); + rbuggyMatches = rbuggyMatches.length && new RegExp( rbuggyMatches.join("|") ); + + /* Contains + ---------------------------------------------------------------------- */ + hasCompare = rnative.test( docElem.compareDocumentPosition ); + + // Element contains another + // Purposefully does not implement inclusive descendent + // As in, an element does not contain itself + contains = hasCompare || rnative.test( docElem.contains ) ? + function( a, b ) { + var adown = a.nodeType === 9 ? a.documentElement : a, + bup = b && b.parentNode; + return a === bup || !!( bup && bup.nodeType === 1 && ( + adown.contains ? + adown.contains( bup ) : + a.compareDocumentPosition && a.compareDocumentPosition( bup ) & 16 + )); + } : + function( a, b ) { + if ( b ) { + while ( (b = b.parentNode) ) { + if ( b === a ) { + return true; + } + } + } + return false; + }; + + /* Sorting + ---------------------------------------------------------------------- */ + + // Document order sorting + sortOrder = hasCompare ? + function( a, b ) { + + // Flag for duplicate removal + if ( a === b ) { + hasDuplicate = true; + return 0; + } + + // Sort on method existence if only one input has compareDocumentPosition + var compare = !a.compareDocumentPosition - !b.compareDocumentPosition; + if ( compare ) { + return compare; + } + + // Calculate position if both inputs belong to the same document + compare = ( a.ownerDocument || a ) === ( b.ownerDocument || b ) ? 
+ a.compareDocumentPosition( b ) : + + // Otherwise we know they are disconnected + 1; + + // Disconnected nodes + if ( compare & 1 || + (!support.sortDetached && b.compareDocumentPosition( a ) === compare) ) { + + // Choose the first element that is related to our preferred document + if ( a === doc || a.ownerDocument === preferredDoc && contains(preferredDoc, a) ) { + return -1; + } + if ( b === doc || b.ownerDocument === preferredDoc && contains(preferredDoc, b) ) { + return 1; + } + + // Maintain original order + return sortInput ? + ( indexOf.call( sortInput, a ) - indexOf.call( sortInput, b ) ) : + 0; + } + + return compare & 4 ? -1 : 1; + } : + function( a, b ) { + // Exit early if the nodes are identical + if ( a === b ) { + hasDuplicate = true; + return 0; + } + + var cur, + i = 0, + aup = a.parentNode, + bup = b.parentNode, + ap = [ a ], + bp = [ b ]; + + // Parentless nodes are either documents or disconnected + if ( !aup || !bup ) { + return a === doc ? -1 : + b === doc ? 1 : + aup ? -1 : + bup ? 1 : + sortInput ? + ( indexOf.call( sortInput, a ) - indexOf.call( sortInput, b ) ) : + 0; + + // If the nodes are siblings, we can do a quick check + } else if ( aup === bup ) { + return siblingCheck( a, b ); + } + + // Otherwise we need full lists of their ancestors for comparison + cur = a; + while ( (cur = cur.parentNode) ) { + ap.unshift( cur ); + } + cur = b; + while ( (cur = cur.parentNode) ) { + bp.unshift( cur ); + } + + // Walk down the tree looking for a discrepancy + while ( ap[i] === bp[i] ) { + i++; + } + + return i ? + // Do a sibling check if the nodes have a common ancestor + siblingCheck( ap[i], bp[i] ) : + + // Otherwise nodes in our document sort first + ap[i] === preferredDoc ? -1 : + bp[i] === preferredDoc ? 1 : + 0; + }; + + return doc; +}; + +Sizzle.matches = function( expr, elements ) { + return Sizzle( expr, null, null, elements ); +}; + +Sizzle.matchesSelector = function( elem, expr ) { + // Set document vars if needed + if ( ( elem.ownerDocument || elem ) !== document ) { + setDocument( elem ); + } + + // Make sure that attribute selectors are quoted + expr = expr.replace( rattributeQuotes, "='$1']" ); + + if ( support.matchesSelector && documentIsHTML && + ( !rbuggyMatches || !rbuggyMatches.test( expr ) ) && + ( !rbuggyQSA || !rbuggyQSA.test( expr ) ) ) { + + try { + var ret = matches.call( elem, expr ); + + // IE 9's matchesSelector returns false on disconnected nodes + if ( ret || support.disconnectedMatch || + // As well, disconnected nodes are said to be in a document + // fragment in IE 9 + elem.document && elem.document.nodeType !== 11 ) { + return ret; + } + } catch(e) {} + } + + return Sizzle( expr, document, null, [ elem ] ).length > 0; +}; + +Sizzle.contains = function( context, elem ) { + // Set document vars if needed + if ( ( context.ownerDocument || context ) !== document ) { + setDocument( context ); + } + return contains( context, elem ); +}; + +Sizzle.attr = function( elem, name ) { + // Set document vars if needed + if ( ( elem.ownerDocument || elem ) !== document ) { + setDocument( elem ); + } + + var fn = Expr.attrHandle[ name.toLowerCase() ], + // Don't get fooled by Object.prototype properties (jQuery #13807) + val = fn && hasOwn.call( Expr.attrHandle, name.toLowerCase() ) ? + fn( elem, name, !documentIsHTML ) : + undefined; + + return val !== undefined ? + val : + support.attributes || !documentIsHTML ? + elem.getAttribute( name ) : + (val = elem.getAttributeNode(name)) && val.specified ? 
+ val.value : + null; +}; + +Sizzle.error = function( msg ) { + throw new Error( "Syntax error, unrecognized expression: " + msg ); +}; + +/** + * Document sorting and removing duplicates + * @param {ArrayLike} results + */ +Sizzle.uniqueSort = function( results ) { + var elem, + duplicates = [], + j = 0, + i = 0; + + // Unless we *know* we can detect duplicates, assume their presence + hasDuplicate = !support.detectDuplicates; + sortInput = !support.sortStable && results.slice( 0 ); + results.sort( sortOrder ); + + if ( hasDuplicate ) { + while ( (elem = results[i++]) ) { + if ( elem === results[ i ] ) { + j = duplicates.push( i ); + } + } + while ( j-- ) { + results.splice( duplicates[ j ], 1 ); + } + } + + // Clear input after sorting to release objects + // See https://github.com/jquery/sizzle/pull/225 + sortInput = null; + + return results; +}; + +/** + * Utility function for retrieving the text value of an array of DOM nodes + * @param {Array|Element} elem + */ +getText = Sizzle.getText = function( elem ) { + var node, + ret = "", + i = 0, + nodeType = elem.nodeType; + + if ( !nodeType ) { + // If no nodeType, this is expected to be an array + while ( (node = elem[i++]) ) { + // Do not traverse comment nodes + ret += getText( node ); + } + } else if ( nodeType === 1 || nodeType === 9 || nodeType === 11 ) { + // Use textContent for elements + // innerText usage removed for consistency of new lines (jQuery #11153) + if ( typeof elem.textContent === "string" ) { + return elem.textContent; + } else { + // Traverse its children + for ( elem = elem.firstChild; elem; elem = elem.nextSibling ) { + ret += getText( elem ); + } + } + } else if ( nodeType === 3 || nodeType === 4 ) { + return elem.nodeValue; + } + // Do not include comment or processing instruction nodes + + return ret; +}; + +Expr = Sizzle.selectors = { + + // Can be adjusted by the user + cacheLength: 50, + + createPseudo: markFunction, + + match: matchExpr, + + attrHandle: {}, + + find: {}, + + relative: { + ">": { dir: "parentNode", first: true }, + " ": { dir: "parentNode" }, + "+": { dir: "previousSibling", first: true }, + "~": { dir: "previousSibling" } + }, + + preFilter: { + "ATTR": function( match ) { + match[1] = match[1].replace( runescape, funescape ); + + // Move the given value to match[3] whether quoted or unquoted + match[3] = ( match[3] || match[4] || match[5] || "" ).replace( runescape, funescape ); + + if ( match[2] === "~=" ) { + match[3] = " " + match[3] + " "; + } + + return match.slice( 0, 4 ); + }, + + "CHILD": function( match ) { + /* matches from matchExpr["CHILD"] + 1 type (only|nth|...) + 2 what (child|of-type) + 3 argument (even|odd|\d*|\d*n([+-]\d+)?|...) + 4 xn-component of xn+y argument ([+-]?\d*n|) + 5 sign of xn-component + 6 x of xn-component + 7 sign of y-component + 8 y of y-component + */ + match[1] = match[1].toLowerCase(); + + if ( match[1].slice( 0, 3 ) === "nth" ) { + // nth-* requires argument + if ( !match[3] ) { + Sizzle.error( match[0] ); + } + + // numeric x and y parameters for Expr.filter.CHILD + // remember that false/true cast respectively to 0/1 + match[4] = +( match[4] ? 
match[5] + (match[6] || 1) : 2 * ( match[3] === "even" || match[3] === "odd" ) ); + match[5] = +( ( match[7] + match[8] ) || match[3] === "odd" ); + + // other types prohibit arguments + } else if ( match[3] ) { + Sizzle.error( match[0] ); + } + + return match; + }, + + "PSEUDO": function( match ) { + var excess, + unquoted = !match[6] && match[2]; + + if ( matchExpr["CHILD"].test( match[0] ) ) { + return null; + } + + // Accept quoted arguments as-is + if ( match[3] ) { + match[2] = match[4] || match[5] || ""; + + // Strip excess characters from unquoted arguments + } else if ( unquoted && rpseudo.test( unquoted ) && + // Get excess from tokenize (recursively) + (excess = tokenize( unquoted, true )) && + // advance to the next closing parenthesis + (excess = unquoted.indexOf( ")", unquoted.length - excess ) - unquoted.length) ) { + + // excess is a negative index + match[0] = match[0].slice( 0, excess ); + match[2] = unquoted.slice( 0, excess ); + } + + // Return only captures needed by the pseudo filter method (type and argument) + return match.slice( 0, 3 ); + } + }, + + filter: { + + "TAG": function( nodeNameSelector ) { + var nodeName = nodeNameSelector.replace( runescape, funescape ).toLowerCase(); + return nodeNameSelector === "*" ? + function() { return true; } : + function( elem ) { + return elem.nodeName && elem.nodeName.toLowerCase() === nodeName; + }; + }, + + "CLASS": function( className ) { + var pattern = classCache[ className + " " ]; + + return pattern || + (pattern = new RegExp( "(^|" + whitespace + ")" + className + "(" + whitespace + "|$)" )) && + classCache( className, function( elem ) { + return pattern.test( typeof elem.className === "string" && elem.className || typeof elem.getAttribute !== strundefined && elem.getAttribute("class") || "" ); + }); + }, + + "ATTR": function( name, operator, check ) { + return function( elem ) { + var result = Sizzle.attr( elem, name ); + + if ( result == null ) { + return operator === "!="; + } + if ( !operator ) { + return true; + } + + result += ""; + + return operator === "=" ? result === check : + operator === "!=" ? result !== check : + operator === "^=" ? check && result.indexOf( check ) === 0 : + operator === "*=" ? check && result.indexOf( check ) > -1 : + operator === "$=" ? check && result.slice( -check.length ) === check : + operator === "~=" ? ( " " + result + " " ).indexOf( check ) > -1 : + operator === "|=" ? result === check || result.slice( 0, check.length + 1 ) === check + "-" : + false; + }; + }, + + "CHILD": function( type, what, argument, first, last ) { + var simple = type.slice( 0, 3 ) !== "nth", + forward = type.slice( -4 ) !== "last", + ofType = what === "of-type"; + + return first === 1 && last === 0 ? + + // Shortcut for :nth-*(n) + function( elem ) { + return !!elem.parentNode; + } : + + function( elem, context, xml ) { + var cache, outerCache, node, diff, nodeIndex, start, + dir = simple !== forward ? "nextSibling" : "previousSibling", + parent = elem.parentNode, + name = ofType && elem.nodeName.toLowerCase(), + useCache = !xml && !ofType; + + if ( parent ) { + + // :(first|last|only)-(child|of-type) + if ( simple ) { + while ( dir ) { + node = elem; + while ( (node = node[ dir ]) ) { + if ( ofType ? node.nodeName.toLowerCase() === name : node.nodeType === 1 ) { + return false; + } + } + // Reverse direction for :only-* (if we haven't yet done so) + start = dir = type === "only" && !start && "nextSibling"; + } + return true; + } + + start = [ forward ? 
parent.firstChild : parent.lastChild ]; + + // non-xml :nth-child(...) stores cache data on `parent` + if ( forward && useCache ) { + // Seek `elem` from a previously-cached index + outerCache = parent[ expando ] || (parent[ expando ] = {}); + cache = outerCache[ type ] || []; + nodeIndex = cache[0] === dirruns && cache[1]; + diff = cache[0] === dirruns && cache[2]; + node = nodeIndex && parent.childNodes[ nodeIndex ]; + + while ( (node = ++nodeIndex && node && node[ dir ] || + + // Fallback to seeking `elem` from the start + (diff = nodeIndex = 0) || start.pop()) ) { + + // When found, cache indexes on `parent` and break + if ( node.nodeType === 1 && ++diff && node === elem ) { + outerCache[ type ] = [ dirruns, nodeIndex, diff ]; + break; + } + } + + // Use previously-cached element index if available + } else if ( useCache && (cache = (elem[ expando ] || (elem[ expando ] = {}))[ type ]) && cache[0] === dirruns ) { + diff = cache[1]; + + // xml :nth-child(...) or :nth-last-child(...) or :nth(-last)?-of-type(...) + } else { + // Use the same loop as above to seek `elem` from the start + while ( (node = ++nodeIndex && node && node[ dir ] || + (diff = nodeIndex = 0) || start.pop()) ) { + + if ( ( ofType ? node.nodeName.toLowerCase() === name : node.nodeType === 1 ) && ++diff ) { + // Cache the index of each encountered element + if ( useCache ) { + (node[ expando ] || (node[ expando ] = {}))[ type ] = [ dirruns, diff ]; + } + + if ( node === elem ) { + break; + } + } + } + } + + // Incorporate the offset, then check against cycle size + diff -= last; + return diff === first || ( diff % first === 0 && diff / first >= 0 ); + } + }; + }, + + "PSEUDO": function( pseudo, argument ) { + // pseudo-class names are case-insensitive + // http://www.w3.org/TR/selectors/#pseudo-classes + // Prioritize by case sensitivity in case custom pseudos are added with uppercase letters + // Remember that setFilters inherits from pseudos + var args, + fn = Expr.pseudos[ pseudo ] || Expr.setFilters[ pseudo.toLowerCase() ] || + Sizzle.error( "unsupported pseudo: " + pseudo ); + + // The user may use createPseudo to indicate that + // arguments are needed to create the filter function + // just as Sizzle does + if ( fn[ expando ] ) { + return fn( argument ); + } + + // But maintain support for old signatures + if ( fn.length > 1 ) { + args = [ pseudo, pseudo, "", argument ]; + return Expr.setFilters.hasOwnProperty( pseudo.toLowerCase() ) ? + markFunction(function( seed, matches ) { + var idx, + matched = fn( seed, argument ), + i = matched.length; + while ( i-- ) { + idx = indexOf.call( seed, matched[i] ); + seed[ idx ] = !( matches[ idx ] = matched[i] ); + } + }) : + function( elem ) { + return fn( elem, 0, args ); + }; + } + + return fn; + } + }, + + pseudos: { + // Potentially complex pseudos + "not": markFunction(function( selector ) { + // Trim the selector passed to compile + // to avoid treating leading and trailing + // spaces as combinators + var input = [], + results = [], + matcher = compile( selector.replace( rtrim, "$1" ) ); + + return matcher[ expando ] ? 
+ markFunction(function( seed, matches, context, xml ) { + var elem, + unmatched = matcher( seed, null, xml, [] ), + i = seed.length; + + // Match elements unmatched by `matcher` + while ( i-- ) { + if ( (elem = unmatched[i]) ) { + seed[i] = !(matches[i] = elem); + } + } + }) : + function( elem, context, xml ) { + input[0] = elem; + matcher( input, null, xml, results ); + return !results.pop(); + }; + }), + + "has": markFunction(function( selector ) { + return function( elem ) { + return Sizzle( selector, elem ).length > 0; + }; + }), + + "contains": markFunction(function( text ) { + return function( elem ) { + return ( elem.textContent || elem.innerText || getText( elem ) ).indexOf( text ) > -1; + }; + }), + + // "Whether an element is represented by a :lang() selector + // is based solely on the element's language value + // being equal to the identifier C, + // or beginning with the identifier C immediately followed by "-". + // The matching of C against the element's language value is performed case-insensitively. + // The identifier C does not have to be a valid language name." + // http://www.w3.org/TR/selectors/#lang-pseudo + "lang": markFunction( function( lang ) { + // lang value must be a valid identifier + if ( !ridentifier.test(lang || "") ) { + Sizzle.error( "unsupported lang: " + lang ); + } + lang = lang.replace( runescape, funescape ).toLowerCase(); + return function( elem ) { + var elemLang; + do { + if ( (elemLang = documentIsHTML ? + elem.lang : + elem.getAttribute("xml:lang") || elem.getAttribute("lang")) ) { + + elemLang = elemLang.toLowerCase(); + return elemLang === lang || elemLang.indexOf( lang + "-" ) === 0; + } + } while ( (elem = elem.parentNode) && elem.nodeType === 1 ); + return false; + }; + }), + + // Miscellaneous + "target": function( elem ) { + var hash = window.location && window.location.hash; + return hash && hash.slice( 1 ) === elem.id; + }, + + "root": function( elem ) { + return elem === docElem; + }, + + "focus": function( elem ) { + return elem === document.activeElement && (!document.hasFocus || document.hasFocus()) && !!(elem.type || elem.href || ~elem.tabIndex); + }, + + // Boolean properties + "enabled": function( elem ) { + return elem.disabled === false; + }, + + "disabled": function( elem ) { + return elem.disabled === true; + }, + + "checked": function( elem ) { + // In CSS3, :checked should return both checked and selected elements + // http://www.w3.org/TR/2011/REC-css3-selectors-20110929/#checked + var nodeName = elem.nodeName.toLowerCase(); + return (nodeName === "input" && !!elem.checked) || (nodeName === "option" && !!elem.selected); + }, + + "selected": function( elem ) { + // Accessing this property makes selected-by-default + // options in Safari work properly + if ( elem.parentNode ) { + elem.parentNode.selectedIndex; + } + + return elem.selected === true; + }, + + // Contents + "empty": function( elem ) { + // http://www.w3.org/TR/selectors/#empty-pseudo + // :empty is negated by element (1) or content nodes (text: 3; cdata: 4; entity ref: 5), + // but not by others (comment: 8; processing instruction: 7; etc.) 
+ // nodeType < 6 works because attributes (2) do not appear as children + for ( elem = elem.firstChild; elem; elem = elem.nextSibling ) { + if ( elem.nodeType < 6 ) { + return false; + } + } + return true; + }, + + "parent": function( elem ) { + return !Expr.pseudos["empty"]( elem ); + }, + + // Element/input types + "header": function( elem ) { + return rheader.test( elem.nodeName ); + }, + + "input": function( elem ) { + return rinputs.test( elem.nodeName ); + }, + + "button": function( elem ) { + var name = elem.nodeName.toLowerCase(); + return name === "input" && elem.type === "button" || name === "button"; + }, + + "text": function( elem ) { + var attr; + return elem.nodeName.toLowerCase() === "input" && + elem.type === "text" && + + // Support: IE<8 + // New HTML5 attribute values (e.g., "search") appear with elem.type === "text" + ( (attr = elem.getAttribute("type")) == null || attr.toLowerCase() === "text" ); + }, + + // Position-in-collection + "first": createPositionalPseudo(function() { + return [ 0 ]; + }), + + "last": createPositionalPseudo(function( matchIndexes, length ) { + return [ length - 1 ]; + }), + + "eq": createPositionalPseudo(function( matchIndexes, length, argument ) { + return [ argument < 0 ? argument + length : argument ]; + }), + + "even": createPositionalPseudo(function( matchIndexes, length ) { + var i = 0; + for ( ; i < length; i += 2 ) { + matchIndexes.push( i ); + } + return matchIndexes; + }), + + "odd": createPositionalPseudo(function( matchIndexes, length ) { + var i = 1; + for ( ; i < length; i += 2 ) { + matchIndexes.push( i ); + } + return matchIndexes; + }), + + "lt": createPositionalPseudo(function( matchIndexes, length, argument ) { + var i = argument < 0 ? argument + length : argument; + for ( ; --i >= 0; ) { + matchIndexes.push( i ); + } + return matchIndexes; + }), + + "gt": createPositionalPseudo(function( matchIndexes, length, argument ) { + var i = argument < 0 ? argument + length : argument; + for ( ; ++i < length; ) { + matchIndexes.push( i ); + } + return matchIndexes; + }) + } +}; + +Expr.pseudos["nth"] = Expr.pseudos["eq"]; + +// Add button/input type pseudos +for ( i in { radio: true, checkbox: true, file: true, password: true, image: true } ) { + Expr.pseudos[ i ] = createInputPseudo( i ); +} +for ( i in { submit: true, reset: true } ) { + Expr.pseudos[ i ] = createButtonPseudo( i ); +} + +// Easy API for creating new setFilters +function setFilters() {} +setFilters.prototype = Expr.filters = Expr.pseudos; +Expr.setFilters = new setFilters(); + +tokenize = Sizzle.tokenize = function( selector, parseOnly ) { + var matched, match, tokens, type, + soFar, groups, preFilters, + cached = tokenCache[ selector + " " ]; + + if ( cached ) { + return parseOnly ? 
0 : cached.slice( 0 ); + } + + soFar = selector; + groups = []; + preFilters = Expr.preFilter; + + while ( soFar ) { + + // Comma and first run + if ( !matched || (match = rcomma.exec( soFar )) ) { + if ( match ) { + // Don't consume trailing commas as valid + soFar = soFar.slice( match[0].length ) || soFar; + } + groups.push( (tokens = []) ); + } + + matched = false; + + // Combinators + if ( (match = rcombinators.exec( soFar )) ) { + matched = match.shift(); + tokens.push({ + value: matched, + // Cast descendant combinators to space + type: match[0].replace( rtrim, " " ) + }); + soFar = soFar.slice( matched.length ); + } + + // Filters + for ( type in Expr.filter ) { + if ( (match = matchExpr[ type ].exec( soFar )) && (!preFilters[ type ] || + (match = preFilters[ type ]( match ))) ) { + matched = match.shift(); + tokens.push({ + value: matched, + type: type, + matches: match + }); + soFar = soFar.slice( matched.length ); + } + } + + if ( !matched ) { + break; + } + } + + // Return the length of the invalid excess + // if we're just parsing + // Otherwise, throw an error or return tokens + return parseOnly ? + soFar.length : + soFar ? + Sizzle.error( selector ) : + // Cache the tokens + tokenCache( selector, groups ).slice( 0 ); +}; + +function toSelector( tokens ) { + var i = 0, + len = tokens.length, + selector = ""; + for ( ; i < len; i++ ) { + selector += tokens[i].value; + } + return selector; +} + +function addCombinator( matcher, combinator, base ) { + var dir = combinator.dir, + checkNonElements = base && dir === "parentNode", + doneName = done++; + + return combinator.first ? + // Check against closest ancestor/preceding element + function( elem, context, xml ) { + while ( (elem = elem[ dir ]) ) { + if ( elem.nodeType === 1 || checkNonElements ) { + return matcher( elem, context, xml ); + } + } + } : + + // Check against all ancestor/preceding elements + function( elem, context, xml ) { + var oldCache, outerCache, + newCache = [ dirruns, doneName ]; + + // We can't set arbitrary data on XML nodes, so they don't benefit from dir caching + if ( xml ) { + while ( (elem = elem[ dir ]) ) { + if ( elem.nodeType === 1 || checkNonElements ) { + if ( matcher( elem, context, xml ) ) { + return true; + } + } + } + } else { + while ( (elem = elem[ dir ]) ) { + if ( elem.nodeType === 1 || checkNonElements ) { + outerCache = elem[ expando ] || (elem[ expando ] = {}); + if ( (oldCache = outerCache[ dir ]) && + oldCache[ 0 ] === dirruns && oldCache[ 1 ] === doneName ) { + + // Assign to newCache so results back-propagate to previous elements + return (newCache[ 2 ] = oldCache[ 2 ]); + } else { + // Reuse newcache so results back-propagate to previous elements + outerCache[ dir ] = newCache; + + // A match means we're done; a fail means we have to keep checking + if ( (newCache[ 2 ] = matcher( elem, context, xml )) ) { + return true; + } + } + } + } + } + }; +} + +function elementMatcher( matchers ) { + return matchers.length > 1 ? 
+ function( elem, context, xml ) { + var i = matchers.length; + while ( i-- ) { + if ( !matchers[i]( elem, context, xml ) ) { + return false; + } + } + return true; + } : + matchers[0]; +} + +function multipleContexts( selector, contexts, results ) { + var i = 0, + len = contexts.length; + for ( ; i < len; i++ ) { + Sizzle( selector, contexts[i], results ); + } + return results; +} + +function condense( unmatched, map, filter, context, xml ) { + var elem, + newUnmatched = [], + i = 0, + len = unmatched.length, + mapped = map != null; + + for ( ; i < len; i++ ) { + if ( (elem = unmatched[i]) ) { + if ( !filter || filter( elem, context, xml ) ) { + newUnmatched.push( elem ); + if ( mapped ) { + map.push( i ); + } + } + } + } + + return newUnmatched; +} + +function setMatcher( preFilter, selector, matcher, postFilter, postFinder, postSelector ) { + if ( postFilter && !postFilter[ expando ] ) { + postFilter = setMatcher( postFilter ); + } + if ( postFinder && !postFinder[ expando ] ) { + postFinder = setMatcher( postFinder, postSelector ); + } + return markFunction(function( seed, results, context, xml ) { + var temp, i, elem, + preMap = [], + postMap = [], + preexisting = results.length, + + // Get initial elements from seed or context + elems = seed || multipleContexts( selector || "*", context.nodeType ? [ context ] : context, [] ), + + // Prefilter to get matcher input, preserving a map for seed-results synchronization + matcherIn = preFilter && ( seed || !selector ) ? + condense( elems, preMap, preFilter, context, xml ) : + elems, + + matcherOut = matcher ? + // If we have a postFinder, or filtered seed, or non-seed postFilter or preexisting results, + postFinder || ( seed ? preFilter : preexisting || postFilter ) ? + + // ...intermediate processing is necessary + [] : + + // ...otherwise use results directly + results : + matcherIn; + + // Find primary matches + if ( matcher ) { + matcher( matcherIn, matcherOut, context, xml ); + } + + // Apply postFilter + if ( postFilter ) { + temp = condense( matcherOut, postMap ); + postFilter( temp, [], context, xml ); + + // Un-match failing elements by moving them back to matcherIn + i = temp.length; + while ( i-- ) { + if ( (elem = temp[i]) ) { + matcherOut[ postMap[i] ] = !(matcherIn[ postMap[i] ] = elem); + } + } + } + + if ( seed ) { + if ( postFinder || preFilter ) { + if ( postFinder ) { + // Get the final matcherOut by condensing this intermediate into postFinder contexts + temp = []; + i = matcherOut.length; + while ( i-- ) { + if ( (elem = matcherOut[i]) ) { + // Restore matcherIn since elem is not yet a final match + temp.push( (matcherIn[i] = elem) ); + } + } + postFinder( null, (matcherOut = []), temp, xml ); + } + + // Move matched elements from seed to results to keep them synchronized + i = matcherOut.length; + while ( i-- ) { + if ( (elem = matcherOut[i]) && + (temp = postFinder ? indexOf.call( seed, elem ) : preMap[i]) > -1 ) { + + seed[temp] = !(results[temp] = elem); + } + } + } + + // Add elements to results, through postFinder if defined + } else { + matcherOut = condense( + matcherOut === results ? 
+ matcherOut.splice( preexisting, matcherOut.length ) : + matcherOut + ); + if ( postFinder ) { + postFinder( null, results, matcherOut, xml ); + } else { + push.apply( results, matcherOut ); + } + } + }); +} + +function matcherFromTokens( tokens ) { + var checkContext, matcher, j, + len = tokens.length, + leadingRelative = Expr.relative[ tokens[0].type ], + implicitRelative = leadingRelative || Expr.relative[" "], + i = leadingRelative ? 1 : 0, + + // The foundational matcher ensures that elements are reachable from top-level context(s) + matchContext = addCombinator( function( elem ) { + return elem === checkContext; + }, implicitRelative, true ), + matchAnyContext = addCombinator( function( elem ) { + return indexOf.call( checkContext, elem ) > -1; + }, implicitRelative, true ), + matchers = [ function( elem, context, xml ) { + return ( !leadingRelative && ( xml || context !== outermostContext ) ) || ( + (checkContext = context).nodeType ? + matchContext( elem, context, xml ) : + matchAnyContext( elem, context, xml ) ); + } ]; + + for ( ; i < len; i++ ) { + if ( (matcher = Expr.relative[ tokens[i].type ]) ) { + matchers = [ addCombinator(elementMatcher( matchers ), matcher) ]; + } else { + matcher = Expr.filter[ tokens[i].type ].apply( null, tokens[i].matches ); + + // Return special upon seeing a positional matcher + if ( matcher[ expando ] ) { + // Find the next relative operator (if any) for proper handling + j = ++i; + for ( ; j < len; j++ ) { + if ( Expr.relative[ tokens[j].type ] ) { + break; + } + } + return setMatcher( + i > 1 && elementMatcher( matchers ), + i > 1 && toSelector( + // If the preceding token was a descendant combinator, insert an implicit any-element `*` + tokens.slice( 0, i - 1 ).concat({ value: tokens[ i - 2 ].type === " " ? "*" : "" }) + ).replace( rtrim, "$1" ), + matcher, + i < j && matcherFromTokens( tokens.slice( i, j ) ), + j < len && matcherFromTokens( (tokens = tokens.slice( j )) ), + j < len && toSelector( tokens ) + ); + } + matchers.push( matcher ); + } + } + + return elementMatcher( matchers ); +} + +function matcherFromGroupMatchers( elementMatchers, setMatchers ) { + var bySet = setMatchers.length > 0, + byElement = elementMatchers.length > 0, + superMatcher = function( seed, context, xml, results, outermost ) { + var elem, j, matcher, + matchedCount = 0, + i = "0", + unmatched = seed && [], + setMatched = [], + contextBackup = outermostContext, + // We must always have either seed elements or outermost context + elems = seed || byElement && Expr.find["TAG"]( "*", outermost ), + // Use integer dirruns iff this is the outermost matcher + dirrunsUnique = (dirruns += contextBackup == null ? 
1 : Math.random() || 0.1), + len = elems.length; + + if ( outermost ) { + outermostContext = context !== document && context; + } + + // Add elements passing elementMatchers directly to results + // Keep `i` a string if there are no elements so `matchedCount` will be "00" below + // Support: IE<9, Safari + // Tolerate NodeList properties (IE: "length"; Safari: ) matching elements by id + for ( ; i !== len && (elem = elems[i]) != null; i++ ) { + if ( byElement && elem ) { + j = 0; + while ( (matcher = elementMatchers[j++]) ) { + if ( matcher( elem, context, xml ) ) { + results.push( elem ); + break; + } + } + if ( outermost ) { + dirruns = dirrunsUnique; + } + } + + // Track unmatched elements for set filters + if ( bySet ) { + // They will have gone through all possible matchers + if ( (elem = !matcher && elem) ) { + matchedCount--; + } + + // Lengthen the array for every element, matched or not + if ( seed ) { + unmatched.push( elem ); + } + } + } + + // Apply set filters to unmatched elements + matchedCount += i; + if ( bySet && i !== matchedCount ) { + j = 0; + while ( (matcher = setMatchers[j++]) ) { + matcher( unmatched, setMatched, context, xml ); + } + + if ( seed ) { + // Reintegrate element matches to eliminate the need for sorting + if ( matchedCount > 0 ) { + while ( i-- ) { + if ( !(unmatched[i] || setMatched[i]) ) { + setMatched[i] = pop.call( results ); + } + } + } + + // Discard index placeholder values to get only actual matches + setMatched = condense( setMatched ); + } + + // Add matches to results + push.apply( results, setMatched ); + + // Seedless set matches succeeding multiple successful matchers stipulate sorting + if ( outermost && !seed && setMatched.length > 0 && + ( matchedCount + setMatchers.length ) > 1 ) { + + Sizzle.uniqueSort( results ); + } + } + + // Override manipulation of globals by nested matchers + if ( outermost ) { + dirruns = dirrunsUnique; + outermostContext = contextBackup; + } + + return unmatched; + }; + + return bySet ? 
+ markFunction( superMatcher ) : + superMatcher; +} + +compile = Sizzle.compile = function( selector, match /* Internal Use Only */ ) { + var i, + setMatchers = [], + elementMatchers = [], + cached = compilerCache[ selector + " " ]; + + if ( !cached ) { + // Generate a function of recursive functions that can be used to check each element + if ( !match ) { + match = tokenize( selector ); + } + i = match.length; + while ( i-- ) { + cached = matcherFromTokens( match[i] ); + if ( cached[ expando ] ) { + setMatchers.push( cached ); + } else { + elementMatchers.push( cached ); + } + } + + // Cache the compiled function + cached = compilerCache( selector, matcherFromGroupMatchers( elementMatchers, setMatchers ) ); + + // Save selector and tokenization + cached.selector = selector; + } + return cached; +}; + +/** + * A low-level selection function that works with Sizzle's compiled + * selector functions + * @param {String|Function} selector A selector or a pre-compiled + * selector function built with Sizzle.compile + * @param {Element} context + * @param {Array} [results] + * @param {Array} [seed] A set of elements to match against + */ +select = Sizzle.select = function( selector, context, results, seed ) { + var i, tokens, token, type, find, + compiled = typeof selector === "function" && selector, + match = !seed && tokenize( (selector = compiled.selector || selector) ); + + results = results || []; + + // Try to minimize operations if there is no seed and only one group + if ( match.length === 1 ) { + + // Take a shortcut and set the context if the root selector is an ID + tokens = match[0] = match[0].slice( 0 ); + if ( tokens.length > 2 && (token = tokens[0]).type === "ID" && + support.getById && context.nodeType === 9 && documentIsHTML && + Expr.relative[ tokens[1].type ] ) { + + context = ( Expr.find["ID"]( token.matches[0].replace(runescape, funescape), context ) || [] )[0]; + if ( !context ) { + return results; + + // Precompiled matchers will still verify ancestry, so step up a level + } else if ( compiled ) { + context = context.parentNode; + } + + selector = selector.slice( tokens.shift().value.length ); + } + + // Fetch a seed set for right-to-left matching + i = matchExpr["needsContext"].test( selector ) ? 
0 : tokens.length; + while ( i-- ) { + token = tokens[i]; + + // Abort if we hit a combinator + if ( Expr.relative[ (type = token.type) ] ) { + break; + } + if ( (find = Expr.find[ type ]) ) { + // Search, expanding context for leading sibling combinators + if ( (seed = find( + token.matches[0].replace( runescape, funescape ), + rsibling.test( tokens[0].type ) && testContext( context.parentNode ) || context + )) ) { + + // If seed is empty or no tokens remain, we can return early + tokens.splice( i, 1 ); + selector = seed.length && toSelector( tokens ); + if ( !selector ) { + push.apply( results, seed ); + return results; + } + + break; + } + } + } + } + + // Compile and execute a filtering function if one is not provided + // Provide `match` to avoid retokenization if we modified the selector above + ( compiled || compile( selector, match ) )( + seed, + context, + !documentIsHTML, + results, + rsibling.test( selector ) && testContext( context.parentNode ) || context + ); + return results; +}; + +// One-time assignments + +// Sort stability +support.sortStable = expando.split("").sort( sortOrder ).join("") === expando; + +// Support: Chrome<14 +// Always assume duplicates if they aren't passed to the comparison function +support.detectDuplicates = !!hasDuplicate; + +// Initialize against the default document +setDocument(); + +// Support: Webkit<537.32 - Safari 6.0.3/Chrome 25 (fixed in Chrome 27) +// Detached nodes confoundingly follow *each other* +support.sortDetached = assert(function( div1 ) { + // Should return 1, but returns 4 (following) + return div1.compareDocumentPosition( document.createElement("div") ) & 1; +}); + +// Support: IE<8 +// Prevent attribute/property "interpolation" +// http://msdn.microsoft.com/en-us/library/ms536429%28VS.85%29.aspx +if ( !assert(function( div ) { + div.innerHTML = ""; + return div.firstChild.getAttribute("href") === "#" ; +}) ) { + addHandle( "type|href|height|width", function( elem, name, isXML ) { + if ( !isXML ) { + return elem.getAttribute( name, name.toLowerCase() === "type" ? 1 : 2 ); + } + }); +} + +// Support: IE<9 +// Use defaultValue in place of getAttribute("value") +if ( !support.attributes || !assert(function( div ) { + div.innerHTML = ""; + div.firstChild.setAttribute( "value", "" ); + return div.firstChild.getAttribute( "value" ) === ""; +}) ) { + addHandle( "value", function( elem, name, isXML ) { + if ( !isXML && elem.nodeName.toLowerCase() === "input" ) { + return elem.defaultValue; + } + }); +} + +// Support: IE<9 +// Use getAttributeNode to fetch booleans when getAttribute lies +if ( !assert(function( div ) { + return div.getAttribute("disabled") == null; +}) ) { + addHandle( booleans, function( elem, name, isXML ) { + var val; + if ( !isXML ) { + return elem[ name ] === true ? name.toLowerCase() : + (val = elem.getAttributeNode( name )) && val.specified ? 
+ val.value : + null; + } + }); +} + +return Sizzle; + +})( window ); + + + +jQuery.find = Sizzle; +jQuery.expr = Sizzle.selectors; +jQuery.expr[":"] = jQuery.expr.pseudos; +jQuery.unique = Sizzle.uniqueSort; +jQuery.text = Sizzle.getText; +jQuery.isXMLDoc = Sizzle.isXML; +jQuery.contains = Sizzle.contains; + + + +var rneedsContext = jQuery.expr.match.needsContext; + +var rsingleTag = (/^<(\w+)\s*\/?>(?:<\/\1>|)$/); + + + +var risSimple = /^.[^:#\[\.,]*$/; + +// Implement the identical functionality for filter and not +function winnow( elements, qualifier, not ) { + if ( jQuery.isFunction( qualifier ) ) { + return jQuery.grep( elements, function( elem, i ) { + /* jshint -W018 */ + return !!qualifier.call( elem, i, elem ) !== not; + }); + + } + + if ( qualifier.nodeType ) { + return jQuery.grep( elements, function( elem ) { + return ( elem === qualifier ) !== not; + }); + + } + + if ( typeof qualifier === "string" ) { + if ( risSimple.test( qualifier ) ) { + return jQuery.filter( qualifier, elements, not ); + } + + qualifier = jQuery.filter( qualifier, elements ); + } + + return jQuery.grep( elements, function( elem ) { + return ( jQuery.inArray( elem, qualifier ) >= 0 ) !== not; + }); +} + +jQuery.filter = function( expr, elems, not ) { + var elem = elems[ 0 ]; + + if ( not ) { + expr = ":not(" + expr + ")"; + } + + return elems.length === 1 && elem.nodeType === 1 ? + jQuery.find.matchesSelector( elem, expr ) ? [ elem ] : [] : + jQuery.find.matches( expr, jQuery.grep( elems, function( elem ) { + return elem.nodeType === 1; + })); +}; + +jQuery.fn.extend({ + find: function( selector ) { + var i, + ret = [], + self = this, + len = self.length; + + if ( typeof selector !== "string" ) { + return this.pushStack( jQuery( selector ).filter(function() { + for ( i = 0; i < len; i++ ) { + if ( jQuery.contains( self[ i ], this ) ) { + return true; + } + } + }) ); + } + + for ( i = 0; i < len; i++ ) { + jQuery.find( selector, self[ i ], ret ); + } + + // Needed because $( selector, context ) becomes $( context ).find( selector ) + ret = this.pushStack( len > 1 ? jQuery.unique( ret ) : ret ); + ret.selector = this.selector ? this.selector + " " + selector : selector; + return ret; + }, + filter: function( selector ) { + return this.pushStack( winnow(this, selector || [], false) ); + }, + not: function( selector ) { + return this.pushStack( winnow(this, selector || [], true) ); + }, + is: function( selector ) { + return !!winnow( + this, + + // If this is a positional/relative selector, check membership in the returned set + // so $("p:first").is("p:last") won't return true for a doc with two "p". + typeof selector === "string" && rneedsContext.test( selector ) ? 
+ jQuery( selector ) : + selector || [], + false + ).length; + } +}); + + +// Initialize a jQuery object + + +// A central reference to the root jQuery(document) +var rootjQuery, + + // Use the correct document accordingly with window argument (sandbox) + document = window.document, + + // A simple way to check for HTML strings + // Prioritize #id over to avoid XSS via location.hash (#9521) + // Strict HTML recognition (#11290: must start with <) + rquickExpr = /^(?:\s*(<[\w\W]+>)[^>]*|#([\w-]*))$/, + + init = jQuery.fn.init = function( selector, context ) { + var match, elem; + + // HANDLE: $(""), $(null), $(undefined), $(false) + if ( !selector ) { + return this; + } + + // Handle HTML strings + if ( typeof selector === "string" ) { + if ( selector.charAt(0) === "<" && selector.charAt( selector.length - 1 ) === ">" && selector.length >= 3 ) { + // Assume that strings that start and end with <> are HTML and skip the regex check + match = [ null, selector, null ]; + + } else { + match = rquickExpr.exec( selector ); + } + + // Match html or make sure no context is specified for #id + if ( match && (match[1] || !context) ) { + + // HANDLE: $(html) -> $(array) + if ( match[1] ) { + context = context instanceof jQuery ? context[0] : context; + + // scripts is true for back-compat + // Intentionally let the error be thrown if parseHTML is not present + jQuery.merge( this, jQuery.parseHTML( + match[1], + context && context.nodeType ? context.ownerDocument || context : document, + true + ) ); + + // HANDLE: $(html, props) + if ( rsingleTag.test( match[1] ) && jQuery.isPlainObject( context ) ) { + for ( match in context ) { + // Properties of context are called as methods if possible + if ( jQuery.isFunction( this[ match ] ) ) { + this[ match ]( context[ match ] ); + + // ...and otherwise set as attributes + } else { + this.attr( match, context[ match ] ); + } + } + } + + return this; + + // HANDLE: $(#id) + } else { + elem = document.getElementById( match[2] ); + + // Check parentNode to catch when Blackberry 4.6 returns + // nodes that are no longer in the document #6963 + if ( elem && elem.parentNode ) { + // Handle the case where IE and Opera return items + // by name instead of ID + if ( elem.id !== match[2] ) { + return rootjQuery.find( selector ); + } + + // Otherwise, we inject the element directly into the jQuery object + this.length = 1; + this[0] = elem; + } + + this.context = document; + this.selector = selector; + return this; + } + + // HANDLE: $(expr, $(...)) + } else if ( !context || context.jquery ) { + return ( context || rootjQuery ).find( selector ); + + // HANDLE: $(expr, context) + // (which is just equivalent to: $(context).find(expr) + } else { + return this.constructor( context ).find( selector ); + } + + // HANDLE: $(DOMElement) + } else if ( selector.nodeType ) { + this.context = this[0] = selector; + this.length = 1; + return this; + + // HANDLE: $(function) + // Shortcut for document ready + } else if ( jQuery.isFunction( selector ) ) { + return typeof rootjQuery.ready !== "undefined" ? 
+ rootjQuery.ready( selector ) : + // Execute immediately if ready is not present + selector( jQuery ); + } + + if ( selector.selector !== undefined ) { + this.selector = selector.selector; + this.context = selector.context; + } + + return jQuery.makeArray( selector, this ); + }; + +// Give the init function the jQuery prototype for later instantiation +init.prototype = jQuery.fn; + +// Initialize central reference +rootjQuery = jQuery( document ); + + +var rparentsprev = /^(?:parents|prev(?:Until|All))/, + // methods guaranteed to produce a unique set when starting from a unique set + guaranteedUnique = { + children: true, + contents: true, + next: true, + prev: true + }; + +jQuery.extend({ + dir: function( elem, dir, until ) { + var matched = [], + cur = elem[ dir ]; + + while ( cur && cur.nodeType !== 9 && (until === undefined || cur.nodeType !== 1 || !jQuery( cur ).is( until )) ) { + if ( cur.nodeType === 1 ) { + matched.push( cur ); + } + cur = cur[dir]; + } + return matched; + }, + + sibling: function( n, elem ) { + var r = []; + + for ( ; n; n = n.nextSibling ) { + if ( n.nodeType === 1 && n !== elem ) { + r.push( n ); + } + } + + return r; + } +}); + +jQuery.fn.extend({ + has: function( target ) { + var i, + targets = jQuery( target, this ), + len = targets.length; + + return this.filter(function() { + for ( i = 0; i < len; i++ ) { + if ( jQuery.contains( this, targets[i] ) ) { + return true; + } + } + }); + }, + + closest: function( selectors, context ) { + var cur, + i = 0, + l = this.length, + matched = [], + pos = rneedsContext.test( selectors ) || typeof selectors !== "string" ? + jQuery( selectors, context || this.context ) : + 0; + + for ( ; i < l; i++ ) { + for ( cur = this[i]; cur && cur !== context; cur = cur.parentNode ) { + // Always skip document fragments + if ( cur.nodeType < 11 && (pos ? + pos.index(cur) > -1 : + + // Don't pass non-elements to Sizzle + cur.nodeType === 1 && + jQuery.find.matchesSelector(cur, selectors)) ) { + + matched.push( cur ); + break; + } + } + } + + return this.pushStack( matched.length > 1 ? jQuery.unique( matched ) : matched ); + }, + + // Determine the position of an element within + // the matched set of elements + index: function( elem ) { + + // No argument, return index in parent + if ( !elem ) { + return ( this[0] && this[0].parentNode ) ? this.first().prevAll().length : -1; + } + + // index in selector + if ( typeof elem === "string" ) { + return jQuery.inArray( this[0], jQuery( elem ) ); + } + + // Locate the position of the desired element + return jQuery.inArray( + // If it receives a jQuery object, the first element is used + elem.jquery ? elem[0] : elem, this ); + }, + + add: function( selector, context ) { + return this.pushStack( + jQuery.unique( + jQuery.merge( this.get(), jQuery( selector, context ) ) + ) + ); + }, + + addBack: function( selector ) { + return this.add( selector == null ? + this.prevObject : this.prevObject.filter(selector) + ); + } +}); + +function sibling( cur, dir ) { + do { + cur = cur[ dir ]; + } while ( cur && cur.nodeType !== 1 ); + + return cur; +} + +jQuery.each({ + parent: function( elem ) { + var parent = elem.parentNode; + return parent && parent.nodeType !== 11 ? 
parent : null; + }, + parents: function( elem ) { + return jQuery.dir( elem, "parentNode" ); + }, + parentsUntil: function( elem, i, until ) { + return jQuery.dir( elem, "parentNode", until ); + }, + next: function( elem ) { + return sibling( elem, "nextSibling" ); + }, + prev: function( elem ) { + return sibling( elem, "previousSibling" ); + }, + nextAll: function( elem ) { + return jQuery.dir( elem, "nextSibling" ); + }, + prevAll: function( elem ) { + return jQuery.dir( elem, "previousSibling" ); + }, + nextUntil: function( elem, i, until ) { + return jQuery.dir( elem, "nextSibling", until ); + }, + prevUntil: function( elem, i, until ) { + return jQuery.dir( elem, "previousSibling", until ); + }, + siblings: function( elem ) { + return jQuery.sibling( ( elem.parentNode || {} ).firstChild, elem ); + }, + children: function( elem ) { + return jQuery.sibling( elem.firstChild ); + }, + contents: function( elem ) { + return jQuery.nodeName( elem, "iframe" ) ? + elem.contentDocument || elem.contentWindow.document : + jQuery.merge( [], elem.childNodes ); + } +}, function( name, fn ) { + jQuery.fn[ name ] = function( until, selector ) { + var ret = jQuery.map( this, fn, until ); + + if ( name.slice( -5 ) !== "Until" ) { + selector = until; + } + + if ( selector && typeof selector === "string" ) { + ret = jQuery.filter( selector, ret ); + } + + if ( this.length > 1 ) { + // Remove duplicates + if ( !guaranteedUnique[ name ] ) { + ret = jQuery.unique( ret ); + } + + // Reverse order for parents* and prev-derivatives + if ( rparentsprev.test( name ) ) { + ret = ret.reverse(); + } + } + + return this.pushStack( ret ); + }; +}); +var rnotwhite = (/\S+/g); + + + +// String to Object options format cache +var optionsCache = {}; + +// Convert String-formatted options into Object-formatted ones and store in cache +function createOptions( options ) { + var object = optionsCache[ options ] = {}; + jQuery.each( options.match( rnotwhite ) || [], function( _, flag ) { + object[ flag ] = true; + }); + return object; +} + +/* + * Create a callback list using the following parameters: + * + * options: an optional list of space-separated options that will change how + * the callback list behaves or a more traditional option object + * + * By default a callback list will act like an event callback list and can be + * "fired" multiple times. + * + * Possible options: + * + * once: will ensure the callback list can only be fired once (like a Deferred) + * + * memory: will keep track of previous values and will call any callback added + * after the list has been fired right away with the latest "memorized" + * values (like a Deferred) + * + * unique: will ensure a callback can only be added once (no duplicate in the list) + * + * stopOnFalse: interrupt callings when a callback returns false + * + */ +jQuery.Callbacks = function( options ) { + + // Convert options from String-formatted to Object-formatted if needed + // (we check in cache first) + options = typeof options === "string" ? 
+ ( optionsCache[ options ] || createOptions( options ) ) : + jQuery.extend( {}, options ); + + var // Flag to know if list is currently firing + firing, + // Last fire value (for non-forgettable lists) + memory, + // Flag to know if list was already fired + fired, + // End of the loop when firing + firingLength, + // Index of currently firing callback (modified by remove if needed) + firingIndex, + // First callback to fire (used internally by add and fireWith) + firingStart, + // Actual callback list + list = [], + // Stack of fire calls for repeatable lists + stack = !options.once && [], + // Fire callbacks + fire = function( data ) { + memory = options.memory && data; + fired = true; + firingIndex = firingStart || 0; + firingStart = 0; + firingLength = list.length; + firing = true; + for ( ; list && firingIndex < firingLength; firingIndex++ ) { + if ( list[ firingIndex ].apply( data[ 0 ], data[ 1 ] ) === false && options.stopOnFalse ) { + memory = false; // To prevent further calls using add + break; + } + } + firing = false; + if ( list ) { + if ( stack ) { + if ( stack.length ) { + fire( stack.shift() ); + } + } else if ( memory ) { + list = []; + } else { + self.disable(); + } + } + }, + // Actual Callbacks object + self = { + // Add a callback or a collection of callbacks to the list + add: function() { + if ( list ) { + // First, we save the current length + var start = list.length; + (function add( args ) { + jQuery.each( args, function( _, arg ) { + var type = jQuery.type( arg ); + if ( type === "function" ) { + if ( !options.unique || !self.has( arg ) ) { + list.push( arg ); + } + } else if ( arg && arg.length && type !== "string" ) { + // Inspect recursively + add( arg ); + } + }); + })( arguments ); + // Do we need to add the callbacks to the + // current firing batch? + if ( firing ) { + firingLength = list.length; + // With memory, if we're not firing then + // we should call right away + } else if ( memory ) { + firingStart = start; + fire( memory ); + } + } + return this; + }, + // Remove a callback from the list + remove: function() { + if ( list ) { + jQuery.each( arguments, function( _, arg ) { + var index; + while ( ( index = jQuery.inArray( arg, list, index ) ) > -1 ) { + list.splice( index, 1 ); + // Handle firing indexes + if ( firing ) { + if ( index <= firingLength ) { + firingLength--; + } + if ( index <= firingIndex ) { + firingIndex--; + } + } + } + }); + } + return this; + }, + // Check if a given callback is in the list. + // If no argument is given, return whether or not list has callbacks attached. + has: function( fn ) { + return fn ? jQuery.inArray( fn, list ) > -1 : !!( list && list.length ); + }, + // Remove all callbacks from the list + empty: function() { + list = []; + firingLength = 0; + return this; + }, + // Have the list do nothing anymore + disable: function() { + list = stack = memory = undefined; + return this; + }, + // Is it disabled? + disabled: function() { + return !list; + }, + // Lock the list in its current state + lock: function() { + stack = undefined; + if ( !memory ) { + self.disable(); + } + return this; + }, + // Is it locked? + locked: function() { + return !stack; + }, + // Call all callbacks with the given context and arguments + fireWith: function( context, args ) { + if ( list && ( !fired || stack ) ) { + args = args || []; + args = [ context, args.slice ? 
args.slice() : args ]; + if ( firing ) { + stack.push( args ); + } else { + fire( args ); + } + } + return this; + }, + // Call all the callbacks with the given arguments + fire: function() { + self.fireWith( this, arguments ); + return this; + }, + // To know if the callbacks have already been called at least once + fired: function() { + return !!fired; + } + }; + + return self; +}; + + +jQuery.extend({ + + Deferred: function( func ) { + var tuples = [ + // action, add listener, listener list, final state + [ "resolve", "done", jQuery.Callbacks("once memory"), "resolved" ], + [ "reject", "fail", jQuery.Callbacks("once memory"), "rejected" ], + [ "notify", "progress", jQuery.Callbacks("memory") ] + ], + state = "pending", + promise = { + state: function() { + return state; + }, + always: function() { + deferred.done( arguments ).fail( arguments ); + return this; + }, + then: function( /* fnDone, fnFail, fnProgress */ ) { + var fns = arguments; + return jQuery.Deferred(function( newDefer ) { + jQuery.each( tuples, function( i, tuple ) { + var fn = jQuery.isFunction( fns[ i ] ) && fns[ i ]; + // deferred[ done | fail | progress ] for forwarding actions to newDefer + deferred[ tuple[1] ](function() { + var returned = fn && fn.apply( this, arguments ); + if ( returned && jQuery.isFunction( returned.promise ) ) { + returned.promise() + .done( newDefer.resolve ) + .fail( newDefer.reject ) + .progress( newDefer.notify ); + } else { + newDefer[ tuple[ 0 ] + "With" ]( this === promise ? newDefer.promise() : this, fn ? [ returned ] : arguments ); + } + }); + }); + fns = null; + }).promise(); + }, + // Get a promise for this deferred + // If obj is provided, the promise aspect is added to the object + promise: function( obj ) { + return obj != null ? jQuery.extend( obj, promise ) : promise; + } + }, + deferred = {}; + + // Keep pipe for back-compat + promise.pipe = promise.then; + + // Add list-specific methods + jQuery.each( tuples, function( i, tuple ) { + var list = tuple[ 2 ], + stateString = tuple[ 3 ]; + + // promise[ done | fail | progress ] = list.add + promise[ tuple[1] ] = list.add; + + // Handle state + if ( stateString ) { + list.add(function() { + // state = [ resolved | rejected ] + state = stateString; + + // [ reject_list | resolve_list ].disable; progress_list.lock + }, tuples[ i ^ 1 ][ 2 ].disable, tuples[ 2 ][ 2 ].lock ); + } + + // deferred[ resolve | reject | notify ] + deferred[ tuple[0] ] = function() { + deferred[ tuple[0] + "With" ]( this === deferred ? promise : this, arguments ); + return this; + }; + deferred[ tuple[0] + "With" ] = list.fireWith; + }); + + // Make the deferred a promise + promise.promise( deferred ); + + // Call given func if any + if ( func ) { + func.call( deferred, deferred ); + } + + // All done! + return deferred; + }, + + // Deferred helper + when: function( subordinate /* , ..., subordinateN */ ) { + var i = 0, + resolveValues = slice.call( arguments ), + length = resolveValues.length, + + // the count of uncompleted subordinates + remaining = length !== 1 || ( subordinate && jQuery.isFunction( subordinate.promise ) ) ? length : 0, + + // the master Deferred. If resolveValues consist of only a single Deferred, just use that. + deferred = remaining === 1 ? subordinate : jQuery.Deferred(), + + // Update function for both resolve and progress values + updateFunc = function( i, contexts, values ) { + return function( value ) { + contexts[ i ] = this; + values[ i ] = arguments.length > 1 ? 
slice.call( arguments ) : value; + if ( values === progressValues ) { + deferred.notifyWith( contexts, values ); + + } else if ( !(--remaining) ) { + deferred.resolveWith( contexts, values ); + } + }; + }, + + progressValues, progressContexts, resolveContexts; + + // add listeners to Deferred subordinates; treat others as resolved + if ( length > 1 ) { + progressValues = new Array( length ); + progressContexts = new Array( length ); + resolveContexts = new Array( length ); + for ( ; i < length; i++ ) { + if ( resolveValues[ i ] && jQuery.isFunction( resolveValues[ i ].promise ) ) { + resolveValues[ i ].promise() + .done( updateFunc( i, resolveContexts, resolveValues ) ) + .fail( deferred.reject ) + .progress( updateFunc( i, progressContexts, progressValues ) ); + } else { + --remaining; + } + } + } + + // if we're not waiting on anything, resolve the master + if ( !remaining ) { + deferred.resolveWith( resolveContexts, resolveValues ); + } + + return deferred.promise(); + } +}); + + +// The deferred used on DOM ready +var readyList; + +jQuery.fn.ready = function( fn ) { + // Add the callback + jQuery.ready.promise().done( fn ); + + return this; +}; + +jQuery.extend({ + // Is the DOM ready to be used? Set to true once it occurs. + isReady: false, + + // A counter to track how many items to wait for before + // the ready event fires. See #6781 + readyWait: 1, + + // Hold (or release) the ready event + holdReady: function( hold ) { + if ( hold ) { + jQuery.readyWait++; + } else { + jQuery.ready( true ); + } + }, + + // Handle when the DOM is ready + ready: function( wait ) { + + // Abort if there are pending holds or we're already ready + if ( wait === true ? --jQuery.readyWait : jQuery.isReady ) { + return; + } + + // Make sure body exists, at least, in case IE gets a little overzealous (ticket #5443). + if ( !document.body ) { + return setTimeout( jQuery.ready ); + } + + // Remember that the DOM is ready + jQuery.isReady = true; + + // If a normal DOM Ready event fired, decrement, and wait if need be + if ( wait !== true && --jQuery.readyWait > 0 ) { + return; + } + + // If there are functions bound, to execute + readyList.resolveWith( document, [ jQuery ] ); + + // Trigger any bound ready events + if ( jQuery.fn.triggerHandler ) { + jQuery( document ).triggerHandler( "ready" ); + jQuery( document ).off( "ready" ); + } + } +}); + +/** + * Clean-up method for dom ready events + */ +function detach() { + if ( document.addEventListener ) { + document.removeEventListener( "DOMContentLoaded", completed, false ); + window.removeEventListener( "load", completed, false ); + + } else { + document.detachEvent( "onreadystatechange", completed ); + window.detachEvent( "onload", completed ); + } +} + +/** + * The ready event handler and self cleanup method + */ +function completed() { + // readyState === "complete" is good enough for us to call the dom ready in oldIE + if ( document.addEventListener || event.type === "load" || document.readyState === "complete" ) { + detach(); + jQuery.ready(); + } +} + +jQuery.ready.promise = function( obj ) { + if ( !readyList ) { + + readyList = jQuery.Deferred(); + + // Catch cases where $(document).ready() is called after the browser event has already occurred. 
+ // we once tried to use readyState "interactive" here, but it caused issues like the one + // discovered by ChrisS here: http://bugs.jquery.com/ticket/12282#comment:15 + if ( document.readyState === "complete" ) { + // Handle it asynchronously to allow scripts the opportunity to delay ready + setTimeout( jQuery.ready ); + + // Standards-based browsers support DOMContentLoaded + } else if ( document.addEventListener ) { + // Use the handy event callback + document.addEventListener( "DOMContentLoaded", completed, false ); + + // A fallback to window.onload, that will always work + window.addEventListener( "load", completed, false ); + + // If IE event model is used + } else { + // Ensure firing before onload, maybe late but safe also for iframes + document.attachEvent( "onreadystatechange", completed ); + + // A fallback to window.onload, that will always work + window.attachEvent( "onload", completed ); + + // If IE and not a frame + // continually check to see if the document is ready + var top = false; + + try { + top = window.frameElement == null && document.documentElement; + } catch(e) {} + + if ( top && top.doScroll ) { + (function doScrollCheck() { + if ( !jQuery.isReady ) { + + try { + // Use the trick by Diego Perini + // http://javascript.nwbox.com/IEContentLoaded/ + top.doScroll("left"); + } catch(e) { + return setTimeout( doScrollCheck, 50 ); + } + + // detach all dom ready events + detach(); + + // and execute any waiting functions + jQuery.ready(); + } + })(); + } + } + } + return readyList.promise( obj ); +}; + + +var strundefined = typeof undefined; + + + +// Support: IE<9 +// Iteration over object's inherited properties before its own +var i; +for ( i in jQuery( support ) ) { + break; +} +support.ownLast = i !== "0"; + +// Note: most support tests are defined in their respective modules. +// false until the test is run +support.inlineBlockNeedsLayout = false; + +// Execute ASAP in case we need to set body.style.zoom +jQuery(function() { + // Minified: var a,b,c,d + var val, div, body, container; + + body = document.getElementsByTagName( "body" )[ 0 ]; + if ( !body || !body.style ) { + // Return for frameset docs that don't have a body + return; + } + + // Setup + div = document.createElement( "div" ); + container = document.createElement( "div" ); + container.style.cssText = "position:absolute;border:0;width:0;height:0;top:0;left:-9999px"; + body.appendChild( container ).appendChild( div ); + + if ( typeof div.style.zoom !== strundefined ) { + // Support: IE<8 + // Check if natively block-level elements act like inline-block + // elements when setting their display to 'inline' and giving + // them layout + div.style.cssText = "display:inline;margin:0;border:0;padding:1px;width:1px;zoom:1"; + + support.inlineBlockNeedsLayout = val = div.offsetWidth === 3; + if ( val ) { + // Prevent IE 6 from affecting layout for positioned elements #11048 + // Prevent IE from shrinking the body in IE 7 mode #12869 + // Support: IE<8 + body.style.zoom = 1; + } + } + + body.removeChild( container ); +}); + + + + +(function() { + var div = document.createElement( "div" ); + + // Execute the test only if not already executed in another module. + if (support.deleteExpando == null) { + // Support: IE<9 + support.deleteExpando = true; + try { + delete div.test; + } catch( e ) { + support.deleteExpando = false; + } + } + + // Null elements to avoid leaks in IE. 
+ div = null; +})(); + + +/** + * Determines whether an object can have data + */ +jQuery.acceptData = function( elem ) { + var noData = jQuery.noData[ (elem.nodeName + " ").toLowerCase() ], + nodeType = +elem.nodeType || 1; + + // Do not set data on non-element DOM nodes because it will not be cleared (#8335). + return nodeType !== 1 && nodeType !== 9 ? + false : + + // Nodes accept data unless otherwise specified; rejection can be conditional + !noData || noData !== true && elem.getAttribute("classid") === noData; +}; + + +var rbrace = /^(?:\{[\w\W]*\}|\[[\w\W]*\])$/, + rmultiDash = /([A-Z])/g; + +function dataAttr( elem, key, data ) { + // If nothing was found internally, try to fetch any + // data from the HTML5 data-* attribute + if ( data === undefined && elem.nodeType === 1 ) { + + var name = "data-" + key.replace( rmultiDash, "-$1" ).toLowerCase(); + + data = elem.getAttribute( name ); + + if ( typeof data === "string" ) { + try { + data = data === "true" ? true : + data === "false" ? false : + data === "null" ? null : + // Only convert to a number if it doesn't change the string + +data + "" === data ? +data : + rbrace.test( data ) ? jQuery.parseJSON( data ) : + data; + } catch( e ) {} + + // Make sure we set the data so it isn't changed later + jQuery.data( elem, key, data ); + + } else { + data = undefined; + } + } + + return data; +} + +// checks a cache object for emptiness +function isEmptyDataObject( obj ) { + var name; + for ( name in obj ) { + + // if the public data object is empty, the private is still empty + if ( name === "data" && jQuery.isEmptyObject( obj[name] ) ) { + continue; + } + if ( name !== "toJSON" ) { + return false; + } + } + + return true; +} + +function internalData( elem, name, data, pvt /* Internal Use Only */ ) { + if ( !jQuery.acceptData( elem ) ) { + return; + } + + var ret, thisCache, + internalKey = jQuery.expando, + + // We have to handle DOM nodes and JS objects differently because IE6-7 + // can't GC object references properly across the DOM-JS boundary + isNode = elem.nodeType, + + // Only DOM nodes need the global jQuery cache; JS object data is + // attached directly to the object so GC can occur automatically + cache = isNode ? jQuery.cache : elem, + + // Only defining an ID for JS objects if its cache already exists allows + // the code to shortcut on the same path as a DOM node with no cache + id = isNode ? elem[ internalKey ] : elem[ internalKey ] && internalKey; + + // Avoid doing any more work than we need to when trying to get data on an + // object that has no data at all + if ( (!id || !cache[id] || (!pvt && !cache[id].data)) && data === undefined && typeof name === "string" ) { + return; + } + + if ( !id ) { + // Only DOM nodes need a new unique ID for each element since their data + // ends up in the global cache + if ( isNode ) { + id = elem[ internalKey ] = deletedIds.pop() || jQuery.guid++; + } else { + id = internalKey; + } + } + + if ( !cache[ id ] ) { + // Avoid exposing jQuery metadata on plain JS objects when the object + // is serialized using JSON.stringify + cache[ id ] = isNode ? 
{} : { toJSON: jQuery.noop }; + } + + // An object can be passed to jQuery.data instead of a key/value pair; this gets + // shallow copied over onto the existing cache + if ( typeof name === "object" || typeof name === "function" ) { + if ( pvt ) { + cache[ id ] = jQuery.extend( cache[ id ], name ); + } else { + cache[ id ].data = jQuery.extend( cache[ id ].data, name ); + } + } + + thisCache = cache[ id ]; + + // jQuery data() is stored in a separate object inside the object's internal data + // cache in order to avoid key collisions between internal data and user-defined + // data. + if ( !pvt ) { + if ( !thisCache.data ) { + thisCache.data = {}; + } + + thisCache = thisCache.data; + } + + if ( data !== undefined ) { + thisCache[ jQuery.camelCase( name ) ] = data; + } + + // Check for both converted-to-camel and non-converted data property names + // If a data property was specified + if ( typeof name === "string" ) { + + // First Try to find as-is property data + ret = thisCache[ name ]; + + // Test for null|undefined property data + if ( ret == null ) { + + // Try to find the camelCased property + ret = thisCache[ jQuery.camelCase( name ) ]; + } + } else { + ret = thisCache; + } + + return ret; +} + +function internalRemoveData( elem, name, pvt ) { + if ( !jQuery.acceptData( elem ) ) { + return; + } + + var thisCache, i, + isNode = elem.nodeType, + + // See jQuery.data for more information + cache = isNode ? jQuery.cache : elem, + id = isNode ? elem[ jQuery.expando ] : jQuery.expando; + + // If there is already no cache entry for this object, there is no + // purpose in continuing + if ( !cache[ id ] ) { + return; + } + + if ( name ) { + + thisCache = pvt ? cache[ id ] : cache[ id ].data; + + if ( thisCache ) { + + // Support array or space separated string names for data keys + if ( !jQuery.isArray( name ) ) { + + // try the string as a key before any manipulation + if ( name in thisCache ) { + name = [ name ]; + } else { + + // split the camel cased version by spaces unless a key with the spaces exists + name = jQuery.camelCase( name ); + if ( name in thisCache ) { + name = [ name ]; + } else { + name = name.split(" "); + } + } + } else { + // If "name" is an array of keys... + // When data is initially created, via ("key", "val") signature, + // keys will be converted to camelCase. + // Since there is no way to tell _how_ a key was added, remove + // both plain key and camelCase key. #12786 + // This will only penalize the array argument path. + name = name.concat( jQuery.map( name, jQuery.camelCase ) ); + } + + i = name.length; + while ( i-- ) { + delete thisCache[ name[i] ]; + } + + // If there is no data left in the cache, we want to continue + // and let the cache object itself get destroyed + if ( pvt ? 
!isEmptyDataObject(thisCache) : !jQuery.isEmptyObject(thisCache) ) { + return; + } + } + } + + // See jQuery.data for more information + if ( !pvt ) { + delete cache[ id ].data; + + // Don't destroy the parent cache unless the internal data object + // had been the only thing left in it + if ( !isEmptyDataObject( cache[ id ] ) ) { + return; + } + } + + // Destroy the cache + if ( isNode ) { + jQuery.cleanData( [ elem ], true ); + + // Use delete when supported for expandos or `cache` is not a window per isWindow (#10080) + /* jshint eqeqeq: false */ + } else if ( support.deleteExpando || cache != cache.window ) { + /* jshint eqeqeq: true */ + delete cache[ id ]; + + // When all else fails, null + } else { + cache[ id ] = null; + } +} + +jQuery.extend({ + cache: {}, + + // The following elements (space-suffixed to avoid Object.prototype collisions) + // throw uncatchable exceptions if you attempt to set expando properties + noData: { + "applet ": true, + "embed ": true, + // ...but Flash objects (which have this classid) *can* handle expandos + "object ": "clsid:D27CDB6E-AE6D-11cf-96B8-444553540000" + }, + + hasData: function( elem ) { + elem = elem.nodeType ? jQuery.cache[ elem[jQuery.expando] ] : elem[ jQuery.expando ]; + return !!elem && !isEmptyDataObject( elem ); + }, + + data: function( elem, name, data ) { + return internalData( elem, name, data ); + }, + + removeData: function( elem, name ) { + return internalRemoveData( elem, name ); + }, + + // For internal use only. + _data: function( elem, name, data ) { + return internalData( elem, name, data, true ); + }, + + _removeData: function( elem, name ) { + return internalRemoveData( elem, name, true ); + } +}); + +jQuery.fn.extend({ + data: function( key, value ) { + var i, name, data, + elem = this[0], + attrs = elem && elem.attributes; + + // Special expections of .data basically thwart jQuery.access, + // so implement the relevant behavior ourselves + + // Gets all values + if ( key === undefined ) { + if ( this.length ) { + data = jQuery.data( elem ); + + if ( elem.nodeType === 1 && !jQuery._data( elem, "parsedAttrs" ) ) { + i = attrs.length; + while ( i-- ) { + + // Support: IE11+ + // The attrs elements can be null (#14894) + if ( attrs[ i ] ) { + name = attrs[ i ].name; + if ( name.indexOf( "data-" ) === 0 ) { + name = jQuery.camelCase( name.slice(5) ); + dataAttr( elem, name, data[ name ] ); + } + } + } + jQuery._data( elem, "parsedAttrs", true ); + } + } + + return data; + } + + // Sets multiple values + if ( typeof key === "object" ) { + return this.each(function() { + jQuery.data( this, key ); + }); + } + + return arguments.length > 1 ? + + // Sets one value + this.each(function() { + jQuery.data( this, key, value ); + }) : + + // Gets one value + // Try to fetch any internally stored data first + elem ? 
dataAttr( elem, key, jQuery.data( elem, key ) ) : undefined; + }, + + removeData: function( key ) { + return this.each(function() { + jQuery.removeData( this, key ); + }); + } +}); + + +jQuery.extend({ + queue: function( elem, type, data ) { + var queue; + + if ( elem ) { + type = ( type || "fx" ) + "queue"; + queue = jQuery._data( elem, type ); + + // Speed up dequeue by getting out quickly if this is just a lookup + if ( data ) { + if ( !queue || jQuery.isArray(data) ) { + queue = jQuery._data( elem, type, jQuery.makeArray(data) ); + } else { + queue.push( data ); + } + } + return queue || []; + } + }, + + dequeue: function( elem, type ) { + type = type || "fx"; + + var queue = jQuery.queue( elem, type ), + startLength = queue.length, + fn = queue.shift(), + hooks = jQuery._queueHooks( elem, type ), + next = function() { + jQuery.dequeue( elem, type ); + }; + + // If the fx queue is dequeued, always remove the progress sentinel + if ( fn === "inprogress" ) { + fn = queue.shift(); + startLength--; + } + + if ( fn ) { + + // Add a progress sentinel to prevent the fx queue from being + // automatically dequeued + if ( type === "fx" ) { + queue.unshift( "inprogress" ); + } + + // clear up the last queue stop function + delete hooks.stop; + fn.call( elem, next, hooks ); + } + + if ( !startLength && hooks ) { + hooks.empty.fire(); + } + }, + + // not intended for public consumption - generates a queueHooks object, or returns the current one + _queueHooks: function( elem, type ) { + var key = type + "queueHooks"; + return jQuery._data( elem, key ) || jQuery._data( elem, key, { + empty: jQuery.Callbacks("once memory").add(function() { + jQuery._removeData( elem, type + "queue" ); + jQuery._removeData( elem, key ); + }) + }); + } +}); + +jQuery.fn.extend({ + queue: function( type, data ) { + var setter = 2; + + if ( typeof type !== "string" ) { + data = type; + type = "fx"; + setter--; + } + + if ( arguments.length < setter ) { + return jQuery.queue( this[0], type ); + } + + return data === undefined ? 
+ this : + this.each(function() { + var queue = jQuery.queue( this, type, data ); + + // ensure a hooks for this queue + jQuery._queueHooks( this, type ); + + if ( type === "fx" && queue[0] !== "inprogress" ) { + jQuery.dequeue( this, type ); + } + }); + }, + dequeue: function( type ) { + return this.each(function() { + jQuery.dequeue( this, type ); + }); + }, + clearQueue: function( type ) { + return this.queue( type || "fx", [] ); + }, + // Get a promise resolved when queues of a certain type + // are emptied (fx is the type by default) + promise: function( type, obj ) { + var tmp, + count = 1, + defer = jQuery.Deferred(), + elements = this, + i = this.length, + resolve = function() { + if ( !( --count ) ) { + defer.resolveWith( elements, [ elements ] ); + } + }; + + if ( typeof type !== "string" ) { + obj = type; + type = undefined; + } + type = type || "fx"; + + while ( i-- ) { + tmp = jQuery._data( elements[ i ], type + "queueHooks" ); + if ( tmp && tmp.empty ) { + count++; + tmp.empty.add( resolve ); + } + } + resolve(); + return defer.promise( obj ); + } +}); +var pnum = (/[+-]?(?:\d*\.|)\d+(?:[eE][+-]?\d+|)/).source; + +var cssExpand = [ "Top", "Right", "Bottom", "Left" ]; + +var isHidden = function( elem, el ) { + // isHidden might be called from jQuery#filter function; + // in that case, element will be second argument + elem = el || elem; + return jQuery.css( elem, "display" ) === "none" || !jQuery.contains( elem.ownerDocument, elem ); + }; + + + +// Multifunctional method to get and set values of a collection +// The value/s can optionally be executed if it's a function +var access = jQuery.access = function( elems, fn, key, value, chainable, emptyGet, raw ) { + var i = 0, + length = elems.length, + bulk = key == null; + + // Sets many values + if ( jQuery.type( key ) === "object" ) { + chainable = true; + for ( i in key ) { + jQuery.access( elems, fn, i, key[i], true, emptyGet, raw ); + } + + // Sets one value + } else if ( value !== undefined ) { + chainable = true; + + if ( !jQuery.isFunction( value ) ) { + raw = true; + } + + if ( bulk ) { + // Bulk operations run against the entire set + if ( raw ) { + fn.call( elems, value ); + fn = null; + + // ...except when executing function values + } else { + bulk = fn; + fn = function( elem, key, value ) { + return bulk.call( jQuery( elem ), value ); + }; + } + } + + if ( fn ) { + for ( ; i < length; i++ ) { + fn( elems[i], key, raw ? value : value.call( elems[i], i, fn( elems[i], key ) ) ); + } + } + } + + return chainable ? + elems : + + // Gets + bulk ? + fn.call( elems ) : + length ? fn( elems[0], key ) : emptyGet; +}; +var rcheckableType = (/^(?:checkbox|radio)$/i); + + + +(function() { + // Minified: var a,b,c + var input = document.createElement( "input" ), + div = document.createElement( "div" ), + fragment = document.createDocumentFragment(); + + // Setup + div.innerHTML = "
a"; + + // IE strips leading whitespace when .innerHTML is used + support.leadingWhitespace = div.firstChild.nodeType === 3; + + // Make sure that tbody elements aren't automatically inserted + // IE will insert them into empty tables + support.tbody = !div.getElementsByTagName( "tbody" ).length; + + // Make sure that link elements get serialized correctly by innerHTML + // This requires a wrapper element in IE + support.htmlSerialize = !!div.getElementsByTagName( "link" ).length; + + // Makes sure cloning an html5 element does not cause problems + // Where outerHTML is undefined, this still works + support.html5Clone = + document.createElement( "nav" ).cloneNode( true ).outerHTML !== "<:nav>"; + + // Check if a disconnected checkbox will retain its checked + // value of true after appended to the DOM (IE6/7) + input.type = "checkbox"; + input.checked = true; + fragment.appendChild( input ); + support.appendChecked = input.checked; + + // Make sure textarea (and checkbox) defaultValue is properly cloned + // Support: IE6-IE11+ + div.innerHTML = ""; + support.noCloneChecked = !!div.cloneNode( true ).lastChild.defaultValue; + + // #11217 - WebKit loses check when the name is after the checked attribute + fragment.appendChild( div ); + div.innerHTML = ""; + + // Support: Safari 5.1, iOS 5.1, Android 4.x, Android 2.3 + // old WebKit doesn't clone checked state correctly in fragments + support.checkClone = div.cloneNode( true ).cloneNode( true ).lastChild.checked; + + // Support: IE<9 + // Opera does not clone events (and typeof div.attachEvent === undefined). + // IE9-10 clones events bound via attachEvent, but they don't trigger with .click() + support.noCloneEvent = true; + if ( div.attachEvent ) { + div.attachEvent( "onclick", function() { + support.noCloneEvent = false; + }); + + div.cloneNode( true ).click(); + } + + // Execute the test only if not already executed in another module. + if (support.deleteExpando == null) { + // Support: IE<9 + support.deleteExpando = true; + try { + delete div.test; + } catch( e ) { + support.deleteExpando = false; + } + } +})(); + + +(function() { + var i, eventName, + div = document.createElement( "div" ); + + // Support: IE<9 (lack submit/change bubble), Firefox 23+ (lack focusin event) + for ( i in { submit: true, change: true, focusin: true }) { + eventName = "on" + i; + + if ( !(support[ i + "Bubbles" ] = eventName in window) ) { + // Beware of CSP restrictions (https://developer.mozilla.org/en/Security/CSP) + div.setAttribute( eventName, "t" ); + support[ i + "Bubbles" ] = div.attributes[ eventName ].expando === false; + } + } + + // Null elements to avoid leaks in IE. + div = null; +})(); + + +var rformElems = /^(?:input|select|textarea)$/i, + rkeyEvent = /^key/, + rmouseEvent = /^(?:mouse|pointer|contextmenu)|click/, + rfocusMorph = /^(?:focusinfocus|focusoutblur)$/, + rtypenamespace = /^([^.]*)(?:\.(.+)|)$/; + +function returnTrue() { + return true; +} + +function returnFalse() { + return false; +} + +function safeActiveElement() { + try { + return document.activeElement; + } catch ( err ) { } +} + +/* + * Helper functions for managing events -- not part of the public interface. + * Props to Dean Edwards' addEvent library for many of the ideas. 
+ */ +jQuery.event = { + + global: {}, + + add: function( elem, types, handler, data, selector ) { + var tmp, events, t, handleObjIn, + special, eventHandle, handleObj, + handlers, type, namespaces, origType, + elemData = jQuery._data( elem ); + + // Don't attach events to noData or text/comment nodes (but allow plain objects) + if ( !elemData ) { + return; + } + + // Caller can pass in an object of custom data in lieu of the handler + if ( handler.handler ) { + handleObjIn = handler; + handler = handleObjIn.handler; + selector = handleObjIn.selector; + } + + // Make sure that the handler has a unique ID, used to find/remove it later + if ( !handler.guid ) { + handler.guid = jQuery.guid++; + } + + // Init the element's event structure and main handler, if this is the first + if ( !(events = elemData.events) ) { + events = elemData.events = {}; + } + if ( !(eventHandle = elemData.handle) ) { + eventHandle = elemData.handle = function( e ) { + // Discard the second event of a jQuery.event.trigger() and + // when an event is called after a page has unloaded + return typeof jQuery !== strundefined && (!e || jQuery.event.triggered !== e.type) ? + jQuery.event.dispatch.apply( eventHandle.elem, arguments ) : + undefined; + }; + // Add elem as a property of the handle fn to prevent a memory leak with IE non-native events + eventHandle.elem = elem; + } + + // Handle multiple events separated by a space + types = ( types || "" ).match( rnotwhite ) || [ "" ]; + t = types.length; + while ( t-- ) { + tmp = rtypenamespace.exec( types[t] ) || []; + type = origType = tmp[1]; + namespaces = ( tmp[2] || "" ).split( "." ).sort(); + + // There *must* be a type, no attaching namespace-only handlers + if ( !type ) { + continue; + } + + // If event changes its type, use the special event handlers for the changed type + special = jQuery.event.special[ type ] || {}; + + // If selector defined, determine special event api type, otherwise given type + type = ( selector ? 
special.delegateType : special.bindType ) || type; + + // Update special based on newly reset type + special = jQuery.event.special[ type ] || {}; + + // handleObj is passed to all event handlers + handleObj = jQuery.extend({ + type: type, + origType: origType, + data: data, + handler: handler, + guid: handler.guid, + selector: selector, + needsContext: selector && jQuery.expr.match.needsContext.test( selector ), + namespace: namespaces.join(".") + }, handleObjIn ); + + // Init the event handler queue if we're the first + if ( !(handlers = events[ type ]) ) { + handlers = events[ type ] = []; + handlers.delegateCount = 0; + + // Only use addEventListener/attachEvent if the special events handler returns false + if ( !special.setup || special.setup.call( elem, data, namespaces, eventHandle ) === false ) { + // Bind the global event handler to the element + if ( elem.addEventListener ) { + elem.addEventListener( type, eventHandle, false ); + + } else if ( elem.attachEvent ) { + elem.attachEvent( "on" + type, eventHandle ); + } + } + } + + if ( special.add ) { + special.add.call( elem, handleObj ); + + if ( !handleObj.handler.guid ) { + handleObj.handler.guid = handler.guid; + } + } + + // Add to the element's handler list, delegates in front + if ( selector ) { + handlers.splice( handlers.delegateCount++, 0, handleObj ); + } else { + handlers.push( handleObj ); + } + + // Keep track of which events have ever been used, for event optimization + jQuery.event.global[ type ] = true; + } + + // Nullify elem to prevent memory leaks in IE + elem = null; + }, + + // Detach an event or set of events from an element + remove: function( elem, types, handler, selector, mappedTypes ) { + var j, handleObj, tmp, + origCount, t, events, + special, handlers, type, + namespaces, origType, + elemData = jQuery.hasData( elem ) && jQuery._data( elem ); + + if ( !elemData || !(events = elemData.events) ) { + return; + } + + // Once for each type.namespace in types; type may be omitted + types = ( types || "" ).match( rnotwhite ) || [ "" ]; + t = types.length; + while ( t-- ) { + tmp = rtypenamespace.exec( types[t] ) || []; + type = origType = tmp[1]; + namespaces = ( tmp[2] || "" ).split( "." ).sort(); + + // Unbind all events (on this namespace, if provided) for the element + if ( !type ) { + for ( type in events ) { + jQuery.event.remove( elem, type + types[ t ], handler, selector, true ); + } + continue; + } + + special = jQuery.event.special[ type ] || {}; + type = ( selector ? 
special.delegateType : special.bindType ) || type; + handlers = events[ type ] || []; + tmp = tmp[2] && new RegExp( "(^|\\.)" + namespaces.join("\\.(?:.*\\.|)") + "(\\.|$)" ); + + // Remove matching events + origCount = j = handlers.length; + while ( j-- ) { + handleObj = handlers[ j ]; + + if ( ( mappedTypes || origType === handleObj.origType ) && + ( !handler || handler.guid === handleObj.guid ) && + ( !tmp || tmp.test( handleObj.namespace ) ) && + ( !selector || selector === handleObj.selector || selector === "**" && handleObj.selector ) ) { + handlers.splice( j, 1 ); + + if ( handleObj.selector ) { + handlers.delegateCount--; + } + if ( special.remove ) { + special.remove.call( elem, handleObj ); + } + } + } + + // Remove generic event handler if we removed something and no more handlers exist + // (avoids potential for endless recursion during removal of special event handlers) + if ( origCount && !handlers.length ) { + if ( !special.teardown || special.teardown.call( elem, namespaces, elemData.handle ) === false ) { + jQuery.removeEvent( elem, type, elemData.handle ); + } + + delete events[ type ]; + } + } + + // Remove the expando if it's no longer used + if ( jQuery.isEmptyObject( events ) ) { + delete elemData.handle; + + // removeData also checks for emptiness and clears the expando if empty + // so use it instead of delete + jQuery._removeData( elem, "events" ); + } + }, + + trigger: function( event, data, elem, onlyHandlers ) { + var handle, ontype, cur, + bubbleType, special, tmp, i, + eventPath = [ elem || document ], + type = hasOwn.call( event, "type" ) ? event.type : event, + namespaces = hasOwn.call( event, "namespace" ) ? event.namespace.split(".") : []; + + cur = tmp = elem = elem || document; + + // Don't do events on text and comment nodes + if ( elem.nodeType === 3 || elem.nodeType === 8 ) { + return; + } + + // focus/blur morphs to focusin/out; ensure we're not firing them right now + if ( rfocusMorph.test( type + jQuery.event.triggered ) ) { + return; + } + + if ( type.indexOf(".") >= 0 ) { + // Namespaced trigger; create a regexp to match event type in handle() + namespaces = type.split("."); + type = namespaces.shift(); + namespaces.sort(); + } + ontype = type.indexOf(":") < 0 && "on" + type; + + // Caller can pass in a jQuery.Event object, Object, or just an event type string + event = event[ jQuery.expando ] ? + event : + new jQuery.Event( type, typeof event === "object" && event ); + + // Trigger bitmask: & 1 for native handlers; & 2 for jQuery (always true) + event.isTrigger = onlyHandlers ? 2 : 3; + event.namespace = namespaces.join("."); + event.namespace_re = event.namespace ? + new RegExp( "(^|\\.)" + namespaces.join("\\.(?:.*\\.|)") + "(\\.|$)" ) : + null; + + // Clean up the event in case it is being reused + event.result = undefined; + if ( !event.target ) { + event.target = elem; + } + + // Clone any incoming data and prepend the event, creating the handler arg list + data = data == null ? 
+ [ event ] : + jQuery.makeArray( data, [ event ] ); + + // Allow special events to draw outside the lines + special = jQuery.event.special[ type ] || {}; + if ( !onlyHandlers && special.trigger && special.trigger.apply( elem, data ) === false ) { + return; + } + + // Determine event propagation path in advance, per W3C events spec (#9951) + // Bubble up to document, then to window; watch for a global ownerDocument var (#9724) + if ( !onlyHandlers && !special.noBubble && !jQuery.isWindow( elem ) ) { + + bubbleType = special.delegateType || type; + if ( !rfocusMorph.test( bubbleType + type ) ) { + cur = cur.parentNode; + } + for ( ; cur; cur = cur.parentNode ) { + eventPath.push( cur ); + tmp = cur; + } + + // Only add window if we got to document (e.g., not plain obj or detached DOM) + if ( tmp === (elem.ownerDocument || document) ) { + eventPath.push( tmp.defaultView || tmp.parentWindow || window ); + } + } + + // Fire handlers on the event path + i = 0; + while ( (cur = eventPath[i++]) && !event.isPropagationStopped() ) { + + event.type = i > 1 ? + bubbleType : + special.bindType || type; + + // jQuery handler + handle = ( jQuery._data( cur, "events" ) || {} )[ event.type ] && jQuery._data( cur, "handle" ); + if ( handle ) { + handle.apply( cur, data ); + } + + // Native handler + handle = ontype && cur[ ontype ]; + if ( handle && handle.apply && jQuery.acceptData( cur ) ) { + event.result = handle.apply( cur, data ); + if ( event.result === false ) { + event.preventDefault(); + } + } + } + event.type = type; + + // If nobody prevented the default action, do it now + if ( !onlyHandlers && !event.isDefaultPrevented() ) { + + if ( (!special._default || special._default.apply( eventPath.pop(), data ) === false) && + jQuery.acceptData( elem ) ) { + + // Call a native DOM method on the target with the same name name as the event. + // Can't use an .isFunction() check here because IE6/7 fails that test. 
+ // Don't do default actions on window, that's where global variables be (#6170) + if ( ontype && elem[ type ] && !jQuery.isWindow( elem ) ) { + + // Don't re-trigger an onFOO event when we call its FOO() method + tmp = elem[ ontype ]; + + if ( tmp ) { + elem[ ontype ] = null; + } + + // Prevent re-triggering of the same event, since we already bubbled it above + jQuery.event.triggered = type; + try { + elem[ type ](); + } catch ( e ) { + // IE<9 dies on focus/blur to hidden element (#1486,#12518) + // only reproducible on winXP IE8 native, not IE9 in IE8 mode + } + jQuery.event.triggered = undefined; + + if ( tmp ) { + elem[ ontype ] = tmp; + } + } + } + } + + return event.result; + }, + + dispatch: function( event ) { + + // Make a writable jQuery.Event from the native event object + event = jQuery.event.fix( event ); + + var i, ret, handleObj, matched, j, + handlerQueue = [], + args = slice.call( arguments ), + handlers = ( jQuery._data( this, "events" ) || {} )[ event.type ] || [], + special = jQuery.event.special[ event.type ] || {}; + + // Use the fix-ed jQuery.Event rather than the (read-only) native event + args[0] = event; + event.delegateTarget = this; + + // Call the preDispatch hook for the mapped type, and let it bail if desired + if ( special.preDispatch && special.preDispatch.call( this, event ) === false ) { + return; + } + + // Determine handlers + handlerQueue = jQuery.event.handlers.call( this, event, handlers ); + + // Run delegates first; they may want to stop propagation beneath us + i = 0; + while ( (matched = handlerQueue[ i++ ]) && !event.isPropagationStopped() ) { + event.currentTarget = matched.elem; + + j = 0; + while ( (handleObj = matched.handlers[ j++ ]) && !event.isImmediatePropagationStopped() ) { + + // Triggered event must either 1) have no namespace, or + // 2) have namespace(s) a subset or equal to those in the bound event (both can have no namespace). + if ( !event.namespace_re || event.namespace_re.test( handleObj.namespace ) ) { + + event.handleObj = handleObj; + event.data = handleObj.data; + + ret = ( (jQuery.event.special[ handleObj.origType ] || {}).handle || handleObj.handler ) + .apply( matched.elem, args ); + + if ( ret !== undefined ) { + if ( (event.result = ret) === false ) { + event.preventDefault(); + event.stopPropagation(); + } + } + } + } + } + + // Call the postDispatch hook for the mapped type + if ( special.postDispatch ) { + special.postDispatch.call( this, event ); + } + + return event.result; + }, + + handlers: function( event, handlers ) { + var sel, handleObj, matches, i, + handlerQueue = [], + delegateCount = handlers.delegateCount, + cur = event.target; + + // Find delegate handlers + // Black-hole SVG instance trees (#13180) + // Avoid non-left-click bubbling in Firefox (#3861) + if ( delegateCount && cur.nodeType && (!event.button || event.type !== "click") ) { + + /* jshint eqeqeq: false */ + for ( ; cur != this; cur = cur.parentNode || this ) { + /* jshint eqeqeq: true */ + + // Don't check non-elements (#13208) + // Don't process clicks on disabled elements (#6911, #8165, #11382, #11764) + if ( cur.nodeType === 1 && (cur.disabled !== true || event.type !== "click") ) { + matches = []; + for ( i = 0; i < delegateCount; i++ ) { + handleObj = handlers[ i ]; + + // Don't conflict with Object.prototype properties (#13203) + sel = handleObj.selector + " "; + + if ( matches[ sel ] === undefined ) { + matches[ sel ] = handleObj.needsContext ? 
+ jQuery( sel, this ).index( cur ) >= 0 : + jQuery.find( sel, this, null, [ cur ] ).length; + } + if ( matches[ sel ] ) { + matches.push( handleObj ); + } + } + if ( matches.length ) { + handlerQueue.push({ elem: cur, handlers: matches }); + } + } + } + } + + // Add the remaining (directly-bound) handlers + if ( delegateCount < handlers.length ) { + handlerQueue.push({ elem: this, handlers: handlers.slice( delegateCount ) }); + } + + return handlerQueue; + }, + + fix: function( event ) { + if ( event[ jQuery.expando ] ) { + return event; + } + + // Create a writable copy of the event object and normalize some properties + var i, prop, copy, + type = event.type, + originalEvent = event, + fixHook = this.fixHooks[ type ]; + + if ( !fixHook ) { + this.fixHooks[ type ] = fixHook = + rmouseEvent.test( type ) ? this.mouseHooks : + rkeyEvent.test( type ) ? this.keyHooks : + {}; + } + copy = fixHook.props ? this.props.concat( fixHook.props ) : this.props; + + event = new jQuery.Event( originalEvent ); + + i = copy.length; + while ( i-- ) { + prop = copy[ i ]; + event[ prop ] = originalEvent[ prop ]; + } + + // Support: IE<9 + // Fix target property (#1925) + if ( !event.target ) { + event.target = originalEvent.srcElement || document; + } + + // Support: Chrome 23+, Safari? + // Target should not be a text node (#504, #13143) + if ( event.target.nodeType === 3 ) { + event.target = event.target.parentNode; + } + + // Support: IE<9 + // For mouse/key events, metaKey==false if it's undefined (#3368, #11328) + event.metaKey = !!event.metaKey; + + return fixHook.filter ? fixHook.filter( event, originalEvent ) : event; + }, + + // Includes some event props shared by KeyEvent and MouseEvent + props: "altKey bubbles cancelable ctrlKey currentTarget eventPhase metaKey relatedTarget shiftKey target timeStamp view which".split(" "), + + fixHooks: {}, + + keyHooks: { + props: "char charCode key keyCode".split(" "), + filter: function( event, original ) { + + // Add which for key events + if ( event.which == null ) { + event.which = original.charCode != null ? original.charCode : original.keyCode; + } + + return event; + } + }, + + mouseHooks: { + props: "button buttons clientX clientY fromElement offsetX offsetY pageX pageY screenX screenY toElement".split(" "), + filter: function( event, original ) { + var body, eventDoc, doc, + button = original.button, + fromElement = original.fromElement; + + // Calculate pageX/Y if missing and clientX/Y available + if ( event.pageX == null && original.clientX != null ) { + eventDoc = event.target.ownerDocument || document; + doc = eventDoc.documentElement; + body = eventDoc.body; + + event.pageX = original.clientX + ( doc && doc.scrollLeft || body && body.scrollLeft || 0 ) - ( doc && doc.clientLeft || body && body.clientLeft || 0 ); + event.pageY = original.clientY + ( doc && doc.scrollTop || body && body.scrollTop || 0 ) - ( doc && doc.clientTop || body && body.clientTop || 0 ); + } + + // Add relatedTarget, if necessary + if ( !event.relatedTarget && fromElement ) { + event.relatedTarget = fromElement === event.target ? original.toElement : fromElement; + } + + // Add which for click: 1 === left; 2 === middle; 3 === right + // Note: button is not normalized, so don't use it + if ( !event.which && button !== undefined ) { + event.which = ( button & 1 ? 1 : ( button & 2 ? 3 : ( button & 4 ? 
2 : 0 ) ) ); + } + + return event; + } + }, + + special: { + load: { + // Prevent triggered image.load events from bubbling to window.load + noBubble: true + }, + focus: { + // Fire native event if possible so blur/focus sequence is correct + trigger: function() { + if ( this !== safeActiveElement() && this.focus ) { + try { + this.focus(); + return false; + } catch ( e ) { + // Support: IE<9 + // If we error on focus to hidden element (#1486, #12518), + // let .trigger() run the handlers + } + } + }, + delegateType: "focusin" + }, + blur: { + trigger: function() { + if ( this === safeActiveElement() && this.blur ) { + this.blur(); + return false; + } + }, + delegateType: "focusout" + }, + click: { + // For checkbox, fire native event so checked state will be right + trigger: function() { + if ( jQuery.nodeName( this, "input" ) && this.type === "checkbox" && this.click ) { + this.click(); + return false; + } + }, + + // For cross-browser consistency, don't fire native .click() on links + _default: function( event ) { + return jQuery.nodeName( event.target, "a" ); + } + }, + + beforeunload: { + postDispatch: function( event ) { + + // Support: Firefox 20+ + // Firefox doesn't alert if the returnValue field is not set. + if ( event.result !== undefined && event.originalEvent ) { + event.originalEvent.returnValue = event.result; + } + } + } + }, + + simulate: function( type, elem, event, bubble ) { + // Piggyback on a donor event to simulate a different one. + // Fake originalEvent to avoid donor's stopPropagation, but if the + // simulated event prevents default then we do the same on the donor. + var e = jQuery.extend( + new jQuery.Event(), + event, + { + type: type, + isSimulated: true, + originalEvent: {} + } + ); + if ( bubble ) { + jQuery.event.trigger( e, null, elem ); + } else { + jQuery.event.dispatch.call( elem, e ); + } + if ( e.isDefaultPrevented() ) { + event.preventDefault(); + } + } +}; + +jQuery.removeEvent = document.removeEventListener ? + function( elem, type, handle ) { + if ( elem.removeEventListener ) { + elem.removeEventListener( type, handle, false ); + } + } : + function( elem, type, handle ) { + var name = "on" + type; + + if ( elem.detachEvent ) { + + // #8545, #7054, preventing memory leaks for custom events in IE6-8 + // detachEvent needed property on element, by name of that event, to properly expose it to GC + if ( typeof elem[ name ] === strundefined ) { + elem[ name ] = null; + } + + elem.detachEvent( name, handle ); + } + }; + +jQuery.Event = function( src, props ) { + // Allow instantiation without the 'new' keyword + if ( !(this instanceof jQuery.Event) ) { + return new jQuery.Event( src, props ); + } + + // Event object + if ( src && src.type ) { + this.originalEvent = src; + this.type = src.type; + + // Events bubbling up the document may have been marked as prevented + // by a handler lower down the tree; reflect the correct value. + this.isDefaultPrevented = src.defaultPrevented || + src.defaultPrevented === undefined && + // Support: IE < 9, Android < 4.0 + src.returnValue === false ? 
+ returnTrue : + returnFalse; + + // Event type + } else { + this.type = src; + } + + // Put explicitly provided properties onto the event object + if ( props ) { + jQuery.extend( this, props ); + } + + // Create a timestamp if incoming event doesn't have one + this.timeStamp = src && src.timeStamp || jQuery.now(); + + // Mark it as fixed + this[ jQuery.expando ] = true; +}; + +// jQuery.Event is based on DOM3 Events as specified by the ECMAScript Language Binding +// http://www.w3.org/TR/2003/WD-DOM-Level-3-Events-20030331/ecma-script-binding.html +jQuery.Event.prototype = { + isDefaultPrevented: returnFalse, + isPropagationStopped: returnFalse, + isImmediatePropagationStopped: returnFalse, + + preventDefault: function() { + var e = this.originalEvent; + + this.isDefaultPrevented = returnTrue; + if ( !e ) { + return; + } + + // If preventDefault exists, run it on the original event + if ( e.preventDefault ) { + e.preventDefault(); + + // Support: IE + // Otherwise set the returnValue property of the original event to false + } else { + e.returnValue = false; + } + }, + stopPropagation: function() { + var e = this.originalEvent; + + this.isPropagationStopped = returnTrue; + if ( !e ) { + return; + } + // If stopPropagation exists, run it on the original event + if ( e.stopPropagation ) { + e.stopPropagation(); + } + + // Support: IE + // Set the cancelBubble property of the original event to true + e.cancelBubble = true; + }, + stopImmediatePropagation: function() { + var e = this.originalEvent; + + this.isImmediatePropagationStopped = returnTrue; + + if ( e && e.stopImmediatePropagation ) { + e.stopImmediatePropagation(); + } + + this.stopPropagation(); + } +}; + +// Create mouseenter/leave events using mouseover/out and event-time checks +jQuery.each({ + mouseenter: "mouseover", + mouseleave: "mouseout", + pointerenter: "pointerover", + pointerleave: "pointerout" +}, function( orig, fix ) { + jQuery.event.special[ orig ] = { + delegateType: fix, + bindType: fix, + + handle: function( event ) { + var ret, + target = this, + related = event.relatedTarget, + handleObj = event.handleObj; + + // For mousenter/leave call the handler if related is outside the target. + // NB: No relatedTarget if the mouse left/entered the browser window + if ( !related || (related !== target && !jQuery.contains( target, related )) ) { + event.type = handleObj.origType; + ret = handleObj.handler.apply( this, arguments ); + event.type = fix; + } + return ret; + } + }; +}); + +// IE submit delegation +if ( !support.submitBubbles ) { + + jQuery.event.special.submit = { + setup: function() { + // Only need this for delegated form submit events + if ( jQuery.nodeName( this, "form" ) ) { + return false; + } + + // Lazy-add a submit handler when a descendant form may potentially be submitted + jQuery.event.add( this, "click._submit keypress._submit", function( e ) { + // Node name check avoids a VML-related crash in IE (#9807) + var elem = e.target, + form = jQuery.nodeName( elem, "input" ) || jQuery.nodeName( elem, "button" ) ? 
elem.form : undefined; + if ( form && !jQuery._data( form, "submitBubbles" ) ) { + jQuery.event.add( form, "submit._submit", function( event ) { + event._submit_bubble = true; + }); + jQuery._data( form, "submitBubbles", true ); + } + }); + // return undefined since we don't need an event listener + }, + + postDispatch: function( event ) { + // If form was submitted by the user, bubble the event up the tree + if ( event._submit_bubble ) { + delete event._submit_bubble; + if ( this.parentNode && !event.isTrigger ) { + jQuery.event.simulate( "submit", this.parentNode, event, true ); + } + } + }, + + teardown: function() { + // Only need this for delegated form submit events + if ( jQuery.nodeName( this, "form" ) ) { + return false; + } + + // Remove delegated handlers; cleanData eventually reaps submit handlers attached above + jQuery.event.remove( this, "._submit" ); + } + }; +} + +// IE change delegation and checkbox/radio fix +if ( !support.changeBubbles ) { + + jQuery.event.special.change = { + + setup: function() { + + if ( rformElems.test( this.nodeName ) ) { + // IE doesn't fire change on a check/radio until blur; trigger it on click + // after a propertychange. Eat the blur-change in special.change.handle. + // This still fires onchange a second time for check/radio after blur. + if ( this.type === "checkbox" || this.type === "radio" ) { + jQuery.event.add( this, "propertychange._change", function( event ) { + if ( event.originalEvent.propertyName === "checked" ) { + this._just_changed = true; + } + }); + jQuery.event.add( this, "click._change", function( event ) { + if ( this._just_changed && !event.isTrigger ) { + this._just_changed = false; + } + // Allow triggered, simulated change events (#11500) + jQuery.event.simulate( "change", this, event, true ); + }); + } + return false; + } + // Delegated event; lazy-add a change handler on descendant inputs + jQuery.event.add( this, "beforeactivate._change", function( e ) { + var elem = e.target; + + if ( rformElems.test( elem.nodeName ) && !jQuery._data( elem, "changeBubbles" ) ) { + jQuery.event.add( elem, "change._change", function( event ) { + if ( this.parentNode && !event.isSimulated && !event.isTrigger ) { + jQuery.event.simulate( "change", this.parentNode, event, true ); + } + }); + jQuery._data( elem, "changeBubbles", true ); + } + }); + }, + + handle: function( event ) { + var elem = event.target; + + // Swallow native change events from checkbox/radio, we already triggered them above + if ( this !== elem || event.isSimulated || event.isTrigger || (elem.type !== "radio" && elem.type !== "checkbox") ) { + return event.handleObj.handler.apply( this, arguments ); + } + }, + + teardown: function() { + jQuery.event.remove( this, "._change" ); + + return !rformElems.test( this.nodeName ); + } + }; +} + +// Create "bubbling" focus and blur events +if ( !support.focusinBubbles ) { + jQuery.each({ focus: "focusin", blur: "focusout" }, function( orig, fix ) { + + // Attach a single capturing handler on the document while someone wants focusin/focusout + var handler = function( event ) { + jQuery.event.simulate( fix, event.target, jQuery.event.fix( event ), true ); + }; + + jQuery.event.special[ fix ] = { + setup: function() { + var doc = this.ownerDocument || this, + attaches = jQuery._data( doc, fix ); + + if ( !attaches ) { + doc.addEventListener( orig, handler, true ); + } + jQuery._data( doc, fix, ( attaches || 0 ) + 1 ); + }, + teardown: function() { + var doc = this.ownerDocument || this, + attaches = jQuery._data( doc, fix ) - 1; 
+ + if ( !attaches ) { + doc.removeEventListener( orig, handler, true ); + jQuery._removeData( doc, fix ); + } else { + jQuery._data( doc, fix, attaches ); + } + } + }; + }); +} + +jQuery.fn.extend({ + + on: function( types, selector, data, fn, /*INTERNAL*/ one ) { + var type, origFn; + + // Types can be a map of types/handlers + if ( typeof types === "object" ) { + // ( types-Object, selector, data ) + if ( typeof selector !== "string" ) { + // ( types-Object, data ) + data = data || selector; + selector = undefined; + } + for ( type in types ) { + this.on( type, selector, data, types[ type ], one ); + } + return this; + } + + if ( data == null && fn == null ) { + // ( types, fn ) + fn = selector; + data = selector = undefined; + } else if ( fn == null ) { + if ( typeof selector === "string" ) { + // ( types, selector, fn ) + fn = data; + data = undefined; + } else { + // ( types, data, fn ) + fn = data; + data = selector; + selector = undefined; + } + } + if ( fn === false ) { + fn = returnFalse; + } else if ( !fn ) { + return this; + } + + if ( one === 1 ) { + origFn = fn; + fn = function( event ) { + // Can use an empty set, since event contains the info + jQuery().off( event ); + return origFn.apply( this, arguments ); + }; + // Use same guid so caller can remove using origFn + fn.guid = origFn.guid || ( origFn.guid = jQuery.guid++ ); + } + return this.each( function() { + jQuery.event.add( this, types, fn, data, selector ); + }); + }, + one: function( types, selector, data, fn ) { + return this.on( types, selector, data, fn, 1 ); + }, + off: function( types, selector, fn ) { + var handleObj, type; + if ( types && types.preventDefault && types.handleObj ) { + // ( event ) dispatched jQuery.Event + handleObj = types.handleObj; + jQuery( types.delegateTarget ).off( + handleObj.namespace ? handleObj.origType + "." + handleObj.namespace : handleObj.origType, + handleObj.selector, + handleObj.handler + ); + return this; + } + if ( typeof types === "object" ) { + // ( types-object [, selector] ) + for ( type in types ) { + this.off( type, selector, types[ type ] ); + } + return this; + } + if ( selector === false || typeof selector === "function" ) { + // ( types [, fn] ) + fn = selector; + selector = undefined; + } + if ( fn === false ) { + fn = returnFalse; + } + return this.each(function() { + jQuery.event.remove( this, types, fn, selector ); + }); + }, + + trigger: function( type, data ) { + return this.each(function() { + jQuery.event.trigger( type, data, this ); + }); + }, + triggerHandler: function( type, data ) { + var elem = this[0]; + if ( elem ) { + return jQuery.event.trigger( type, data, elem, true ); + } + } +}); + + +function createSafeFragment( document ) { + var list = nodeNames.split( "|" ), + safeFrag = document.createDocumentFragment(); + + if ( safeFrag.createElement ) { + while ( list.length ) { + safeFrag.createElement( + list.pop() + ); + } + } + return safeFrag; +} + +var nodeNames = "abbr|article|aside|audio|bdi|canvas|data|datalist|details|figcaption|figure|footer|" + + "header|hgroup|mark|meter|nav|output|progress|section|summary|time|video", + rinlinejQuery = / jQuery\d+="(?:null|\d+)"/g, + rnoshimcache = new RegExp("<(?:" + nodeNames + ")[\\s/>]", "i"), + rleadingWhitespace = /^\s+/, + rxhtmlTag = /<(?!area|br|col|embed|hr|img|input|link|meta|param)(([\w:]+)[^>]*)\/>/gi, + rtagName = /<([\w:]+)/, + rtbody = /\s*$/g, + + // We have to close these tags to support XHTML (#13200) + wrapMap = { + option: [ 1, "" ], + legend: [ 1, "
", "
" ], + area: [ 1, "", "" ], + param: [ 1, "", "" ], + thead: [ 1, "", "
" ], + tr: [ 2, "", "
" ], + col: [ 2, "", "
" ], + td: [ 3, "", "
" ], + + // IE6-8 can't serialize link, script, style, or any html5 (NoScope) tags, + // unless wrapped in a div with non-breaking characters in front of it. + _default: support.htmlSerialize ? [ 0, "", "" ] : [ 1, "X
", "
" ] + }, + safeFragment = createSafeFragment( document ), + fragmentDiv = safeFragment.appendChild( document.createElement("div") ); + +wrapMap.optgroup = wrapMap.option; +wrapMap.tbody = wrapMap.tfoot = wrapMap.colgroup = wrapMap.caption = wrapMap.thead; +wrapMap.th = wrapMap.td; + +function getAll( context, tag ) { + var elems, elem, + i = 0, + found = typeof context.getElementsByTagName !== strundefined ? context.getElementsByTagName( tag || "*" ) : + typeof context.querySelectorAll !== strundefined ? context.querySelectorAll( tag || "*" ) : + undefined; + + if ( !found ) { + for ( found = [], elems = context.childNodes || context; (elem = elems[i]) != null; i++ ) { + if ( !tag || jQuery.nodeName( elem, tag ) ) { + found.push( elem ); + } else { + jQuery.merge( found, getAll( elem, tag ) ); + } + } + } + + return tag === undefined || tag && jQuery.nodeName( context, tag ) ? + jQuery.merge( [ context ], found ) : + found; +} + +// Used in buildFragment, fixes the defaultChecked property +function fixDefaultChecked( elem ) { + if ( rcheckableType.test( elem.type ) ) { + elem.defaultChecked = elem.checked; + } +} + +// Support: IE<8 +// Manipulating tables requires a tbody +function manipulationTarget( elem, content ) { + return jQuery.nodeName( elem, "table" ) && + jQuery.nodeName( content.nodeType !== 11 ? content : content.firstChild, "tr" ) ? + + elem.getElementsByTagName("tbody")[0] || + elem.appendChild( elem.ownerDocument.createElement("tbody") ) : + elem; +} + +// Replace/restore the type attribute of script elements for safe DOM manipulation +function disableScript( elem ) { + elem.type = (jQuery.find.attr( elem, "type" ) !== null) + "/" + elem.type; + return elem; +} +function restoreScript( elem ) { + var match = rscriptTypeMasked.exec( elem.type ); + if ( match ) { + elem.type = match[1]; + } else { + elem.removeAttribute("type"); + } + return elem; +} + +// Mark scripts as having already been evaluated +function setGlobalEval( elems, refElements ) { + var elem, + i = 0; + for ( ; (elem = elems[i]) != null; i++ ) { + jQuery._data( elem, "globalEval", !refElements || jQuery._data( refElements[i], "globalEval" ) ); + } +} + +function cloneCopyEvent( src, dest ) { + + if ( dest.nodeType !== 1 || !jQuery.hasData( src ) ) { + return; + } + + var type, i, l, + oldData = jQuery._data( src ), + curData = jQuery._data( dest, oldData ), + events = oldData.events; + + if ( events ) { + delete curData.handle; + curData.events = {}; + + for ( type in events ) { + for ( i = 0, l = events[ type ].length; i < l; i++ ) { + jQuery.event.add( dest, type, events[ type ][ i ] ); + } + } + } + + // make the cloned public data object a copy from the original + if ( curData.data ) { + curData.data = jQuery.extend( {}, curData.data ); + } +} + +function fixCloneNodeIssues( src, dest ) { + var nodeName, e, data; + + // We do not need to do anything for non-Elements + if ( dest.nodeType !== 1 ) { + return; + } + + nodeName = dest.nodeName.toLowerCase(); + + // IE6-8 copies events bound via attachEvent when using cloneNode. 
+ if ( !support.noCloneEvent && dest[ jQuery.expando ] ) { + data = jQuery._data( dest ); + + for ( e in data.events ) { + jQuery.removeEvent( dest, e, data.handle ); + } + + // Event data gets referenced instead of copied if the expando gets copied too + dest.removeAttribute( jQuery.expando ); + } + + // IE blanks contents when cloning scripts, and tries to evaluate newly-set text + if ( nodeName === "script" && dest.text !== src.text ) { + disableScript( dest ).text = src.text; + restoreScript( dest ); + + // IE6-10 improperly clones children of object elements using classid. + // IE10 throws NoModificationAllowedError if parent is null, #12132. + } else if ( nodeName === "object" ) { + if ( dest.parentNode ) { + dest.outerHTML = src.outerHTML; + } + + // This path appears unavoidable for IE9. When cloning an object + // element in IE9, the outerHTML strategy above is not sufficient. + // If the src has innerHTML and the destination does not, + // copy the src.innerHTML into the dest.innerHTML. #10324 + if ( support.html5Clone && ( src.innerHTML && !jQuery.trim(dest.innerHTML) ) ) { + dest.innerHTML = src.innerHTML; + } + + } else if ( nodeName === "input" && rcheckableType.test( src.type ) ) { + // IE6-8 fails to persist the checked state of a cloned checkbox + // or radio button. Worse, IE6-7 fail to give the cloned element + // a checked appearance if the defaultChecked value isn't also set + + dest.defaultChecked = dest.checked = src.checked; + + // IE6-7 get confused and end up setting the value of a cloned + // checkbox/radio button to an empty string instead of "on" + if ( dest.value !== src.value ) { + dest.value = src.value; + } + + // IE6-8 fails to return the selected option to the default selected + // state when cloning options + } else if ( nodeName === "option" ) { + dest.defaultSelected = dest.selected = src.defaultSelected; + + // IE6-8 fails to set the defaultValue to the correct value when + // cloning other types of input fields + } else if ( nodeName === "input" || nodeName === "textarea" ) { + dest.defaultValue = src.defaultValue; + } +} + +jQuery.extend({ + clone: function( elem, dataAndEvents, deepDataAndEvents ) { + var destElements, node, clone, i, srcElements, + inPage = jQuery.contains( elem.ownerDocument, elem ); + + if ( support.html5Clone || jQuery.isXMLDoc(elem) || !rnoshimcache.test( "<" + elem.nodeName + ">" ) ) { + clone = elem.cloneNode( true ); + + // IE<=8 does not properly clone detached, unknown element nodes + } else { + fragmentDiv.innerHTML = elem.outerHTML; + fragmentDiv.removeChild( clone = fragmentDiv.firstChild ); + } + + if ( (!support.noCloneEvent || !support.noCloneChecked) && + (elem.nodeType === 1 || elem.nodeType === 11) && !jQuery.isXMLDoc(elem) ) { + + // We eschew Sizzle here for performance reasons: http://jsperf.com/getall-vs-sizzle/2 + destElements = getAll( clone ); + srcElements = getAll( elem ); + + // Fix all IE cloning issues + for ( i = 0; (node = srcElements[i]) != null; ++i ) { + // Ensure that the destination node is not null; Fixes #9587 + if ( destElements[i] ) { + fixCloneNodeIssues( node, destElements[i] ); + } + } + } + + // Copy the events from the original to the clone + if ( dataAndEvents ) { + if ( deepDataAndEvents ) { + srcElements = srcElements || getAll( elem ); + destElements = destElements || getAll( clone ); + + for ( i = 0; (node = srcElements[i]) != null; i++ ) { + cloneCopyEvent( node, destElements[i] ); + } + } else { + cloneCopyEvent( elem, clone ); + } + } + + // Preserve script evaluation history + 
destElements = getAll( clone, "script" ); + if ( destElements.length > 0 ) { + setGlobalEval( destElements, !inPage && getAll( elem, "script" ) ); + } + + destElements = srcElements = node = null; + + // Return the cloned set + return clone; + }, + + buildFragment: function( elems, context, scripts, selection ) { + var j, elem, contains, + tmp, tag, tbody, wrap, + l = elems.length, + + // Ensure a safe fragment + safe = createSafeFragment( context ), + + nodes = [], + i = 0; + + for ( ; i < l; i++ ) { + elem = elems[ i ]; + + if ( elem || elem === 0 ) { + + // Add nodes directly + if ( jQuery.type( elem ) === "object" ) { + jQuery.merge( nodes, elem.nodeType ? [ elem ] : elem ); + + // Convert non-html into a text node + } else if ( !rhtml.test( elem ) ) { + nodes.push( context.createTextNode( elem ) ); + + // Convert html into DOM nodes + } else { + tmp = tmp || safe.appendChild( context.createElement("div") ); + + // Deserialize a standard representation + tag = (rtagName.exec( elem ) || [ "", "" ])[ 1 ].toLowerCase(); + wrap = wrapMap[ tag ] || wrapMap._default; + + tmp.innerHTML = wrap[1] + elem.replace( rxhtmlTag, "<$1>" ) + wrap[2]; + + // Descend through wrappers to the right content + j = wrap[0]; + while ( j-- ) { + tmp = tmp.lastChild; + } + + // Manually add leading whitespace removed by IE + if ( !support.leadingWhitespace && rleadingWhitespace.test( elem ) ) { + nodes.push( context.createTextNode( rleadingWhitespace.exec( elem )[0] ) ); + } + + // Remove IE's autoinserted from table fragments + if ( !support.tbody ) { + + // String was a , *may* have spurious + elem = tag === "table" && !rtbody.test( elem ) ? + tmp.firstChild : + + // String was a bare or + wrap[1] === "
" && !rtbody.test( elem ) ? + tmp : + 0; + + j = elem && elem.childNodes.length; + while ( j-- ) { + if ( jQuery.nodeName( (tbody = elem.childNodes[j]), "tbody" ) && !tbody.childNodes.length ) { + elem.removeChild( tbody ); + } + } + } + + jQuery.merge( nodes, tmp.childNodes ); + + // Fix #12392 for WebKit and IE > 9 + tmp.textContent = ""; + + // Fix #12392 for oldIE + while ( tmp.firstChild ) { + tmp.removeChild( tmp.firstChild ); + } + + // Remember the top-level container for proper cleanup + tmp = safe.lastChild; + } + } + } + + // Fix #11356: Clear elements from fragment + if ( tmp ) { + safe.removeChild( tmp ); + } + + // Reset defaultChecked for any radios and checkboxes + // about to be appended to the DOM in IE 6/7 (#8060) + if ( !support.appendChecked ) { + jQuery.grep( getAll( nodes, "input" ), fixDefaultChecked ); + } + + i = 0; + while ( (elem = nodes[ i++ ]) ) { + + // #4087 - If origin and destination elements are the same, and this is + // that element, do not do anything + if ( selection && jQuery.inArray( elem, selection ) !== -1 ) { + continue; + } + + contains = jQuery.contains( elem.ownerDocument, elem ); + + // Append to fragment + tmp = getAll( safe.appendChild( elem ), "script" ); + + // Preserve script evaluation history + if ( contains ) { + setGlobalEval( tmp ); + } + + // Capture executables + if ( scripts ) { + j = 0; + while ( (elem = tmp[ j++ ]) ) { + if ( rscriptType.test( elem.type || "" ) ) { + scripts.push( elem ); + } + } + } + } + + tmp = null; + + return safe; + }, + + cleanData: function( elems, /* internal */ acceptData ) { + var elem, type, id, data, + i = 0, + internalKey = jQuery.expando, + cache = jQuery.cache, + deleteExpando = support.deleteExpando, + special = jQuery.event.special; + + for ( ; (elem = elems[i]) != null; i++ ) { + if ( acceptData || jQuery.acceptData( elem ) ) { + + id = elem[ internalKey ]; + data = id && cache[ id ]; + + if ( data ) { + if ( data.events ) { + for ( type in data.events ) { + if ( special[ type ] ) { + jQuery.event.remove( elem, type ); + + // This is a shortcut to avoid jQuery.event.remove's overhead + } else { + jQuery.removeEvent( elem, type, data.handle ); + } + } + } + + // Remove cache only if it was not already removed by jQuery.event.remove + if ( cache[ id ] ) { + + delete cache[ id ]; + + // IE does not allow us to delete expando properties from nodes, + // nor does it have a removeAttribute function on Document nodes; + // we must handle all of these cases + if ( deleteExpando ) { + delete elem[ internalKey ]; + + } else if ( typeof elem.removeAttribute !== strundefined ) { + elem.removeAttribute( internalKey ); + + } else { + elem[ internalKey ] = null; + } + + deletedIds.push( id ); + } + } + } + } + } +}); + +jQuery.fn.extend({ + text: function( value ) { + return access( this, function( value ) { + return value === undefined ? 
+ jQuery.text( this ) : + this.empty().append( ( this[0] && this[0].ownerDocument || document ).createTextNode( value ) ); + }, null, value, arguments.length ); + }, + + append: function() { + return this.domManip( arguments, function( elem ) { + if ( this.nodeType === 1 || this.nodeType === 11 || this.nodeType === 9 ) { + var target = manipulationTarget( this, elem ); + target.appendChild( elem ); + } + }); + }, + + prepend: function() { + return this.domManip( arguments, function( elem ) { + if ( this.nodeType === 1 || this.nodeType === 11 || this.nodeType === 9 ) { + var target = manipulationTarget( this, elem ); + target.insertBefore( elem, target.firstChild ); + } + }); + }, + + before: function() { + return this.domManip( arguments, function( elem ) { + if ( this.parentNode ) { + this.parentNode.insertBefore( elem, this ); + } + }); + }, + + after: function() { + return this.domManip( arguments, function( elem ) { + if ( this.parentNode ) { + this.parentNode.insertBefore( elem, this.nextSibling ); + } + }); + }, + + remove: function( selector, keepData /* Internal Use Only */ ) { + var elem, + elems = selector ? jQuery.filter( selector, this ) : this, + i = 0; + + for ( ; (elem = elems[i]) != null; i++ ) { + + if ( !keepData && elem.nodeType === 1 ) { + jQuery.cleanData( getAll( elem ) ); + } + + if ( elem.parentNode ) { + if ( keepData && jQuery.contains( elem.ownerDocument, elem ) ) { + setGlobalEval( getAll( elem, "script" ) ); + } + elem.parentNode.removeChild( elem ); + } + } + + return this; + }, + + empty: function() { + var elem, + i = 0; + + for ( ; (elem = this[i]) != null; i++ ) { + // Remove element nodes and prevent memory leaks + if ( elem.nodeType === 1 ) { + jQuery.cleanData( getAll( elem, false ) ); + } + + // Remove any remaining nodes + while ( elem.firstChild ) { + elem.removeChild( elem.firstChild ); + } + + // If this is a select, ensure that it displays empty (#12336) + // Support: IE<9 + if ( elem.options && jQuery.nodeName( elem, "select" ) ) { + elem.options.length = 0; + } + } + + return this; + }, + + clone: function( dataAndEvents, deepDataAndEvents ) { + dataAndEvents = dataAndEvents == null ? false : dataAndEvents; + deepDataAndEvents = deepDataAndEvents == null ? dataAndEvents : deepDataAndEvents; + + return this.map(function() { + return jQuery.clone( this, dataAndEvents, deepDataAndEvents ); + }); + }, + + html: function( value ) { + return access( this, function( value ) { + var elem = this[ 0 ] || {}, + i = 0, + l = this.length; + + if ( value === undefined ) { + return elem.nodeType === 1 ? 
+ elem.innerHTML.replace( rinlinejQuery, "" ) : + undefined; + } + + // See if we can take a shortcut and just use innerHTML + if ( typeof value === "string" && !rnoInnerhtml.test( value ) && + ( support.htmlSerialize || !rnoshimcache.test( value ) ) && + ( support.leadingWhitespace || !rleadingWhitespace.test( value ) ) && + !wrapMap[ (rtagName.exec( value ) || [ "", "" ])[ 1 ].toLowerCase() ] ) { + + value = value.replace( rxhtmlTag, "<$1>" ); + + try { + for (; i < l; i++ ) { + // Remove element nodes and prevent memory leaks + elem = this[i] || {}; + if ( elem.nodeType === 1 ) { + jQuery.cleanData( getAll( elem, false ) ); + elem.innerHTML = value; + } + } + + elem = 0; + + // If using innerHTML throws an exception, use the fallback method + } catch(e) {} + } + + if ( elem ) { + this.empty().append( value ); + } + }, null, value, arguments.length ); + }, + + replaceWith: function() { + var arg = arguments[ 0 ]; + + // Make the changes, replacing each context element with the new content + this.domManip( arguments, function( elem ) { + arg = this.parentNode; + + jQuery.cleanData( getAll( this ) ); + + if ( arg ) { + arg.replaceChild( elem, this ); + } + }); + + // Force removal if there was no new content (e.g., from empty arguments) + return arg && (arg.length || arg.nodeType) ? this : this.remove(); + }, + + detach: function( selector ) { + return this.remove( selector, true ); + }, + + domManip: function( args, callback ) { + + // Flatten any nested arrays + args = concat.apply( [], args ); + + var first, node, hasScripts, + scripts, doc, fragment, + i = 0, + l = this.length, + set = this, + iNoClone = l - 1, + value = args[0], + isFunction = jQuery.isFunction( value ); + + // We can't cloneNode fragments that contain checked, in WebKit + if ( isFunction || + ( l > 1 && typeof value === "string" && + !support.checkClone && rchecked.test( value ) ) ) { + return this.each(function( index ) { + var self = set.eq( index ); + if ( isFunction ) { + args[0] = value.call( this, index, self.html() ); + } + self.domManip( args, callback ); + }); + } + + if ( l ) { + fragment = jQuery.buildFragment( args, this[ 0 ].ownerDocument, false, this ); + first = fragment.firstChild; + + if ( fragment.childNodes.length === 1 ) { + fragment = first; + } + + if ( first ) { + scripts = jQuery.map( getAll( fragment, "script" ), disableScript ); + hasScripts = scripts.length; + + // Use the original fragment for the last item instead of the first because it can end up + // being emptied incorrectly in certain situations (#8070). 
+ for ( ; i < l; i++ ) { + node = fragment; + + if ( i !== iNoClone ) { + node = jQuery.clone( node, true, true ); + + // Keep references to cloned scripts for later restoration + if ( hasScripts ) { + jQuery.merge( scripts, getAll( node, "script" ) ); + } + } + + callback.call( this[i], node, i ); + } + + if ( hasScripts ) { + doc = scripts[ scripts.length - 1 ].ownerDocument; + + // Reenable scripts + jQuery.map( scripts, restoreScript ); + + // Evaluate executable scripts on first document insertion + for ( i = 0; i < hasScripts; i++ ) { + node = scripts[ i ]; + if ( rscriptType.test( node.type || "" ) && + !jQuery._data( node, "globalEval" ) && jQuery.contains( doc, node ) ) { + + if ( node.src ) { + // Optional AJAX dependency, but won't run scripts if not present + if ( jQuery._evalUrl ) { + jQuery._evalUrl( node.src ); + } + } else { + jQuery.globalEval( ( node.text || node.textContent || node.innerHTML || "" ).replace( rcleanScript, "" ) ); + } + } + } + } + + // Fix #11809: Avoid leaking memory + fragment = first = null; + } + } + + return this; + } +}); + +jQuery.each({ + appendTo: "append", + prependTo: "prepend", + insertBefore: "before", + insertAfter: "after", + replaceAll: "replaceWith" +}, function( name, original ) { + jQuery.fn[ name ] = function( selector ) { + var elems, + i = 0, + ret = [], + insert = jQuery( selector ), + last = insert.length - 1; + + for ( ; i <= last; i++ ) { + elems = i === last ? this : this.clone(true); + jQuery( insert[i] )[ original ]( elems ); + + // Modern browsers can apply jQuery collections as arrays, but oldIE needs a .get() + push.apply( ret, elems.get() ); + } + + return this.pushStack( ret ); + }; +}); + + +var iframe, + elemdisplay = {}; + +/** + * Retrieve the actual display of a element + * @param {String} name nodeName of the element + * @param {Object} doc Document object + */ +// Called only from within defaultDisplay +function actualDisplay( name, doc ) { + var style, + elem = jQuery( doc.createElement( name ) ).appendTo( doc.body ), + + // getDefaultComputedStyle might be reliably used only on attached element + display = window.getDefaultComputedStyle && ( style = window.getDefaultComputedStyle( elem[ 0 ] ) ) ? + + // Use of this method is a temporary fix (more like optmization) until something better comes along, + // since it was removed from specification and supported only in FF + style.display : jQuery.css( elem[ 0 ], "display" ); + + // We don't have any data stored on the element, + // so use "detach" method as fast way to get rid of the element + elem.detach(); + + return display; +} + +/** + * Try to determine the default display value of an element + * @param {String} nodeName + */ +function defaultDisplay( nodeName ) { + var doc = document, + display = elemdisplay[ nodeName ]; + + if ( !display ) { + display = actualDisplay( nodeName, doc ); + + // If the simple way fails, read from inside an iframe + if ( display === "none" || !display ) { + + // Use the already-created iframe if possible + iframe = (iframe || jQuery( "