diff --git a/release_doc/0.9.0/.buildinfo b/release_doc/0.9.0/.buildinfo new file mode 100644 index 0000000000000000000000000000000000000000..930225e7b336d866ca9490cf1b76fe17cd05a33b --- /dev/null +++ b/release_doc/0.9.0/.buildinfo @@ -0,0 +1,4 @@ +# Sphinx build info version 1 +# This file hashes the configuration used when building these files. When it is not found, a full rebuild will be done. +config: 2e7327c43832ec6188cc63b41afbbc75 +tags: 645f666f9bcd5a90fca523b33c5a78b7 diff --git a/release_doc/0.9.0/doc/.buildinfo b/release_doc/0.9.0/doc/.buildinfo new file mode 100644 index 0000000000000000000000000000000000000000..765cc9cc2194d8c057838a7f05829970145b0b80 --- /dev/null +++ b/release_doc/0.9.0/doc/.buildinfo @@ -0,0 +1,4 @@ +# Sphinx build info version 1 +# This file hashes the configuration used when building these files. When it is not found, a full rebuild will be done. +config: abb235454c522821afda02c2aa921d6f +tags: 645f666f9bcd5a90fca523b33c5a78b7 diff --git a/release_doc/0.9.0/doc/_images/FullyConnected.jpg b/release_doc/0.9.0/doc/_images/FullyConnected.jpg new file mode 100644 index 0000000000000000000000000000000000000000..b2241f401434e527f95ee4e0e541a3f2ff78fd1e Binary files /dev/null and b/release_doc/0.9.0/doc/_images/FullyConnected.jpg differ diff --git a/release_doc/0.9.0/doc/_images/NetContinuous_en.png b/release_doc/0.9.0/doc/_images/NetContinuous_en.png new file mode 100644 index 0000000000000000000000000000000000000000..7bdef1aa366711806585d35c8653c987fd63d59e Binary files /dev/null and b/release_doc/0.9.0/doc/_images/NetContinuous_en.png differ diff --git a/release_doc/0.9.0/doc/_images/NetConv_en.png b/release_doc/0.9.0/doc/_images/NetConv_en.png new file mode 100644 index 0000000000000000000000000000000000000000..ad618d1d6f8f4839f566f5f5cb5db37a4b7d9093 Binary files /dev/null and b/release_doc/0.9.0/doc/_images/NetConv_en.png differ diff --git a/release_doc/0.9.0/doc/_images/NetLR_en.png b/release_doc/0.9.0/doc/_images/NetLR_en.png new 
file mode 100644 index 0000000000000000000000000000000000000000..9d514bf1b18a0c330f98c28785e5d008f409fc1d Binary files /dev/null and b/release_doc/0.9.0/doc/_images/NetLR_en.png differ diff --git a/release_doc/0.9.0/doc/_images/NetRNN_en.png b/release_doc/0.9.0/doc/_images/NetRNN_en.png new file mode 100644 index 0000000000000000000000000000000000000000..180f273d32ea59dc8ececa69c08e249f79f9d4f7 Binary files /dev/null and b/release_doc/0.9.0/doc/_images/NetRNN_en.png differ diff --git a/release_doc/0.9.0/doc/_images/PipelineNetwork_en.jpg b/release_doc/0.9.0/doc/_images/PipelineNetwork_en.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e779aed06d5cdb2b442754e7915e79b72946418e Binary files /dev/null and b/release_doc/0.9.0/doc/_images/PipelineNetwork_en.jpg differ diff --git a/release_doc/0.9.0/doc/_images/PipelineTest_en.png b/release_doc/0.9.0/doc/_images/PipelineTest_en.png new file mode 100644 index 0000000000000000000000000000000000000000..7e7ef520b5effa2f43fd2964048f05c42f2ea890 Binary files /dev/null and b/release_doc/0.9.0/doc/_images/PipelineTest_en.png differ diff --git a/release_doc/0.9.0/doc/_images/PipelineTrain_en.png b/release_doc/0.9.0/doc/_images/PipelineTrain_en.png new file mode 100644 index 0000000000000000000000000000000000000000..132d29bfd5d678d2518161d0b5ed2e16a233a048 Binary files /dev/null and b/release_doc/0.9.0/doc/_images/PipelineTrain_en.png differ diff --git a/release_doc/0.9.0/doc/_images/Pipeline_en.jpg b/release_doc/0.9.0/doc/_images/Pipeline_en.jpg new file mode 100644 index 0000000000000000000000000000000000000000..21a7a7bb6a1af746120e6f4f51f797b6aaafb9d8 Binary files /dev/null and b/release_doc/0.9.0/doc/_images/Pipeline_en.jpg differ diff --git a/release_doc/0.9.0/doc/_images/bi_lstm.jpg b/release_doc/0.9.0/doc/_images/bi_lstm.jpg new file mode 100644 index 0000000000000000000000000000000000000000..adec1606d64d6e35ffe7e62abfa9a09309b05c84 Binary files /dev/null and 
b/release_doc/0.9.0/doc/_images/bi_lstm.jpg differ diff --git a/release_doc/0.9.0/doc/_images/bi_lstm1.jpg b/release_doc/0.9.0/doc/_images/bi_lstm1.jpg new file mode 100644 index 0000000000000000000000000000000000000000..adec1606d64d6e35ffe7e62abfa9a09309b05c84 Binary files /dev/null and b/release_doc/0.9.0/doc/_images/bi_lstm1.jpg differ diff --git a/release_doc/0.9.0/doc/_images/cifar.png b/release_doc/0.9.0/doc/_images/cifar.png new file mode 100644 index 0000000000000000000000000000000000000000..f54a0c58837cb3385b32dc57d02cec92666ef0f1 Binary files /dev/null and b/release_doc/0.9.0/doc/_images/cifar.png differ diff --git a/release_doc/0.9.0/doc/_images/encoder-decoder-attention-model.png b/release_doc/0.9.0/doc/_images/encoder-decoder-attention-model.png new file mode 100644 index 0000000000000000000000000000000000000000..79f911d4ba12ac0c0d1a936c9df639c302786914 Binary files /dev/null and b/release_doc/0.9.0/doc/_images/encoder-decoder-attention-model.png differ diff --git a/release_doc/0.9.0/doc/_images/encoder-decoder-attention-model1.png b/release_doc/0.9.0/doc/_images/encoder-decoder-attention-model1.png new file mode 100644 index 0000000000000000000000000000000000000000..79f911d4ba12ac0c0d1a936c9df639c302786914 Binary files /dev/null and b/release_doc/0.9.0/doc/_images/encoder-decoder-attention-model1.png differ diff --git a/release_doc/0.9.0/doc/_images/feature.jpg b/release_doc/0.9.0/doc/_images/feature.jpg new file mode 100644 index 0000000000000000000000000000000000000000..0e3310e4ace5613917e7779d3198ccbb3cdc5ada Binary files /dev/null and b/release_doc/0.9.0/doc/_images/feature.jpg differ diff --git a/release_doc/0.9.0/doc/_images/image_classification.png b/release_doc/0.9.0/doc/_images/image_classification.png new file mode 100644 index 0000000000000000000000000000000000000000..14f255805081c1b4fab27eaf336fd389fa93ca19 Binary files /dev/null and b/release_doc/0.9.0/doc/_images/image_classification.png differ diff --git 
a/release_doc/0.9.0/doc/_images/lenet.png b/release_doc/0.9.0/doc/_images/lenet.png new file mode 100644 index 0000000000000000000000000000000000000000..1e6f2b32bad797f3fccb929c72a121fc935b0cbb Binary files /dev/null and b/release_doc/0.9.0/doc/_images/lenet.png differ diff --git a/release_doc/0.9.0/doc/_images/lstm.png b/release_doc/0.9.0/doc/_images/lstm.png new file mode 100644 index 0000000000000000000000000000000000000000..aaf1fc690da2ffb8418cde5ed81848ddb5263030 Binary files /dev/null and b/release_doc/0.9.0/doc/_images/lstm.png differ diff --git a/release_doc/0.9.0/doc/_images/network_arch.png b/release_doc/0.9.0/doc/_images/network_arch.png new file mode 100644 index 0000000000000000000000000000000000000000..4ae7864212f2a0a38102ee7ff600527ea99fec82 Binary files /dev/null and b/release_doc/0.9.0/doc/_images/network_arch.png differ diff --git a/release_doc/0.9.0/doc/_images/neural-n-gram-model.png b/release_doc/0.9.0/doc/_images/neural-n-gram-model.png new file mode 100644 index 0000000000000000000000000000000000000000..f70b765b3fd69816345a79fc59adfea46008dbfd Binary files /dev/null and b/release_doc/0.9.0/doc/_images/neural-n-gram-model.png differ diff --git a/release_doc/0.9.0/doc/_images/parameters.png b/release_doc/0.9.0/doc/_images/parameters.png new file mode 100644 index 0000000000000000000000000000000000000000..2ec67480951e21f0400bce1c34b3108dcd65c18c Binary files /dev/null and b/release_doc/0.9.0/doc/_images/parameters.png differ diff --git a/release_doc/0.9.0/doc/_images/plot.png b/release_doc/0.9.0/doc/_images/plot.png new file mode 100644 index 0000000000000000000000000000000000000000..a31f99791c670e18bb8c62b7604ec8cb0284ffb4 Binary files /dev/null and b/release_doc/0.9.0/doc/_images/plot.png differ diff --git a/release_doc/0.9.0/doc/_images/rec_regression_network.png b/release_doc/0.9.0/doc/_images/rec_regression_network.png new file mode 100644 index 0000000000000000000000000000000000000000..7d2b54d4fcf560cd5b667628f0012c3822efd9b2 Binary files 
/dev/null and b/release_doc/0.9.0/doc/_images/rec_regression_network.png differ diff --git a/release_doc/0.9.0/doc/_images/resnet_block.jpg b/release_doc/0.9.0/doc/_images/resnet_block.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e16bd3c624030c4c09b358a015b491141b42d8f1 Binary files /dev/null and b/release_doc/0.9.0/doc/_images/resnet_block.jpg differ diff --git a/release_doc/0.9.0/doc/_images/stacked_lstm.jpg b/release_doc/0.9.0/doc/_images/stacked_lstm.jpg new file mode 100644 index 0000000000000000000000000000000000000000..4239055050966e0095e188a8c81d860711bce29d Binary files /dev/null and b/release_doc/0.9.0/doc/_images/stacked_lstm.jpg differ diff --git a/release_doc/0.9.0/doc/_sources/algorithm/rnn/rnn.txt b/release_doc/0.9.0/doc/_sources/algorithm/rnn/rnn.txt new file mode 100644 index 0000000000000000000000000000000000000000..343f55a20e464f63f054ebe724b5ef90f848d5e9 --- /dev/null +++ b/release_doc/0.9.0/doc/_sources/algorithm/rnn/rnn.txt @@ -0,0 +1,251 @@ +Recurrent Neural Network Configuration +====================================== + +This tutorial will guide you how to configure recurrent neural network in PaddlePaddle. PaddlePaddle supports highly flexible and efficient recurrent neural network configuration. In this tutorial, you will learn how to: + +- prepare sequence data for learning recurrent neural networks. +- configure recurrent neural network architecture. +- generate sequence with learned recurrent neural network models. + +We will use vanilla recurrent neural network, and sequence to sequence model to guide you through these steps. The code of sequence to sequence model can be found at :code:`demo/seqToseq`. + +===================== +Prepare Sequence Data +===================== + +PaddlePaddle does not need any preprocessing to sequence data, such as padding. The only thing that needs to be done is to set the type of the corresponding type to input. For example, the following code snippets defines three input. 
All of them are sequences, and the size of them are :code:`src_dict`, :code:`trg_dict`, and :code:`trg_dict`: + +.. code-block:: python + + settings.slots = [ + integer_value_sequence(len(settings.src_dict)), + integer_value_sequence(len(settings.trg_dict)), + integer_value_sequence(len(settings.trg_dict))] + + +Then at the :code:`process` function, each :code:`yield` function will return three integer lists. Each integer list is treated as a sequence of integers: + +.. code-block:: python + + yield src_ids, trg_ids, trg_ids_next + + +For more details description of how to write a data provider, please refer to `PyDataProvider2 <../../ui/data_provider/index.html>`_. The full data provider file is located at :code:`demo/seqToseq/dataprovider.py`. + +=============================================== +Configure Recurrent Neural Network Architecture +=============================================== + +------------------------------------- +Simple Gated Recurrent Neural Network +------------------------------------- + +Recurrent neural network process a sequence at each time step sequentially. An example of the architecture of LSTM is listed below. + +.. image:: ./bi_lstm.jpg + :align: center + +Generally speaking, a recurrent network perform the following operations from :math:`t=1` to :math:`t=T`, or reversely from :math:`t=T` to :math:`t=1`. + +.. math:: + + x_{t+1} = f_x(x_t), y_t = f_y(x_t) + + +where :math:`f_x(.)` is called **step function**, and :math:`f_y(.)` is called **output function**. In vanilla recurrent neural network, both of the step function and output function are very simple. However, PaddlePaddle supports the configuration of very complex architectures by modifying these two functions. We will use the sequence to sequence model with attention as an example to demonstrate how you can configure complex recurrent neural network models. 
In this section, we will use a simple vanilla recurrent neural network as an example of configuring simple recurrent neural network using :code:`recurrent_group`. Notice that if you only need to use simple RNN, GRU, or LSTM, then :code:`grumemory` and :code:`lstmemory` is recommended because they are more computationally efficient than :code:`recurrent_group`. + +For vanilla RNN, at each time step, the **step function** is: + +.. math:: + + x_{t+1} = W_x x_t + W_i I_t + b + +where :math:`x_t` is the RNN state, and :math:`I_t` is the input, :math:`W_x` and :math:`W_i` are transformation matrices for RNN states and inputs, respectively. :math:`b` is the bias. +Its **output function** simply takes :math:`x_t` as the output. + +:code:`recurrent_group` is the most important tools for constructing recurrent neural networks. It defines the **step function**, **output function** and the inputs of the recurrent neural network. Notice that the :code:`step` argument of this function implements both the :code:`step function` and the :code:`output function`: + +.. code-block:: python + + def simple_rnn(input, + size=None, + name=None, + reverse=False, + rnn_bias_attr=None, + act=None, + rnn_layer_attr=None): + def __rnn_step__(ipt): + out_mem = memory(name=name, size=size) + rnn_out = mixed_layer(input = [full_matrix_projection(ipt), + full_matrix_projection(out_mem)], + name = name, + bias_attr = rnn_bias_attr, + act = act, + layer_attr = rnn_layer_attr, + size = size) + return rnn_out + return recurrent_group(name='%s_recurrent_group' % name, + step=__rnn_step__, + reverse=reverse, + input=input) + + +PaddlePaddle uses memory to construct step function. **Memory** is the most important concept when constructing recurrent neural networks in PaddlePaddle. A memory is a state that is used recurrently in step functions, such as :math:`x_{t+1} = f_x(x_t)`. One memory contains an **output** and a **input**. 
The output of memory at the current time step is utilized as the input of the memory at the next time step. A memory can also has a **boot layer**, whose output is utilized as the initial value of the memory. In our case, the output of the gated recurrent unit is employed as the output memory. Notice that the name of the layer :code:`rnn_out` is the same as the name of :code:`out_mem`. This means the output of the layer :code:`rnn_out` (:math:`x_{t+1}`) is utilized as the **output** of :code:`out_mem` memory. + +A memory can also be a sequence. In this case, at each time step, we have a sequence as the state of the recurrent neural network. This can be useful when constructing very complex recurrent neural network. Other advanced functions include defining multiple memories, and defining hierarchical recurrent neural network architecture using sub-sequence. + +We return :code:`rnn_out` at the end of the function. It means that the output of the layer :code:`rnn_out` is utilized as the **output** function of the gated recurrent neural network. + +----------------------------------------- +Sequence to Sequence Model with Attention +----------------------------------------- +We will use the sequence to sequence model with attention as an example to demonstrate how you can configure complex recurrent neural network models. An illustration of the sequence to sequence model with attention is shown in the following figure. + +.. image:: ./encoder-decoder-attention-model.png + :align: center + +In this model, the source sequence :math:`S = \{s_1, \dots, s_T\}` is encoded with a bidirectional gated recurrent neural networks. The hidden states of the bidirectional gated recurrent neural network :math:`H_S = \{H_1, \dots, H_T\}` is called *encoder vector* The decoder is a gated recurrent neural network. 
When decoding each token :math:`y_t`, the gated recurrent neural network generates a set of weights :math:`W_S^t = \{W_1^t, \dots, W_T^t\}`, which are used to compute a weighted sum of the encoder vector. The weighted sum of the encoder vector is utilized to condition the generation of the token :math:`y_t`. + +The encoder part of the model is listed below. It calls :code:`grumemory` to represent gated recurrent neural network. It is the recommended way of using recurrent neural network if the network architecture is simple, because it is faster than :code:`recurrent_group`. We have implemented most of the commonly used recurrent neural network architectures, you can refer to `Layers <../../ui/api/trainer_config_helpers/layers_index.html>`_ for more details. + +We also project the encoder vector to :code:`decoder_size` dimensional space, get the first instance of the backward recurrent network, and project it to :code:`decoder_size` dimensional space: + +.. code-block:: python + + # Define the data layer of the source sentence. + src_word_id = data_layer(name='source_language_word', size=source_dict_dim) + # Calculate the word embedding of each word. + src_embedding = embedding_layer( + input=src_word_id, + size=word_vector_dim, + param_attr=ParamAttr(name='_source_language_embedding')) + # Apply forward recurrent neural network. + src_forward = grumemory(input=src_embedding, size=encoder_size) + # Apply backward recurrent neural network. reverse=True means backward recurrent neural network. + src_backward = grumemory(input=src_embedding, + size=encoder_size, + reverse=True) + # Mix the forward and backward parts of the recurrent neural network together. + encoded_vector = concat_layer(input=[src_forward, src_backward]) + + # Project encoding vector to decoder_size. + encoder_proj = mixed_layer(input = [full_matrix_projection(encoded_vector)], + size = decoder_size) + + # Compute the first instance of the backward RNN. 
+ backward_first = first_seq(input=src_backward) + + # Project the first instance of backward RNN to decoder size. + decoder_boot = mixed_layer(input=[full_matrix_projection(backward_first)], size=decoder_size, act=TanhActivation()) + + +The decoder uses :code:`recurrent_group` to define the recurrent neural network. The step and output functions are defined in :code:`gru_decoder_with_attention`: + +.. code-block:: python + + group_inputs=[StaticInput(input=encoded_vector,is_seq=True), + StaticInput(input=encoded_proj,is_seq=True)] + trg_embedding = embedding_layer( + input=data_layer(name='target_language_word', + size=target_dict_dim), + size=word_vector_dim, + param_attr=ParamAttr(name='_target_language_embedding')) + group_inputs.append(trg_embedding) + + # For decoder equipped with attention mechanism, in training, + # target embedding (the groudtruth) is the data input, + # while encoded source sequence is accessed to as an unbounded memory. + # StaticInput means the same value is utilized at different time steps. + # Otherwise, it is a sequence input. Inputs at different time steps are different. + # All sequence inputs should have the same length. + decoder = recurrent_group(name=decoder_group_name, + step=gru_decoder_with_attention, + input=group_inputs) + + +The implementation of the step function is listed as below. First, it defines the **memory** of the decoder network. Then it defines attention, gated recurrent unit step function, and the output function: + +.. code-block:: python + + def gru_decoder_with_attention(enc_vec, enc_proj, current_word): + # Defines the memory of the decoder. + # The output of this memory is defined in gru_step. + # Notice that the name of gru_step should be the same as the name of this memory. + decoder_mem = memory(name='gru_decoder', + size=decoder_size, + boot_layer=decoder_boot) + # Compute attention weighted encoder vector. 
+ context = simple_attention(encoded_sequence=enc_vec, + encoded_proj=enc_proj, + decoder_state=decoder_mem) + # Mix the current word embedding and the attention weighted encoder vector. + decoder_inputs = mixed_layer(inputs = [full_matrix_projection(context), + full_matrix_projection(current_word)], + size = decoder_size * 3) + # Define Gated recurrent unit recurrent neural network step function. + gru_step = gru_step_layer(name='gru_decoder', + input=decoder_inputs, + output_mem=decoder_mem, + size=decoder_size) + # Defines the output function. + out = mixed_layer(input=[full_matrix_projection(input=gru_step)], + size=target_dict_dim, + bias_attr=True, + act=SoftmaxActivation()) + return out + + +================= +Generate Sequence +================= +After training the model, we can use it to generate sequences. A common practice is to use **beam search** to generate sequences. The following code snippets defines a beam search algorithm. Notice that :code:`beam_search` function assumes the output function of the :code:`step` returns a softmax normalized probability vector of the next token. We made the following changes to the model. + +* use :code:`GeneratedInput` for trg_embedding. :code:`GeneratedInput` computes the embedding of the generated token at the last time step for the input at the current time step. +* use :code:`beam_search` function. This function needs to set: + + - :code:`bos_id`: the start token. Every sentence starts with the start token. + - :code:`eos_id`: the end token. Every sentence ends with the end token. + - :code:`beam_size`: the beam size used in beam search. + - :code:`max_length`: the maximum length of the generated sentences. + +* use :code:`seqtext_printer_evaluator` to print text according to index matrix and dictionary. This function needs to set: + + - :code:`id_input`: the integer ID of the data, used to identify the corresponding output in the generated files. 
+ - :code:`dict_file`: the dictionary file for converting word id to word. + - :code:`result_file`: the path of the generation result file. + +The code is listed below: + +.. code-block:: python + + group_inputs=[StaticInput(input=encoded_vector,is_seq=True), + StaticInput(input=encoded_proj,is_seq=True)] + # In generation, decoder predicts a next target word based on + # the encoded source sequence and the last generated target word. + # The encoded source sequence (encoder's output) must be specified by + # StaticInput which is a read-only memory. + # Here, GeneratedInputs automatically fetchs the last generated word, + # which is initialized by a start mark, such as . + trg_embedding = GeneratedInput( + size=target_dict_dim, + embedding_name='_target_language_embedding', + embedding_size=word_vector_dim) + group_inputs.append(trg_embedding) + beam_gen = beam_search(name=decoder_group_name, + step=gru_decoder_with_attention, + input=group_inputs, + bos_id=0, # Beginnning token. + eos_id=1, # End of sentence token. + beam_size=beam_size, + max_length=max_length) + + seqtext_printer_evaluator(input=beam_gen, + id_input=data_layer(name="sent_id", size=1), + dict_file=trg_dict_path, + result_file=gen_trans_file) + outputs(beam_gen) + + +Notice that this generation technique is only useful for decoder like generation process. If you are working on sequence tagging tasks, please refer to `Semantic Role Labeling Demo <../../demo/semantic_role_labeling/index.html>`_ for more details. + +The full configuration file is located at :code:`demo/seqToseq/seqToseq_net.py`. diff --git a/release_doc/0.9.0/doc/_sources/build/build_from_source.txt b/release_doc/0.9.0/doc/_sources/build/build_from_source.txt new file mode 100644 index 0000000000000000000000000000000000000000..b8f26f431eb7a04147fe791a8c805427c827fe09 --- /dev/null +++ b/release_doc/0.9.0/doc/_sources/build/build_from_source.txt @@ -0,0 +1,192 @@ +Installing from Sources +========================== + +* [1. 
Download and Setup](#download) +* [2. Requirements](#requirements) +* [3. Build on Ubuntu](#ubuntu) + +## Download and Setup +You can download PaddlePaddle from the [github source](https://github.com/gangliao/Paddle). + +```bash +git clone https://github.com/baidu/Paddle paddle +cd paddle +``` + +## Requirements + +To compile the source code, your computer must be equipped with GCC >=4.6 or Clang compiler. +### Dependencies + +- **CMake**: version >= 2.8 +- **BLAS**: MKL, OpenBlas or ATLAS +- **protobuf**: version >= 2.4, **Note: 3.x is not supported** +- **python**: only python 2.7 is supported currently + +### Options + +PaddlePaddle supports some build options. To enable it, first you need to install the related libraries. + + + + + + + + + + + + + + + + + + + +
OptionalDescription
WITH_GPUCompile with GPU mode.
WITH_DOUBLECompile with double precision floating-point, default: single precision.
WITH_GLOGCompile with glog. If not found, default: an internal log implementation.
WITH_GFLAGSCompile with gflags. If not found, default: an internal flag implementation.
WITH_TESTINGCompile with gtest for PaddlePaddle's unit testing.
WITH_DOC Compile to generate PaddlePaddle's docs, default: disabled (OFF).
WITH_SWIG_PYCompile with python predict API, default: disabled (OFF).
WITH_STYLE_CHECKCompile with code style check, default: enabled (ON).
+ + +**Note:** + - The GPU version works best with Cuda Toolkit 7.5 and cuDNN v5. + - Other versions like Cuda Toolkit 6.5, 7.0, 8.0 and cuDNN v2, v3, v4 are also supported. + - **To utilize cuDNN v5, Cuda Toolkit 7.5 is prerequisite and vice versa.** + +As a simple example, consider the following: + +1. **Python Dependencies(optional)** + + To compile PaddlePaddle with python predict API, make sure swig installed and set `-DWITH_SWIG_PY=ON` as follows: + + ```bash + # install swig on ubuntu + sudo apt-get install swig + # install swig on Mac OS X + brew install swig + + # active swig in cmake + cmake .. -DWITH_SWIG_PY=ON + ``` + +2. **Doc Dependencies(optional)** + + To generate PaddlePaddle's documentation, install dependencies and set `-DWITH_DOC=ON` as follows: + + ```bash + pip install 'sphinx>=1.4.0' + pip install sphinx_rtd_theme breathe recommonmark + + # install doxygen on Ubuntu + sudo apt-get install doxygen + # install doxygen on Mac OS X + brew install doxygen + + # active docs in cmake + cmake .. -DWITH_DOC=ON` + ``` + +## Build on Ubuntu 14.04 + +### Install Dependencies + +- **CPU Dependencies** + + ```bash + # necessary + sudo apt-get update + sudo apt-get install -y g++ make cmake build-essential libatlas-base-dev python python-pip libpython-dev m4 libprotobuf-dev protobuf-compiler python-protobuf python-numpy git + # optional + sudo apt-get install libgoogle-glog-dev + sudo apt-get install libgflags-dev + sudo apt-get install libgtest-dev + sudo pip install wheel + pushd /usr/src/gtest + cmake . + make + sudo cp *.a /usr/lib + popd + ``` + +- **GPU Dependencies (optional)** + + To build GPU version, you will need the following installed: + + 1. a CUDA-capable GPU + 2. A supported version of Linux with a gcc compiler and toolchain + 3. NVIDIA CUDA Toolkit (available at http://developer.nvidia.com/cuda-downloads) + 4. 
NVIDIA cuDNN Library (availabel at https://developer.nvidia.com/cudnn) + + The CUDA development environment relies on tight integration with the host development environment, + including the host compiler and C runtime libraries, and is therefore only supported on + distribution versions that have been qualified for this CUDA Toolkit release. + + After downloading cuDNN library, issue the following commands: + + ```bash + sudo tar -xzf cudnn-7.5-linux-x64-v5.1.tgz -C /usr/local + sudo chmod a+r /usr/local/cuda/include/cudnn.h /usr/local/cuda/lib64/libcudnn* + ``` + Then you need to set LD\_LIBRARY\_PATH, PATH environment variables in ~/.bashrc. + + ```bash + export LD_LIBRARY_PATH=/usr/local/cuda/lib64:$LD_LIBRARY_PATH + export PATH=/usr/local/cuda/bin:$PATH + ``` + +### Build and Install + +As usual, the best option is to create build folder under paddle project directory. + +```bash +mkdir build && cd build +cmake .. +``` + +CMake first check PaddlePaddle's dependencies in system default path. After installing some optional +libraries, corresponding build option will be set automatically (for instance, glog, gtest and gflags). +If still not found, you can manually set it based on CMake error information from your screen. + +As a simple example, consider the following: + +- **Only CPU** + + ```bash + cmake .. -DWITH_GPU=OFF + ``` +- **GPU** + + ```bash + cmake .. -DWITH_GPU=ON + ``` + +- **GPU with doc and swig** + + ```bash + cmake .. -DWITH_GPU=ON -DWITH_DOC=ON -DWITH_SWIG_PY=ON + ``` + +Finally, you can build PaddlePaddle: + +```bash +# you can add build option here, such as: +cmake .. -DWITH_GPU=ON -DCMAKE_INSTALL_PREFIX= +# please use sudo make install, if you want to install PaddlePaddle into the system +make -j `nproc` && make install +# set PaddlePaddle installation path in ~/.bashrc +export PATH=/bin:$PATH +``` + +**Note:** + +If you set `WITH_SWIG_PY=ON`, related python dependencies also need to be installed. 
+Otherwise, PaddlePaddle will automatically install python dependencies +at first time when user run paddle commands, such as `paddle version`, `paddle train`. +It may require sudo privileges: + +```bash +# you can run +sudo pip install /opt/paddle/share/wheels/*.whl +# or just run +sudo paddle version +``` diff --git a/release_doc/0.9.0/doc/_sources/build/contribute_to_paddle.txt b/release_doc/0.9.0/doc/_sources/build/contribute_to_paddle.txt new file mode 100644 index 0000000000000000000000000000000000000000..a9ab69c5f42b8d341dca87479a642e28ca58fbf4 --- /dev/null +++ b/release_doc/0.9.0/doc/_sources/build/contribute_to_paddle.txt @@ -0,0 +1,130 @@ +# Contribute to PaddlePaddle + +We sincerely appreciate your contributions. You can use fork and pull request +workflow to merge your code. + +## Code Requirements +- Your code must be fully documented by + [doxygen](http://www.stack.nl/~dimitri/doxygen/) style. +- Make sure the compiler option WITH\_STYLE\_CHECK is on and the compiler + passes the code style check. +- All code must have unit test. +- Pass all unit tests. + +The following tutorial guides you into submitting your contibution. + +## [Creating a Fork](https://help.github.com/articles/fork-a-repo/) + +Just head over to the GitHub page and click the "Fork" button. +It's just that simple. + +## Clone + +Paddle is currently using [git-flow branching model](http://nvie.com/posts/a-successful-git-branching-model/). +The **develop** is the main branch, and other user's branches are feature branches. + +Once you've created a fork, you can use your favorite git client to clone your +repo or just head straight to the command line: + +```shell +# Clone your fork to your local machine +git clone --branch develop https://github.com/USERNAME/Paddle.git +``` +If your repository doesn't contain **develop** branch, just create it by your own. + +```shell +git clone https://github.com/USERNAME/Paddle.git Paddle +cd Paddle +git checkout -b develop # create develop branch. 
+git remote add upstream https://github.com/baidu/Paddle.git # add upstream to baidu/Paddle +git pull upstream develop # update to upstream +``` + +Then you can start to develop by making a local developement branch + +```shell +git checkout -b MY_COOL_STUFF_BRANCH +``` + +## Commit + +Commit your changes by following command lines: + +```shell +# show the working tree status +git status +# add modified files +git add xx +env EDITOR=vim git commit # You can write your comments by vim/nano/emacs. +``` +The first line of commit infomation is the title. The second and later lines +are the details if any. + +## Keeping Fork Up to Date + +Before pull your request, you should sync your code from the latest PaddlePaddle. +To do this, you'll need to add a remote at first: + +```shell +# see the current configured remote repository +git remote -v +# add upstream repository +git remote add upstream https://github.com/baidu/Paddle.git +# verify the new upstream +git remote -v +``` + +Update your fork with the latest upstream changes: + +```shell +git pull --rebase upstream develop +``` + +If there are no unique commits locally, git will simply perform a fast-forward. +However, if you have been making changes (in the vast majority of cases you +probably shouldn't be), you may have to deal with conflicts. + +Now, your local master branch is up-to-date with everything modified upstream. + +## Push to GitHub + +```shell +# push to your repository in Github +git push -u origin MY_COOL_STUFF_BRANCH # create remote branch MY_COOL_STUFF_BRANCH to origin. +``` + +## Pull Request + +Go to the page for your fork on GitHub, select your development branch, +and click the **pull request button**. + +## Update your pull request with the lastest version + +During the code review, your pull request may become stale because new commits in +baidu/Paddle. GitHub allows autmotic update if there is no conflict. You can do this +by clicking the "Update Branch" button in your pull request page. 
However, in the case +of conflict, you need to do the update manually. You need to do the following on +your local repository: +```shell +git checkout MY_COOL_STUFF_BRANCH +git pull upstream develop +# You may need to resolve the conflict according to the git prompt. +# Make and test your code. +git push origin MY_COOL_STUFF_BRANCH +``` +Now your Pull Request is updated with the latest version. + +## Revise your pull request + +When you revise your pull request according to reviewer's comments, please use 'git commit' instead of 'git commit --amend' to commit your changes so that the reviewers can see the difference between the new pull requrest and the old pull request. + +The possible commands are + +```shell +git checkout MY_COOL_STUFF_BRANCH +git pull upstream develop # update local to newest code base. +# May be some conflicts will occured. +# And develop your cool stuff +env EDITOR=vim git commit # add your revise log +git push origin MY_COOL_STUFF_BRANCH +``` diff --git a/release_doc/0.9.0/doc/_sources/build/docker_install.txt b/release_doc/0.9.0/doc/_sources/build/docker_install.txt new file mode 100644 index 0000000000000000000000000000000000000000..e95de35f4da35fee511551f13bc6026532cce5c3 --- /dev/null +++ b/release_doc/0.9.0/doc/_sources/build/docker_install.txt @@ -0,0 +1,122 @@ +Docker installation guide +========================== + +PaddlePaddle provide the `Docker `_ image. `Docker`_ is a lightweight container utilities. The performance of PaddlePaddle in `Docker`_ container is basically as same as run it in a normal linux. The `Docker`_ is a very convenient way to deliver the binary release for linux programs. + +.. 
note:: + + The `Docker`_ image is the recommended way to run PaddlePaddle + +PaddlePaddle Docker images +-------------------------- + +There are 12 `images `_ for PaddlePaddle, and the name is :code:`paddle-dev/paddle`, tags are\: + + ++-----------------+------------------+------------------------+-----------------------+ +| | normal | devel | demo | ++=================+==================+========================+=======================+ +| CPU | cpu-latest | cpu-devel-latest | cpu-demo-latest | ++-----------------+------------------+------------------------+-----------------------+ +| GPU | gpu-latest | gpu-devel-latest | gpu-demo-latest | ++-----------------+------------------+------------------------+-----------------------+ +| CPU WITHOUT AVX | cpu-noavx-latest | cpu-devel-noavx-latest | cpu-demo-noavx-latest | ++-----------------+------------------+------------------------+-----------------------+ +| GPU WITHOUT AVX | gpu-noavx-latest | gpu-devel-noavx-latest | gpu-demo-noavx-latest | ++-----------------+------------------+------------------------+-----------------------+ + +And the three columns are: + +* normal\: The docker image only contains binary of PaddlePaddle. +* devel\: The docker image contains PaddlePaddle binary, source code and essential build environment. +* demo\: The docker image contains the dependencies to run PaddlePaddle demo. + +And the four rows are: + +* CPU\: CPU Version. Support CPU which has :code:`AVX` instructions. +* GPU\: GPU Version. Support GPU, and cpu has :code:`AVX` instructions. +* CPU WITHOUT AVX\: CPU Version, which support most CPU even doesn't have :code:`AVX` instructions. +* GPU WITHOUT AVX\: GPU Version, which support most CPU even doesn't have :code:`AVX` instructions. + +User can choose any version depends on machine. The following script can help you to detect your CPU support :code:`AVX` or not. + +.. 
code-block:: bash + + if cat /proc/cpuinfo | grep -q avx ; then echo "Support AVX"; else echo "Not support AVX"; fi + +If the output is :code:`Support AVX`, then you can choose the AVX version of PaddlePaddle, otherwise, you need select :code:`noavx` version of PaddlePaddle. For example, the CPU develop version of PaddlePaddle is :code:`paddle-dev/paddle:cpu-devel-latest`. + +The PaddlePaddle images don't contain any entry command. You need to write your entry command to use this image. See :code:`Remote Access` part or just use following command to run a :code:`bash` + +.. code-block:: bash + + docker run -it paddledev/paddle:cpu-latest /bin/bash + + +Download and Run Docker images +------------------------------ + +You have to install Docker in your machine which has linux kernel version 3.10+ first. You can refer to the official guide https://docs.docker.com/engine/installation/ for further information. + +You can use :code:`docker pull ` to download images first, or just launch a container with :code:`docker run` \: + +.. code-block:: bash + + docker run -it paddledev/paddle:cpu-latest + + +If you want to launch container with GPU support, you need to set some environment variables at the same time: + +.. code-block:: bash + + export CUDA_SO="$(\ls /usr/lib64/libcuda* | xargs -I{} echo '-v {}:{}') $(\ls /usr/lib64/libnvidia* | xargs -I{} echo '-v {}:{}')" + export DEVICES=$(\ls /dev/nvidia* | xargs -I{} echo '--device {}:{}') + docker run ${CUDA_SO} ${DEVICES} -it paddledev/paddle:gpu-latest + + +Some notes for docker +--------------------- + +Performance ++++++++++++ + +Since Docker is based on the lightweight virtual containers, the CPU computing performance maintains well. And GPU driver and equipments are all mapped to the container, so the GPU computing performance would not be seriously affected. + +If you use high performance nic, such as RDMA(RoCE 40GbE or IB 56GbE), Ethernet(10GbE), it is recommended to use config "-net = host". 
+ + + + +Remote access ++++++++++++++ + + +If you want to enable ssh access background, you need to build an image by yourself. Please refer to official guide https://docs.docker.com/engine/reference/builder/ for further information. + +Following is a simple Dockerfile with ssh: + +.. literalinclude:: ../../doc_cn/build_and_install/install/paddle_ssh.Dockerfile + +Then you can build an image with Dockerfile and launch a container: + +.. code-block:: bash + + # cd into Dockerfile directory + docker build . -t paddle_ssh + # run container, and map host machine port 8022 to container port 22 + docker run -d -p 8022:22 --name paddle_ssh_machine paddle_ssh + +Now, you can ssh on port 8022 to access the container, username is root, password is also root: + +.. code-block:: bash + + ssh -p 8022 root@YOUR_HOST_MACHINE + +You can stop and delete the container as following: + +.. code-block:: bash + + # stop + docker stop paddle_ssh_machine + # delete + docker rm paddle_ssh_machine diff --git a/release_doc/0.9.0/doc/_sources/build/index.txt b/release_doc/0.9.0/doc/_sources/build/index.txt new file mode 100644 index 0000000000000000000000000000000000000000..511cdea145c7fd0e41566d0a85115dbb06f84058 --- /dev/null +++ b/release_doc/0.9.0/doc/_sources/build/index.txt @@ -0,0 +1,33 @@ +Build And Install PaddlePaddle +================================ + +Install PaddlePaddle +---------------------- + +.. toctree:: + :maxdepth: 1 + :glob: + + install_* + internal/install_from_jumbo.md + docker_install.rst + ubuntu_install.rst + +Build from Source +----------------- + +.. warning:: + + Please use :code:`deb` package or :code:`docker` image to install paddle. The building guide is used for hacking or contributing to PaddlePaddle. + + +If you want to hack and contribute PaddlePaddle source code, following guides can help you\: + + +.. 
toctree:: + :maxdepth: 1 + :glob: + + build_from_source.md + contribute_to_paddle.md + diff --git a/release_doc/0.9.0/doc/_sources/build/ubuntu_install.txt b/release_doc/0.9.0/doc/_sources/build/ubuntu_install.txt new file mode 100644 index 0000000000000000000000000000000000000000..ea8042085bf458be96e71017d229d88ad867695b --- /dev/null +++ b/release_doc/0.9.0/doc/_sources/build/ubuntu_install.txt @@ -0,0 +1,25 @@ +Debian Package installation guide +================================= + +PaddlePaddle supports :code:`deb` pacakge. The installation of this :code:`deb` package is tested in ubuntu 14.04, but it should be support other debian based linux, too. + +There are four versions of debian package, :code:`cpu`, :code:`gpu`, :code:`cpu-noavx`, :code:`gpu-noavx`. And :code:`noavx` version is used to support CPU which does not contain :code:`AVX` instructions. The download url of :code:`deb` package is \: https://github.com/baidu/Paddle/releases/ + + +After downloading PaddlePaddle deb packages, you can use :code:`gdebi` install. + +.. code-block:: bash + + gdebi paddle-*.deb + +If :code:`gdebi` is not installed, you can use :code:`sudo apt-get install gdebi` to install it. + +Or you can use following commands to install PaddlePaddle. + +.. code-block:: bash + + dpkg -i paddle-*.deb + apt-get install -f + +And if you use GPU version deb package, you need to install CUDA toolkit and cuDNN, and set related environment variables(such as LD_LIBRARY_PATH) first. It is normal when `dpkg -i` get errors. `apt-get install -f` will continue install paddle, and install dependences. + diff --git a/release_doc/0.9.0/doc/_sources/cluster/index.txt b/release_doc/0.9.0/doc/_sources/cluster/index.txt new file mode 100644 index 0000000000000000000000000000000000000000..9062f85f98d2981b5c8dcf8149e32c2ccdac77f4 --- /dev/null +++ b/release_doc/0.9.0/doc/_sources/cluster/index.txt @@ -0,0 +1,8 @@ +Cluster Train +==================== + +.. 
toctree:: + :glob: + + opensource/cluster_train.md + internal/index.md diff --git a/release_doc/0.9.0/doc/_sources/cluster/opensource/cluster_train.txt b/release_doc/0.9.0/doc/_sources/cluster/opensource/cluster_train.txt new file mode 100644 index 0000000000000000000000000000000000000000..cb493a88f031850cb6a5eeed0ebe9e41bb7e01c3 --- /dev/null +++ b/release_doc/0.9.0/doc/_sources/cluster/opensource/cluster_train.txt @@ -0,0 +1,156 @@ +# Distributed Training + +In this article, we explain how to run distributed Paddle training jobs on clusters. We will create the distributed version of the single-process training example, [recommendation](https://github.com/baidu/Paddle/tree/develop/demo/recommendation). + +[Scripts](https://github.com/baidu/Paddle/tree/develop/paddle/scripts/cluster_train) used in this article launch distributed jobs via SSH. They also work as a reference for users running more sophisticated cluster management systems like MPI and Kubernetes. + +## Prerequisite + +1. Aforementioned scripts use a Python library [fabric](http://www.fabfile.org/) to run SSH commands. We can use `pip` to install fabric: + + ```bash +pip install fabric + ``` + +1. We need to install PaddlePaddle on all nodes in the cluster. To enable GPUs, we need to install CUDA in `/usr/local/cuda`; otherwise Paddle would report errors at runtime. + +1. Set the `ROOT_DIR` variable in [`cluster_train/conf.py`] on all nodes. For convenience, we often create a Unix user `paddle` on all nodes and set `ROOT_DIR=/home/paddle`. In this way, we can write public SSH keys into `/home/paddle/.ssh/authorized_keys` so that user `paddle` can SSH to all nodes without password. + +## Prepare Job Workspace + +We refer to the directory where we put dependent libraries, config files, etc., as *workspace*. + +These ```train/test``` data should be prepared before launching cluster job. 
To satisfy the requirement that train/test data are placed in different directory from workspace, PADDLE refers train/test data according to index file named as ```train.list/test.list``` which are used in model config file. So the train/test data also contains train.list/test.list two list file. All local training demo already provides scripts to help you create these two files, and all nodes in cluster job will handle files with same logical code in normal condition. + +Generally, you can use same model file from local training for cluster training. What you should have in mind that, the ```batch_size``` set in ```setting``` function in model file means batch size in ```each``` node of cluster job instead of total batch size if synchronization SGD was used. + +Following steps are based on demo/recommendation demo in demo directory. + +You just go through demo/recommendation tutorial doc until ```Train``` section, and at last you will get train/test data and model configuration file. Finaly, just use demo/recommendation as workspace for cluster training. + +At last your workspace should look like as follow: +``` +. +|-- common_utils.py +|-- data +| |-- config.json +| |-- config_generator.py +| |-- meta.bin +| |-- meta_config.json +| |-- meta_generator.py +| |-- ml-1m +| |-- ml_data.sh +| |-- ratings.dat.test +| |-- ratings.dat.train +| |-- split.py +| |-- test.list +| `-- train.list +|-- dataprovider.py +|-- evaluate.sh +|-- prediction.py +|-- preprocess.sh +|-- requirements.txt +|-- run.sh +`-- trainer_config.py +``` +Not all of these files are needed for cluster training, but it's not necessary to remove useless files. + +```trainer_config.py``` +Indicates the model config file. + +```train.list``` and ```test.list``` +File index. It stores all relative or absolute file paths of all train/test data at current node. + +```dataprovider.py``` +used to read train/test samples. It's same as local training. 
+ +```data``` +all files in data directory are refered by train.list/test.list which are refered by data provider. + + +## Prepare Cluster Job Configuration + +The options below must be carefully set in cluster_train/conf.py + +```HOSTS``` all nodes hostname or ip that will run cluster job. You can also append user and ssh port with hostname, such as root@192.168.100.17:9090. + +```ROOT_DIR``` workspace ROOT directory for placing JOB workspace directory + +```PADDLE_NIC``` the NIC(Network Interface Card) interface name for cluster communication channel, such as eth0 for ethternet, ib0 for infiniband. + +```PADDLE_PORT``` port number for cluster commnunication channel + +```PADDLE_PORTS_NUM``` the number of port used for cluster communication channle. if the number of cluster nodes is small(less than 5~6nodes), recommend you set it to larger, such as 2 ~ 8, for better network performance. + +```PADDLE_PORTS_NUM_FOR_SPARSE``` the number of port used for sparse updater cluster commnunication channel. if sparse remote update is used, set it like ```PADDLE_PORTS_NUM``` + +```LD_LIBRARY_PATH``` set addtional LD_LIBRARY_PATH for cluster job. You can use it to set CUDA libraries path. + +Default Configuration as follow: + +```python +HOSTS = [ + "root@192.168.100.17", + "root@192.168.100.18", + ] + +''' +workspace configuration +''' + +#root dir for workspace +ROOT_DIR = "/home/paddle" + +''' +network configuration +''' +#pserver nics +PADDLE_NIC = "eth0" +#pserver port +PADDLE_PORT = 7164 +#pserver ports num +PADDLE_PORTS_NUM = 2 +#pserver sparse ports num +PADDLE_PORTS_NUM_FOR_SPARSE = 2 + +#environments setting for all processes in cluster job +LD_LIBRARY_PATH="/usr/local/cuda/lib64:/usr/lib64" +``` + +### Launching Cluster Job +```paddle.py``` provides automatical scripts to start all PaddlePaddle cluster processes in different nodes. 
By default, all command line options can set as ```paddle.py``` command options and ```paddle.py``` will transparently and automatically set these options to PaddlePaddle lower level processes. + +```paddle.py```provides two distinguished command option for easy job launching. + +```job_dispatch_package``` set it with local ```workspace```directory, it will be dispatched to all nodes set in conf.py. It could be helpful for frequent hacking workspace files, otherwise frequent mulit-nodes workspace deployment could make your crazy. +```job_workspace``` set it with already deployed workspace directory, ```paddle.py``` will skip dispatch stage to directly launch cluster job with all nodes. It could help to reduce heavy +dispatch latency. + +```cluster_train/run.sh``` provides command line sample to run ```demo/recommendation``` cluster job, just modify ```job_dispatch_package``` and ```job_workspace``` with your defined directory, then: +``` +sh run.sh +``` + +The cluster Job will start in several seconds. + +### Kill Cluster Job +```paddle.py``` can capture ```Ctrl + C``` SIGINT signal to automatically kill all processes launched by it. So just stop ```paddle.py``` to kill cluster job. You should mannally kill job if program crashed. + +### Check Cluster Training Result +Check log in $workspace/log for details, each node owns same log structure. + +```paddle_trainer.INFO``` +It provides almost all interal output log for training, same as local training. Check runtime model convergence here. + +```paddle_pserver2.INFO``` +It provides pserver running log, which could help to diagnose distributed error. + +```server.log``` +It provides stderr and stdout of pserver process. Check error log if training crashs. + +```train.log``` +It provides stderr and stdout of trainer process. Check error log if training crashs. + +### Check Model Output +After one pass finished, model files will be writed in ```output``` directory in node 0. 
+```nodefile``` in workspace indicates the node id of current cluster job. diff --git a/release_doc/0.9.0/doc/_sources/demo/embedding_model/index.txt b/release_doc/0.9.0/doc/_sources/demo/embedding_model/index.txt new file mode 100644 index 0000000000000000000000000000000000000000..06f3ff1f009e470cdb9687658613a76acbb79751 --- /dev/null +++ b/release_doc/0.9.0/doc/_sources/demo/embedding_model/index.txt @@ -0,0 +1,139 @@ +# Chinese Word Embedding Model Tutorial # +---------- +This tutorial is to guide you through the process of using a Pretrained Chinese Word Embedding Model in the PaddlePaddle standard format. + +We thank @lipeng for the pull request that defined the model schemas and pretrained the models. + +## Introduction ### +### Chinese Word Dictionary ### +Our Chinese-word dictionary is created on Baidu ZhiDao and Baidu Baike by using in-house word segmentor. For example, the participle of "《红楼梦》" is "《","红楼梦","》",and "《红楼梦》". Our dictionary (using UTF-8 format) has has two columns: word and its frequency. The total word count is 3206325, including 3 special token: + - ``: the start of a sequence + - ``: the end of a sequence + - ``: a word not included in dictionary + +### Pretrained Chinese Word Embedding Model ### +Inspired by paper [A Neural Probabilistic Language Model](http://www.jmlr.org/papers/volume3/bengio03a/bengio03a.pdf), our model architecture (**Embedding joint of six words->FullyConnect->SoftMax**) is as following graph. And for our dictionary, we pretrain four models with different word vector dimenstions, i.e 32, 64, 128, 256. +
![Neural n-gram model architecture](./neural-n-gram-model.png)
+
Figure 1. Neural n-gram model
+ +### Download and Extract ### +To download and extract our dictionary and pretrained model, run the following commands. + + cd $PADDLE_ROOT/demo/model_zoo/embedding + ./pre_DictAndModel.sh + +## Chinese Paraphrasing Example ## +We provide a paraphrasing task to show the usage of pretrained Chinese Word Dictionary and Embedding Model. + +### Data Preparation and Preprocess ### + +First, run the following commands to download and extract the in-house dataset. The dataset (using UTF-8 format) has 20 training samples, 5 testing samples and 2 generating samples. + + cd $PADDLE_ROOT/demo/seqToseq/data + ./paraphrase_data.sh + +Second, preprocess data and build dictionary on train data by running the following commands, and the preprocessed dataset is stored in `$PADDLE_SOURCE_ROOT/demo/seqToseq/data/pre-paraphrase`: + + cd $PADDLE_ROOT/demo/seqToseq/ + python preprocess.py -i data/paraphrase [--mergeDict] + +- `--mergeDict`: if using this option, the source and target dictionary are merged, i.e, two dictionaries have the same context. Here, as source and target data are all chinese words, this option can be used. 
+ +### User Specified Embedding Model ### +The general command of extracting desired parameters from the pretrained embedding model based on user dictionary is: + + cd $PADDLE_ROOT/demo/model_zoo/embedding + python extract_para.py --preModel PREMODEL --preDict PREDICT --usrModel USRMODEL--usrDict USRDICT -d DIM + +- `--preModel PREMODEL`: the name of pretrained embedding model +- `--preDict PREDICT`: the name of pretrained dictionary +- `--usrModel USRMODEL`: the name of extracted embedding model +- `--usrDict USRDICT`: the name of user specified dictionary +- `-d DIM`: dimension of parameter + +Here, you can simply run the command: + + cd $PADDLE_ROOT/demo/seqToseq/data/ + ./paraphrase_model.sh + +And you will see following embedding model structure: + + paraphrase_model + |--- _source_language_embedding + |--- _target_language_embedding + +### Training Model in PaddlePaddle ### +First, create a model config file, see example `demo/seqToseq/paraphrase/train.conf`: + + from seqToseq_net import * + is_generating = False + + ################## Data Definition ##################### + train_conf = seq_to_seq_data(data_dir = "./data/pre-paraphrase", + job_mode = job_mode) + + ############## Algorithm Configuration ################## + settings( + learning_method = AdamOptimizer(), + batch_size = 50, + learning_rate = 5e-4) + + ################# Network configure ##################### + gru_encoder_decoder(train_conf, is_generating, word_vector_dim = 32) + +This config is almost the same as `demo/seqToseq/translation/train.conf`. 
+ +Then, train the model by running the command: + + cd $PADDLE_SOURCE_ROOT/demo/seqToseq/paraphrase + ./train.sh + +where `train.sh` is almost the same as `demo/seqToseq/translation/train.sh`, the only difference is following two command arguments: + +- `--init_model_path`: path of the initialization model, here is `data/paraphrase_model` +- `--load_missing_parameter_strategy`: operations when model file is missing, here use a normal distibution to initialize the other parameters except for the embedding layer + +For users who want to understand the dataset format, model architecture and training procedure in detail, please refer to [Text generation Tutorial](../text_generation/text_generation.md). + +## Optional Function ## +### Embedding Parameters Observation +For users who want to observe the embedding parameters, this function can convert a PaddlePaddle binary embedding model to a text model by running the command: + + cd $PADDLE_ROOT/demo/model_zoo/embedding + python paraconvert.py --b2t -i INPUT -o OUTPUT -d DIM + +- `-i INPUT`: the name of input binary embedding model +- `-o OUTPUT`: the name of output text embedding model +- `-d DIM`: the dimension of parameter + +You will see parameters like this in output text model: + + 0,4,32156096 + -0.7845433,1.1937413,-0.1704215,0.4154715,0.9566584,-0.5558153,-0.2503305, ...... + 0.0000909,0.0009465,-0.0008813,-0.0008428,0.0007879,0.0000183,0.0001984, ...... + ...... 
+ +- 1st line is **PaddlePaddle format file head**, it has 3 attributes: + - version of PaddlePaddle, here is 0 + - sizeof(float), here is 4 + - total number of parameter, here is 32156096 +- Other lines print the paramters (assume `` = 32) + - each line print 32 paramters splitted by ',' + - there is 32156096/32 = 1004877 lines, meaning there is 1004877 embedding words + +### Embedding Parameters Revision +For users who want to revise the embedding parameters, this function can convert a revised text embedding model to a PaddlePaddle binary model by running the command: + + cd $PADDLE_ROOT/demo/model_zoo/embedding + python paraconvert.py --t2b -i INPUT -o OUTPUT + +- `-i INPUT`: the name of input text embedding model. +- `-o OUTPUT`: the name of output binary embedding model + +Note that the format of input text model is as follows: + + -0.7845433,1.1937413,-0.1704215,0.4154715,0.9566584,-0.5558153,-0.2503305, ...... + 0.0000909,0.0009465,-0.0008813,-0.0008428,0.0007879,0.0000183,0.0001984, ...... + ...... +- there is no file header in 1st line +- each line stores parameters for one word, the separator is commas ',' diff --git a/release_doc/0.9.0/doc/_sources/demo/image_classification/image_classification.txt b/release_doc/0.9.0/doc/_sources/demo/image_classification/image_classification.txt new file mode 100644 index 0000000000000000000000000000000000000000..29cfc99702c362d1eaeeff5332f56122b8de337a --- /dev/null +++ b/release_doc/0.9.0/doc/_sources/demo/image_classification/image_classification.txt @@ -0,0 +1,221 @@ +Image Classification Tutorial +============================== + +This tutorial will guide you through training a convolutional neural network to classify objects using the CIFAR-10 image classification dataset. +As shown in the following figure, the convolutional neural network can recognize the main object in images, and output the classification result. + +
![Image Classification](./image_classification.png)
+ +## Data Preparation +First, download CIFAR-10 dataset. CIFAR-10 dataset can be downloaded from its official website. + + + +We have prepared a script to download and process CIFAR-10 dataset. The script will download CIFAR-10 dataset from the official dataset. +It will convert it to jpeg images and organize them into a directory with the required structure for the tutorial. Make sure that you have installed pillow and its dependents. +Consider the following commands: + +1. install pillow dependents + +```bash +sudo apt-get install libjpeg-dev +pip install pillow +``` + +2. download data and preparation + +```bash +cd demo/image_classification/data/ +sh download_cifar.sh +``` + +The CIFAR-10 dataset consists of 60000 32x32 color images in 10 classes, with 6000 images per class. There are 50000 training images and 10000 test images. + +Here are the classes in the dataset, as well as 10 random images from each: +
![The 10 CIFAR-10 classes with random sample images](./cifar.png)
+ + +After downloading and converting, we should find a directory (cifar-out) containing the dataset in the following format: + +``` +train +---airplane +---automobile +---bird +---cat +---deer +---dog +---frog +---horse +---ship +---truck +test +---airplane +---automobile +---bird +---cat +---deer +---dog +---frog +---horse +---ship +---truck +``` + +It has two directories:`train` and `test`. These two directories contain training data and testing data of CIFAR-10, respectively. Each of these two folders contains 10 sub-folders, ranging from `airplane` to `truck`. Each sub-folder contains images with the corresponding label. After the images are organized into this structure, we are ready to train an image classification model. + +## Preprocess +After the data has been downloaded, it needs to be pre-processed into the Paddle format. We can run the following command for preprocessing. + +``` +cd demo/image_classification/ +sh preprocess.sh +``` + +`preprocess.sh` calls `./demo/image_classification/preprocess.py` to preprocess image data. +```sh +export PYTHONPATH=$PYTHONPATH:../../ +data_dir=./data/cifar-out +python preprocess.py -i $data_dir -s 32 -c 1 +``` + +`./demo/image_classification/preprocess.py` has the following arguments + +- `-i` or `--input` specifes the input data directory. +- `-s` or `--size` specifies the processed size of images. +- `-c` or `--color` specifes whether images are color images or gray images. + + +## Model Training +We need to create a model config file before training the model. An example of the config file (vgg_16_cifar.py) is listed below. **Note**, it is slightly different from the `vgg_16_cifar.py` which also applies to the prediction. 
+ +```python +from paddle.trainer_config_helpers import * +data_dir='data/cifar-out/batches/' +meta_path=data_dir+'batches.meta' +args = {'meta':meta_path, 'mean_img_size': 32, + 'img_size': 32, 'num_classes': 10, + 'use_jpeg': 1, 'color': "color"} +define_py_data_sources2(train_list=data_dir+"train.list", + test_list=data_dir+'test.list', + module='image_provider', + obj='processData', + args=args) +settings( + batch_size = 128, + learning_rate = 0.1 / 128.0, + learning_method = MomentumOptimizer(0.9), + regularization = L2Regularization(0.0005 * 128)) + +img = data_layer(name='image', size=3*32*32) +lbl = data_layer(name="label", size=10) +# small_vgg is predined in trainer_config_helpers.network +predict = small_vgg(input_image=img, num_channels=3) +outputs(classification_cost(input=predict, label=lbl)) +``` + +The first line imports python functions for defining networks. +```python +from paddle.trainer_config_helpers import * +``` + +Then define an `define_py_data_sources2` which use python data provider +interface. The arguments in `args` are used in `image_provider.py` which +yeilds image data and transform them to Paddle. + - `meta`: the mean value of training set. + - `mean_img_size`: the size of mean feature map. + - `img_size`: the height and width of input image. + - `num_classes`: the number of classes. + - `use_jpeg`: the data storage type when preprocessing. + - `color`: specify color image. + +`settings` specifies the training algorithm. In the following example, +it specifies learning rate as 0.1, but divided by batch size, and the weight decay +is 0.0005 and multiplied by batch size. +```python +settings( + batch_size = 128, + learning_rate = 0.1 / 128.0, + learning_method = MomentumOptimizer(0.9), + regularization = L2Regularization(0.0005 * 128) +) +``` + +The `small_vgg` specifies the network. We use a small version of VGG convolutional network as our network +for classification. 
A description of VGG network can be found here [http://www.robots.ox.ac.uk/~vgg/research/very_deep/](http://www.robots.ox.ac.uk/~vgg/research/very_deep/). +```python +# small_vgg is predined in trainer_config_helpers.network +predict = small_vgg(input_image=img, num_channels=3) +``` +After writing the config, we can train the model by running the script train.sh. Notice that the following script assumes the you run the script in the `./demo/image_classification` folder. If you run the script in a different folder, you need to change the paths of the scripts and the configuration files accordingly. + +```bash +config=vgg_16_cifar.py +output=./cifar_vgg_model +log=train.log + +paddle train \ +--config=$config \ +--dot_period=10 \ +--log_period=100 \ +--test_all_data_in_one_period=1 \ +--use_gpu=1 \ +--save_dir=$output \ +2>&1 | tee $log + +python -m paddle.utils.plotcurve -i $log > plot.png +``` + +- Here we use GPU mode to train. If you have no gpu environment, just set `use_gpu=0`. + +- `./demo/image_classification/vgg_16_cifar.py` is the network and data configuration file. The meaning of the other flags can be found in the documentation of the command line flags. + +- The script `plotcurve.py` requires the python module of `matplotlib`, so if it fails, maybe you need to install `matplotlib`. + + +After training finishes, the training and testing error curves will be saved to `plot.png` using `plotcurve.py` script. An example of the plot is shown below: + +
![Training and testing curves.](./plot.png)
+ + +## Prediction +After we train the model, the model file as well as the model parameters are stored in path `./cifar_vgg_model/pass-%05d`. For example, the model of the 300-th pass is stored at `./cifar_vgg_model/pass-00299`. + +To make a prediction for an image, one can run `predict.sh` as follows. The script will output the label of the classfiication. + +``` +sh predict.sh +``` + +predict.sh: +``` +model=cifar_vgg_model/pass-00299/ +image=data/cifar-out/test/airplane/seaplane_s_000978.png +use_gpu=1 +python prediction.py $model $image $use_gpu +``` + +## Exercise +Train a image classification of birds using VGG model and CUB-200 dataset. The birds dataset can be downloaded here. It contains an image dataset with photos of 200 bird species (mostly North American). + + + + + + +## Delve into Details +### Convolutional Neural Network +A Convolutional Neural Network is a feedforward neural network that uses convolution layers. It is very suitable for building neural networks that process and understand images. A standard convolutional neural network is shown below: + +![Convolutional Neural Network](./lenet.png) + +Convolutional Neural Network contains the following layers: + +- Convolutional layer: It uses convolution operation to extract features from an image or a feature map. +- Pooling layer: It uses max-pooling to downsample feature maps. +- Fully Connected layer: It uses fully connected connections to transform features. + +Convolutional Neural Network achieves amazing performance for image classification because it exploits two important characteristics of images: *local correlation* and *spatial invariance*. By iteratively applying convolution and max-pooing operations, convolutional neural network can well represent these two characteristics of images. + + +For more details of how to define layers and their connections, please refer to the documentation of layers. 
diff --git a/release_doc/0.9.0/doc/_sources/demo/image_classification/index.txt b/release_doc/0.9.0/doc/_sources/demo/image_classification/index.txt new file mode 100644 index 0000000000000000000000000000000000000000..1ea68f14164b22cd211d09d72a7358fe24e4fed7 --- /dev/null +++ b/release_doc/0.9.0/doc/_sources/demo/image_classification/index.txt @@ -0,0 +1,10 @@ +Image Classification Tutorial +============================= + +.. toctree:: + :maxdepth: 3 + :glob: + + Training Locally + cluster_train/internal/cluster_train.md + cluster_train/opensource/cluster_train.md diff --git a/release_doc/0.9.0/doc/_sources/demo/imagenet_model/resnet_model.txt b/release_doc/0.9.0/doc/_sources/demo/imagenet_model/resnet_model.txt new file mode 100644 index 0000000000000000000000000000000000000000..5403ab9f17d2399fee878d0f3c512cb166aba06f --- /dev/null +++ b/release_doc/0.9.0/doc/_sources/demo/imagenet_model/resnet_model.txt @@ -0,0 +1,286 @@ +# Model Zoo - ImageNet # + +[ImageNet](http://www.image-net.org/) is a popular dataset for generic object classification. This tutorial provides convolutional neural network(CNN) models for ImageNet. + +## ResNet Introduction + +ResNets from paper [Deep Residual Learning for Image Recognition](http://arxiv.org/abs/1512.03385) won the 1st place on the ILSVRC 2015 classification task. They present residual learning framework to ease the training of networks that are substantially deeper than those used previously. The residual connections are shown in following figure. The left building block is used in network of 34 layers and the right bottleneck building block is used in network of 50, 101, 152 layers . + +
![resnet_block](./resnet_block.jpg)
+
Figure 1. ResNet Block
+ +We present three ResNet models, which are converted from the models provided by the authors . The classfication errors tested in PaddlePaddle on 50,000 ILSVRC validation set with input images channel order of **BGR** by single scale with the shorter side of 256 and single crop as following table. +
+ +++++ + + + + + + + + + + + + + + + + + + + + + + + + + + +
ResNetTop-1Model Size
ResNet-5024.9%99M
ResNet-10123.7%173M
ResNet-15223.2%234M
+
+ +## ResNet Model + +See ```demo/model_zoo/resnet/resnet.py```. This config contains network of 50, 101 and 152 layers. You can specify layer number by adding argument like ```--config_args=layer_num=50``` in command line arguments. + +### Network Visualization + +You can get a diagram of ResNet network by running the following commands. The script generates dot file and then converts dot file to PNG file, which uses installed draw_dot tool in our server. If you can not access the server, just install graphviz to convert dot file. + +``` +cd demo/model_zoo/resnet +./net_diagram.sh +``` + +### Model Download + +``` +cd demo/model_zoo/resnet +./get_model.sh +``` +You can run above command to download all models and mean file and save them in ```demo/model_zoo/resnet/model``` if downloading successfully. + +``` +mean_meta_224 resnet_101 resnet_152 resnet_50 +``` + * resnet_50: model of 50 layers. + * resnet_101: model of 101 layers. + * resnet_152: model of 152 layers. + * mean\_meta\_224: mean file with 3 x 224 x 224 size in **BGR** order. You also can use three mean values: 103.939, 116.779, 123.68. + +### Parameter Info + +* **Convolution Layer Weight** + + As batch normalization layer is connected after each convolution layer, there is no parameter of bias and only one weight in this layer. + shape: `(Co, ky, kx, Ci)` + * Co: channle number of output feature map. + * ky: filter size in vertical direction. + * kx: filter size in horizontal direction. + * Ci: channle number of input feature map. + + 2-Dim matrix: (Co * ky * kx, Ci), saved in row-major order. + +* **Fully connected Layer Weight** + + 2-Dim matrix: (input layer size, this layer size), saved in row-major order. + +* **[Batch Normalization]() Layer Weight** + +There are four parameters in this layer. In fact, only .w0 and .wbias are the learned parameters. The other two are therunning mean and variance respectively. They will be loaded in testing. 
The following table shows the parameters of a batch normalization layer. +
+ +++++ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Parameter NameNumberMeaning
_res2_1_branch1_bn.w0256gamma, scale parameter
_res2_1_branch1_bn.w1256mean value of feature map
_res2_1_branch1_bn.w2256variance of feature map
_res2_1_branch1_bn.wbias256beta, shift parameter
+
+ +### Parameter Observation + +Users who want to observe the parameters can use python to read: + +``` +import sys +import numpy as np + +def load(file_name): + with open(file_name, 'rb') as f: + f.read(16) # skip header for float type. + return np.fromfile(f, dtype=np.float32) + +if __name__=='__main__': + weight = load(sys.argv[1]) +``` + +or simply use following shell command: + +``` +od -j 16 -f _res2_1_branch1_bn.w0 +``` + +## Feature Extraction + +We provide both C++ and Python interfaces to extract features. The following examples use data in `demo/model_zoo/resnet/example` to show the extracting process in detail. + +### C++ Interface + +First, specify image data list in `define_py_data_sources2` in the config, see example `demo/model_zoo/resnet/resnet.py`. + +``` + train_list = 'train.list' if not is_test else None + # mean.meta is mean file of ImageNet dataset. + # mean.meta size : 3 x 224 x 224. + # If you use three mean value, set like: + # "mean_value:103.939,116.779,123.68;" + args={ + 'mean_meta': "model/mean_meta_224/mean.meta", + 'image_size': 224, 'crop_size': 224, + 'color': True,'swap_channel:': [2, 1, 0]} + define_py_data_sources2(train_list, + 'example/test.list', + module="example.image_list_provider", + obj="processData", + args=args) +``` + +Second, specify layers to extract features in `Outputs()` of `resnet.py`. For example, + +``` +Outputs("res5_3_branch2c_conv", "res5_3_branch2c_bn") +``` + +Third, specify model path and output directory in `extract_fea_c++.sh`, and then run the following commands. + +``` +cd demo/model_zoo/resnet +./extract_fea_c++.sh +``` + +If successful, features are saved in `fea_output/rank-00000` as follows. And you can use `load_feature_c` interface in `load_feature.py ` to load such a file. + +``` +-0.115318 -0.108358 ... -0.087884;-1.27664 ... -1.11516 -2.59123; +-0.126383 -0.116248 ... -0.00534909;-1.42593 ... -1.04501 -1.40769; +``` + +* Each line stores features of a sample. 
Here, the first line stores features of `example/dog.jpg` and second line stores features of `example/cat.jpg`. +* Features of different layers are splitted by `;`, and their order is consistent with the layer order in `Outputs()`. Here, the left features are `res5_3_branch2c_conv` layer and right features are `res5_3_branch2c_bn` layer. + +### Python Interface + +`demo/model_zoo/resnet/classify.py` is an example to show how to use python to extract features. Following example still uses data of `./example/test.list`. Command is as follows: + +``` +cd demo/model_zoo/resnet +./extract_fea_py.sh +``` + +extract_fea_py.sh: + +``` +python classify.py \ + --job=extract \ + --conf=resnet.py\ + --use_gpu=1 \ + --mean=model/mean_meta_224/mean.meta \ + --model=model/resnet_50 \ + --data=./example/test.list \ + --output_layer="res5_3_branch2c_conv,res5_3_branch2c_bn" \ + --output_dir=features + +``` +* \--job=extract: specify job mode to extract feature. +* \--conf=resnet.py: network configure. +* \--use_gpu=1: speficy GPU mode. +* \--model=model/resnet_5: model path. +* \--data=./example/test.list: data list. +* \--output_layer="xxx,xxx": specify layers to extract features. +* \--output_dir=features: output diretcoty. + +Note, since the convolution layer in these ResNet models is suitable for the cudnn implementation which only support GPU. It not support CPU mode because of compatibility issue and we will fix later. + +If run successfully, you will see features saved in `features/batch_0`, this file is produced with cPickle. 
You can use `load_feature_py` interface in `load_feature.py` to open the file, and it returns a dictionary as follows: + +``` +{ +'cat.jpg': {'res5_3_branch2c_conv': array([[-0.12638293, -0.116248 , -0.11883899, ..., -0.00895038, 0.01994277, -0.00534909]], dtype=float32), 'res5_3_branch2c_bn': array([[-1.42593431, -1.28918779, -1.32414699, ..., -1.45933616, -1.04501402, -1.40769434]], dtype=float32)}, +'dog.jpg': {'res5_3_branch2c_conv': array([[-0.11531784, -0.10835785, -0.08809858, ...,0.0055237, 0.01505112, -0.08788397]], dtype=float32), 'res5_3_branch2c_bn': array([[-1.27663755, -1.18272924, -0.90937918, ..., -1.25178063, -1.11515927, -2.59122872]], dtype=float32)} +} +``` + +Observed carefully, these feature values are consistent with the above results extracted by C++ interface. + +## Prediction + +`classify.py` also can be used to predict. We provide an example script `predict.sh` to predict data in `example/test.list` using a ResNet model with 50 layers. + +``` +cd demo/model_zoo/resnet +./predict.sh +``` + +predict.sh calls the `classify.py`: + +``` +python classify.py \ + --job=predict \ + --conf=resnet.py\ + --multi_crop \ + --model=model/resnet_50 \ + --use_gpu=1 \ + --data=./example/test.list +``` +* \--job=extract: speficy job mode to predict. +* \--conf=resnet.py: network configure. +* \--multi_crop: use 10 crops and average predicting probability. +* \--use_gpu=1: speficy GPU mode. +* \--model=model/resnet_50: model path. +* \--data=./example/test.list: data list. + +If run successfully, you will see following results, where 156 and 285 are labels of the images. 
+ +``` +Label of example/dog.jpg is: 156 +Label of example/cat.jpg is: 282 +``` diff --git a/release_doc/0.9.0/doc/_sources/demo/index.txt b/release_doc/0.9.0/doc/_sources/demo/index.txt new file mode 100644 index 0000000000000000000000000000000000000000..289199d496eb3b527fa8c8261820bc8e4d301786 --- /dev/null +++ b/release_doc/0.9.0/doc/_sources/demo/index.txt @@ -0,0 +1,21 @@ +# Examples and demos +There are serveral examples and demos here. + +## Image + +* [Image Classification](image_classification/index.rst) + +## NLP + +* [Sentiment Analysis](sentiment_analysis/index.rst) +* [Text Generation](text_generation/index.rst) +* [Semantic Role Labeling](semantic_role_labeling/index.rst) + +## Recommendation + +* [MovieLens Dataset](rec/ml_dataset.md) +* [MovieLens Regression](rec/ml_regression.rst) + +## Model Zoo +* [ImageNet: ResNet](imagenet_model/resnet_model.md) +* [Embedding: Chinese Word](embedding_model/index.md) diff --git a/release_doc/0.9.0/doc/_sources/demo/quick_start/index_en.txt b/release_doc/0.9.0/doc/_sources/demo/quick_start/index_en.txt new file mode 100644 index 0000000000000000000000000000000000000000..659485d9be1b6a3e9759a2fd040cb09d1f2a3005 --- /dev/null +++ b/release_doc/0.9.0/doc/_sources/demo/quick_start/index_en.txt @@ -0,0 +1,562 @@ +# Quick Start + +This tutorial will teach the basics of deep learning (DL), including how to implement many different models in PaddlePaddle. You will learn how to: + - Prepare data into the standardized format that PaddlePaddle accepts. + - Write data providers that read data into PaddlePaddle. + - Configure neural networks in PaddlePaddle layer by layer. + - Train models. + - Perform inference with trained models. + + +## Install + +To get started, please install PaddlePaddle on your computer. Throughout this tutorial, you will learn by implementing different DL models for text classification. + +To install PaddlePaddle, please follow the instructions here: Build and Install. 
+ +## Overview +For the first step, you will use PaddlePaddle to build a **text classification** system. For example, suppose you run an e-commence website, and you want to analyze the sentiment of user reviews to evaluate product quality. + +For example, given the input + +``` +This monitor is fantastic. +``` + +Your classifier should output “positive”, since this text snippet shows that the user is satisfied with the product. Given this input: + +``` +The monitor breaks down two months after purchase. +``` + +the classifier should output “negative“. + +To build your text classification system, your code will need to perform five steps: +
![](./Pipeline_en.jpg)
+ + - Preprocess data into a standardized format. + - Provide data to the learning model. + - Specify the neural network structure. + - Train the model. + - Inference (make prediction on test examples). + + +1. Preprocess data into standardized format + - In the text classification example, you will start with a text file with one training example per line. Each line contains category id (in machine learning, often denoted the target y), followed by the input text (often denoted x); these two elements are separated by a Tab. For example: ```positive [tab] This monitor is fantastic```. You will preprocess this raw data into a format that Paddle can use. + +2. Provide data to the learning model. + - You can write data providers in Python. For any required data preprocessing step, you can add the preprocessing code to the PyDataProvider Python file. + - In our text classification example, every word or character will be converted into an integer id, specified in a dictionary file. It perform a dictionary lookup in PyDataProvider to get the id. +3. Specify neural network structure. (From easy to hard, we provide 4 kinds of network configurations) + - A logistic regression model. + - A word embedding model. + - A convolutional neural network model. + - A sequential recurrent neural network model. + - You will also learn different learning algorithms. +4. Training model. +5. Inference. + +## Preprocess data into standardized format +In this example, you are going to use [Amazon electronic product review dataset](http://jmcauley.ucsd.edu/data/amazon/) to build a bunch of deep neural network models for text classification. Each text in this dataset is a product review. This dataset has two categories: “positive” and “negative”. Positive means the reviewer likes the product, while negative means the reviewer does not like the product. 
+ +`demo/quick_start` in the [source code](https://github.com/baidu/Paddle) provides scripts for downloading data and preprocessing data as shown below. The data process takes several minutes (about 3 minutes in our machine). + +```bash +cd demo/quick_start +./data/get_data.sh +./preprocess.sh +``` + +## Transfer Data to Model +### Write Data Provider with Python +The following `dataprovider_bow.py` gives a complete example of writing data provider with Python. It includes the following parts: + +* initalizer: define the additional meta-data of the data provider and the types of the input data. +* process: Each `yield` returns a data sample. In this case, it return the text representation and category id. The order of features in the returned result needs to be consistent with the definition of the input types in `initalizer`. + +```python +from paddle.trainer.PyDataProvider2 import * + +# id of the word not in dictionary +UNK_IDX = 0 + +# initializer is called by the framework during initialization. +# It allows the user to describe the data types and setup the +# necessary data structure for later use. +# `settings` is an object. initializer need to properly fill settings.input_types. +# initializer can also store other data structures needed to be used at process(). +# In this example, dictionary is stored in settings. +# `dictionay` and `kwargs` are arguments passed from trainer_config.lr.py +def initializer(settings, dictionary, **kwargs): + # Put the word dictionary into settings + settings.word_dict = dictionary + + # setting.input_types specifies what the data types the data provider + # generates. + settings.input_types = [ + # The first input is a sparse_binary_vector, + # which means each dimension of the vector is either 0 or 1. It is the + # bag-of-words (BOW) representation of the texts. + sparse_binary_vector(len(dictionary)), + # The second input is an integer. It represents the category id of the + # sample. 
2 means there are two labels in the dataset. + # (1 for positive and 0 for negative) + integer_value(2)] + +# Delaring a data provider. It has an initializer 'data_initialzer'. +# It will cache the generated data of the first pass in memory, so that +# during later pass, no on-the-fly data generation will be needed. +# `setting` is the same object used by initializer() +# `file_name` is the name of a file listed train_list or test_list file given +# to define_py_data_sources2(). See trainer_config.lr.py. +@provider(init_hook=initializer, cache=CacheType.CACHE_PASS_IN_MEM) +def process(settings, file_name): + # Open the input data file. + with open(file_name, 'r') as f: + # Read each line. + for line in f: + # Each line contains the label and text of the comment, separated by \t. + label, comment = line.strip().split('\t') + + # Split the words into a list. + words = comment.split() + + # convert the words into a list of ids by looking them up in word_dict. + word_vector = [settings.word_dict.get(w, UNK_IDX) for w in words] + + # Return the features for the current comment. The first is a list + # of ids representing a 0-1 binary sparse vector of the text, + # the second is the integer id of the label. + yield word_vector, int(label) +``` + +### Define Python Data Provider in Configuration files. +You need to add a data provider definition `define_py_data_sources2` in our network configuration. This definition specifies: + +- The path of the training and testing data (`data/train.list`, `data/test.list`). +- The location of the data provider file (`dataprovider_bow`). +- The function to call to get data. (`process`). +- Additional arguments or data. Here it passes the path of word dictionary. + +```python +from paddle.trainer_config_helpers import * + +file = "data/dict.txt" +word_dict = dict() +with open(dict_file, 'r') as f: + for i, line in enumerate(f): + w = line.strip().split()[0] + word_dict[w] = i +# define the data sources for the model. 
+ +# We need to use different processes for training and prediction. +# For training, the input data includes both word IDs and labels. +# For prediction, the input data only includes word IDs. +define_py_data_sources2(train_list='data/train.list', + test_list='data/test.list', + module="dataprovider_bow", + obj="process", + args={"dictionary": word_dict}) +``` +You can refer to the following link for more detailed examples and data formats: PyDataProvider2. + +## Network Architecture +You will describe four kinds of network architectures in this section. +
![](./PipelineNetwork_en.jpg)
+ +First, you will build a logistic regression model. Later, you will also get a chance to build other more powerful network architectures. +For more detailed documentation, you could refer to: Layer documentation. All configuration files are in the `demo/quick_start` directory. + +### Logistic Regression +The architecture is illustrated in the following picture: +
![](./NetLR_en.png)
+ +- You need define the data for text features. The size of the data layer is the number of words in the dictionary. + +```python +word = data_layer(name="word", size=voc_dim) +``` + +- You also need to define the category id for each example. The size of the data layer is the number of labels. + +```python +label = data_layer(name="label", size=label_dim) +``` + +- It uses logistic regression model to classify the vector, and it will output the classification error during training. + - Each layer has an *input* argument that specifies its input layer. Some layers can have multiple input layers. You can use a list of the input layers as input in that case. + - *size* for each layer means the number of neurons of the layer. + - *act_type* means activation function applied to the output of each neuron independently. + - Some layers can have additional special inputs. For example, `classification_cost` needs ground truth label as input to compute classification loss and error. +```python +# Define a fully connected layer with logistic activation (also called softmax activation). +output = fc_layer(input=word, + size=label_dim, + act_type=SoftmaxActivation()) +# Define cross-entropy classification loss and error. +classification_cost(input=output, label=label) +``` + +Performance summary: You can refer to the training and testing scripts later. In order to compare different network architectures, the model complexity and test classification error are listed in the following table: + + +
+ + + + + + + + + + + + + + + + + +
Network nameNumber of parametersTest error
Logistic regression252 KB8.652%
+ +
+ +### Word Embedding Model +In order to use the word embedding model, you need to change the data provider a little bit to make the input words as a sequence of word IDs. The revised data provider `dataprovider_emb.py` is listed below. You only need to change initializer() for the type of the first input. It is changed from sparse_binary_vector to a sequence of integers. process() remains the same. This data provider can also be used for later sequence models. + +```python +def initializer(settings, dictionary, **kwargs): + # Put the word dictionary into settings + settings.word_dict = dictionary + settings.input_types = [ + # Define the type of the first input as a sequence of integers. + integer_value_sequence(len(dictionary)), + # Define the second input for label id + integer_value(2)] + +@provider(init_hook=initializer) +def process(settings, file_name): + ... + # omitted, it is the same as the data provider for LR model +``` + +This model is very similar to the framework of logistic regression, but it uses word embedding vectors instead of sparse vectors to represent words. +
![](./NetContinuous_en.png)
+ +- It can look up the dense word embedding vector in the dictionary (its words embedding vector is `word_dim`). The input is a sequence of N words, the output is N word_dim dimensional vectors. + +```python +emb = embedding_layer(input=word, dim=word_dim) +``` + +- It averages all the word embedding in a sentence to get its sentence representation. + +```python +avg = pooling_layer(input=emb, pooling_type=AvgPooling()) +``` + +The other parts of the model are the same as logistic regression network. + +The performance is summarized in the following table: + + +
+ + + + + + + + + + + + + + + + + +
Network nameNumber of parametersTest error
Word embedding model15 MB8.484%
+
+
+ +### Convolutional Neural Network Model +Convolutional neural network converts a sequence of word embeddings into a sentence representation using temporal convolutions. You will transform the fully connected layer of the word embedding model to 3 new sub-steps. +
![](./NetConv_en.png)
+ + +Text convolution has 3 steps: +1. Get K nearest neighbor context of each word in a sentence, stack them into a 2D vector representation. +2. Apply temporal convolution to this representation to produce a new hidden_dim dimensional vector. +3. Apply max-pooling to the new vectors at all the time steps in a sentence to get a sentence representation. + +```python +# context_len means convolution kernel size. +# context_start means the start of the convolution. It can be negative. In that case, zero padding is applied. +text_conv = sequence_conv_pool(input=emb, + context_start=k, + context_len=2 * k + 1) +``` + +The performance is summarized in the following table: + + +
+ + + + + + + + + + + + + + + + + +
Network nameNumber of parametersTest error
Convolutional model16 MB5.628%
+
+ +### Recurrent Model +
![](./NetRNN_en.png)
+ +You can use a recurrent neural network as our time sequence model, including the simple RNN model, GRU model, and LSTM model. + +- GRU model can be specified via: + +```python +gru = simple_gru(input=emb, size=gru_size) +``` + +- LSTM model can be specified via: + +```python +lstm = simple_lstm(input=emb, size=lstm_size) +``` + +You can use a single-layer LSTM model with Dropout for our text classification problem. The performance is summarized in the following table: + +
+ + + + + + + + + + + + + + + + + +
Network nameNumber of parametersTest error
Recurrent model16 MB4.812%
+ +
+ +## Optimization Algorithm +Optimization algorithms include Momentum, RMSProp, AdaDelta, AdaGrad, Adam, and Adamax. You can use Adam optimization method here, with L2 regularization and gradient clipping, because Adam has been proved to work very well for training recurrent neural network. + +```python +settings(batch_size=128, + learning_rate=2e-3, + learning_method=AdamOptimizer(), + regularization=L2Regularization(8e-4), + gradient_clipping_threshold=25) +``` + +## Training Model +After completing data preparation and network architecture specification, you will run the training script. +
![](./PipelineTrain_en.png)
+ +Training script: our training script is in the `train.sh` file. The training arguments are listed below: + +```bash +paddle train \ +--config=trainer_config.py \ +--log_period=20 \ +--save_dir=./output \ +--num_passes=15 \ +--use_gpu=false +``` + +If you want to install the remote training platform, which enables distributed training on clusters, follow the instructions here: Platform documentation. We do not provide examples on how to train on clusters. Please refer to other demos or platform training documentation for more details on training on clusters. +## Inference +You can use the trained model to perform prediction on the dataset with no labels. You can also evaluate the model on a dataset with labels to obtain its test accuracy. +
![](./PipelineTest_en.png)
+ +The test script is listed below. PaddlePaddle can evaluate a model on the data with labels specified in `test.list`. + +```bash +paddle train \ +--config=trainer_config.lstm.py \ +--use_gpu=false \ +--job=test \ +--init_model_path=./output/pass-0000x +``` + +We will give an example of performing prediction using Recurrent model on a dataset with no labels. You can refer to: Python Prediction API tutorial,or other demo for the prediction process using Python. You can also use the following script for inference or evaluation. + +inference script (predict.sh): + +```bash +model="output/pass-00003" +paddle train \ + --config=trainer_config.lstm.py \ + --use_gpu=false \ + --job=test \ + --init_model_path=$model \ + --config_args=is_predict=1 \ + --predict_output_dir=. \ + +mv rank-00000 result.txt +``` +User can choose the best model base on the training log instead of model `output/pass-00003`. There are several differences between training and inference network configurations. +- You do not need labels during inference. +- Outputs need to be specified to the classification probability layer (the output of softmax layer), or the id of maximum probability (`max_id` layer). An example to output the id and probability is given in the code snippet. +- batch_size = 1. +- You need to specify the location of `test_list` in the test data. + +The results in `result.txt` is as follows, each line is one sample. 
+ +``` +predicted_label_id;probability_of_label_0 probability_of_label_1 # the first sample +predicted_label_id;probability_of_label_0 probability_of_label_1 # the second sample +``` + + +```python +is_predict = get_config_arg('is_predict', bool, False) +trn = 'data/train.list' if not is_predict else None +tst = 'data/test.list' if not is_predict else 'data/pred.list' +obj = 'process' if not is_predict else 'process_pre' +batch_size = 128 if not is_predict else 1 +if is_predict: + maxid = maxid_layer(output) + outputs([maxid, output]) +else: + label = data_layer(name="label", size=2) + cls = classification_cost(input=output, label=label) + outputs(cls) +``` + +## Summary +The scripts of data downloading, network configurations, and training scripts are in `/demo/quick_start`. The following table summarizes the performance of our network architecture on the Amazon-Elec dataset (25k): +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Network nameNumber of parametersError rateConfiguration file name
Logistic regression model(BOW) 252KB 8.652%trainer_config.lr.py
Word embedding 15MB 8.484%trainer_config.emb.py
Convolution model 16MB 5.628%trainer_config.cnn.py
Time sequence model 16MB 4.812%trainer_config.lstm.py
+
+
+ +## Appendix +### Command Line Argument + +* \--config: network architecture path. +* \--save_dir: model save directory. +* \--log_period: the logging period per batch. +* \--num_passes: number of training passes. One pass means the training would go over the whole training dataset once. +* \--config_args: Other configuration arguments. +* \--init_model_path: The path of the initial model parameter. + +By default, the trainer will save a model every pass. You can also specify `saving_period_by_batches` to set the frequency of batch saving. You can use `show_parameter_stats_period` to print the statistics of the parameters, which are very useful for tuning parameters. Other command line arguments can be found in the command line argument documentation. + +### Log + +``` +TrainerInternal.cpp:160] Batch=20 samples=2560 AvgCost=0.628761 CurrentCost=0.628761 Eval: classification_error_evaluator=0.304297 CurrentEval: classification_error_evaluator=0.304297 +``` +During model training, you will see the log like the examples above: +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameExplanation
Batch=20 You have trained 20 batches.
samples=2560 You have trained 2560 examples.
AvgCost The average cost from the first batch to the current batch.
CurrentCost the average cost of the last log_period batches
Eval: classification_error_evaluator The average classification error from the first batch to the current batch.
CurrentEval: classification_error_evaluator The average error rate of the last log_period batches
+
+
diff --git a/release_doc/0.9.0/doc/_sources/demo/rec/ml_dataset.txt b/release_doc/0.9.0/doc/_sources/demo/rec/ml_dataset.txt new file mode 100644 index 0000000000000000000000000000000000000000..c93a4585e4027b1912da8a77c2562d1ee69c5366 --- /dev/null +++ b/release_doc/0.9.0/doc/_sources/demo/rec/ml_dataset.txt @@ -0,0 +1,107 @@ +# MovieLens Dataset + +The [MovieLens Dataset](http://grouplens.org/datasets/movielens/) was collected by GroupLens Research. +The data set contains some user information, movie information, and many movie ratings from \[1-5\]. +The data sets have many version depending on the size of set. +We use [MovieLens 1M Dataset](http://files.grouplens.org/datasets/movielens/ml-1m.zip) as a demo dataset, which contains +1 million ratings from 6000 users on 4000 movies. Released 2/2003. + +## Dataset Features + +In [ml-1m Dataset](http://files.grouplens.org/datasets/movielens/ml-1m.zip), there are many features in these dataset. +The data files (which have ".dat" extension) in [ml-1m Dataset](http://files.grouplens.org/datasets/movielens/ml-1m.zip) +is basically CSV file that delimiter is "::". The description in README we quote here. + +### RATINGS FILE DESCRIPTION(ratings.dat) + + +All ratings are contained in the file "ratings.dat" and are in the +following format: + +UserID::MovieID::Rating::Timestamp + +- UserIDs range between 1 and 6040 +- MovieIDs range between 1 and 3952 +- Ratings are made on a 5-star scale (whole-star ratings only) +- Timestamp is represented in seconds since the epoch as returned by time(2) +- Each user has at least 20 ratings + +### USERS FILE DESCRIPTION(users.dat) + +User information is in the file "users.dat" and is in the following +format: + +UserID::Gender::Age::Occupation::Zip-code + +All demographic information is provided voluntarily by the users and is +not checked for accuracy. Only users who have provided some demographic +information are included in this data set. 
+ +- Gender is denoted by a "M" for male and "F" for female +- Age is chosen from the following ranges: + + * 1: "Under 18" + * 18: "18-24" + * 25: "25-34" + * 35: "35-44" + * 45: "45-49" + * 50: "50-55" + * 56: "56+" + +- Occupation is chosen from the following choices: + + * 0: "other" or not specified + * 1: "academic/educator" + * 2: "artist" + * 3: "clerical/admin" + * 4: "college/grad student" + * 5: "customer service" + * 6: "doctor/health care" + * 7: "executive/managerial" + * 8: "farmer" + * 9: "homemaker" + * 10: "K-12 student" + * 11: "lawyer" + * 12: "programmer" + * 13: "retired" + * 14: "sales/marketing" + * 15: "scientist" + * 16: "self-employed" + * 17: "technician/engineer" + * 18: "tradesman/craftsman" + * 19: "unemployed" + * 20: "writer" + +### MOVIES FILE DESCRIPTION(movies.dat) + +Movie information is in the file "movies.dat" and is in the following +format: + +MovieID::Title::Genres + +- Titles are identical to titles provided by the IMDB (including +year of release) +- Genres are pipe-separated and are selected from the following genres: + + * Action + * Adventure + * Animation + * Children's + * Comedy + * Crime + * Documentary + * Drama + * Fantasy + * Film-Noir + * Horror + * Musical + * Mystery + * Romance + * Sci-Fi + * Thriller + * War + * Western + +- Some MovieIDs do not correspond to a movie due to accidental duplicate +entries and/or test entries +- Movies are mostly entered by hand, so errors and inconsistencies may exist diff --git a/release_doc/0.9.0/doc/_sources/demo/rec/ml_regression.txt b/release_doc/0.9.0/doc/_sources/demo/rec/ml_regression.txt new file mode 100644 index 0000000000000000000000000000000000000000..0c14e4f5bb7f815a06c0c756b1a6e6ef9099fd66 --- /dev/null +++ b/release_doc/0.9.0/doc/_sources/demo/rec/ml_regression.txt @@ -0,0 +1,359 @@ +Regression MovieLens Ratting +============================ + +Here we demonstrate a **Cosine Similarity Regression** job in movie lens dataset. 
+This demo will show how paddle does (word) embedding job, +handles the similarity regression, +the character-level convolutional networks for text, and how does paddle handle +multiple types of inputs. +Note that the model structure is not fine-tuned and just a demo to show how paddle works. + + +YOU ARE WELCOME TO BUILD A BETTER DEMO +BY USING PADDLEPADDLE, AND LET US KNOW TO MAKE THIS DEMO BETTER. + +Data Preparation +```````````````` +Download and extract dataset +'''''''''''''''''''''''''''' +We use `movielens 1m dataset `_ here. +To download and unzip the dataset, simply run the following commands. + +.. code-block:: bash + + cd demo/recommendation/data + ./ml_data.sh + +And the directory structure of :code:`demo/recommendation/data/ml-1m` is: + +.. code-block:: text + + +--ml-1m + +--- movies.dat # movie features + +--- ratings.dat # ratings + +--- users.dat # user features + +--- README # dataset description + +Field config file +''''''''''''''''' +**Field config file** is used to specific the fields dataset and file format, +i.e, specific **WHAT** type it is in each feature file. + +The field config file of ml-1m shows in :code:`demo/recommendation/data/config.json`. +It specifics the field types and file names: 1) there are four types of field for user file\: id, gender, age and occupation; +2) the filename is "users.dat", and the delimiter of file is "::". + +.. include:: ../../../demo/recommendation/data/config.json + :code: json + :literal: + +Preprocess Data +``````````````` +You need to install python 3rd party libraries. +IT IS HIGHLY RECOMMEND TO USE VIRTUALENV MAKE A CLEAN PYTHON ENVIRONMENT. + +.. code-block:: bash + + pip install -r requirements.txt + +The general command for preprocessing the dataset is: + +.. code-block:: bash + + cd demo/recommendation + ./preprocess.sh + +And the detail steps are introduced as follows. 
+ +Extract Movie/User features to python object +''''''''''''''''''''''''''''''''''''''''''''' + +There are many features in movie or user in movielens 1m dataset. +Each line of rating file just provides a Movie/User id to refer each movie or user. +We process the movie/user feature file first, and pickle the feature (**Meta**) object as a file. + +Meta config file +................ + +**Meta config file** is used to specific **HOW** to parse each field in dataset. +It could be translated from field config file, or written by hand. +Its file format could be either json or yaml syntax file. Parser will automatically choose the file format by extension name. + +To convert Field config file to meta config file, just run: + +.. code-block:: bash + + cd demo/recommendation/data + python config_generator.py config.json > meta_config.json + +The meta config file shows below: + +.. include:: ../../../demo/recommendation/data/meta_config.json + :code: json + :literal: + +There are two kinds of features in meta\: movie and user. + +* in movie file, whose name is movies.dat + * we just split each line by "::" + * pos 0 is id. + * pos 1 feature: + * name is title. + * it uses regex to parse this feature. + * it is a char based word embedding feature. + * it is a sequence. + * pos 2 feature: + * name is genres. + * type is one hot dense vector. + * dictionary is auto generated by parsing, each key is split by '|' +* in user file, whose name is users.dat + * we just split each line by "::" + * pos 0 is id. + * pos 1 feature: + * name is gender + * just simple char based embedding. + * pos 2 feature: + * name is age + * just whole word embedding. + * embedding id will be sort by word. + * pos 3 feature: + * name is occupation. + * just simple whole word embedding. + + +Meta file +''''''''' + +After having meta config file, we can generate **Meta file**, a python pickle object which stores movie/user information. +The following commands could be run to generate it. + +.. 
code-block:: bash + + python meta_generator.py ml-1m meta.bin --config=meta_config.json + +And the structure of the meta file :code:`meta.bin` is: + +.. code-block:: text + + +--+ movie + | +--+ __meta__ + | | +--+ raw_meta # each feature meta config. list + | | | + + | | | | # ID Field, we use id as key + | | | +--+ {'count': 3883, 'max': 3952, 'is_key': True, 'type': 'id', 'min': 1} + | | | | + | | | | # Titile field, the dictionary list of embedding. + | | | +--+ {'dict': [ ... ], 'type': 'embedding', 'name': 'title', 'seq': 'sequence'} + | | | | + | | | | # Genres field, the genres dictionary + | | | +--+ {'dict': [ ... ], 'type': 'one_hot_dense', 'name': 'genres'} + | | | + | | +--+ feature_map [1, 2] # a list for raw_meta index for feature field. + | | # it means there are 2 features for each key. + | | # * 0 offset of feature is raw_meta[1], Title. + | | # * 1 offset of feature is raw_meta[2], Genres. + | | + | +--+ 1 # movie 1 features + | | + + | | +---+ [[...], [...]] # title ids, genres dense vector + | | + | +--+ 2 + | | + | +--+ ... + | + +--- user + +--+ __meta__ + | + + | +--+ raw_meta + | | + + | | +--+ id field as user + | | | + | | +--+ {'dict': ['F', 'M'], 'type': 'embedding', 'name': 'gender', 'seq': 'no_sequence'} + | | | + | | +--+ {'dict': ['1', '18', '25', '35', '45', '50', '56'], 'type': 'embedding', 'name': 'age', 'seq': 'no_sequence'} + | | | + | | +--+ {'dict': [...], 'type': 'embedding', 'name': 'occupation', 'seq': 'no_sequence'} + | | + | +--+ feature_map [1, 2, 3] + | + +--+ 1 # user 1 features + | + +--+ 2 + +--+ ... + + +Split Training/Testing files +'''''''''''''''''''''''''''' + +We split :code:`ml-1m/ratings.dat` into a training and testing file. The way to split file is for each user, we split the +rating by two parts. So each user in testing file will have some rating information in training file. + +Use separate.py to separate the training and testing file. + +.. 
code-block:: bash + + python split.py ml-1m/ratings.dat --delimiter="::" --test_ratio=0.1 + +Then two files will be generated\: :code:`ml-1m/ratings.dat.train` and :code:`ml-1m/rating.data.test`. +Move them to workspace :code:`data`, shuffle the train file, and prepare the file list for paddle train. + +.. code-block:: bash + + shuf ml-1m/ratings.dat.train > ratings.dat.train + cp ml-1m/ratings.dat.test . + echo "./data/ratings.dat.train" > train.list + echo "./data/ratings.dat.test" > test.list + + +Neural Network Configuration +```````````````````````````` + +Trainer Config File +''''''''''''''''''' + +The network structure shows below. + +.. image:: rec_regression_network.png + :align: center + :alt: rec_regression_network + +The demo's neural network config file "trainer_config.py" show as below. + +.. literalinclude:: ../../../demo/recommendation/trainer_config.py + :language: python + :lines: 15- + +In this :code:`trainer_config.py`, we just map each feature type to +a feature vector, following shows how to map each feature to a vector shows below. + +* :code:`id`\: Just simple embedding, and then add to fully connected layer. +* :code:`embedding`\: + - if is_sequence, get the embedding and do a text convolutional operation, + get the average pooling result. + - if not sequence, get the embedding and add to fully connected layer. +* :code:`one_host_dense`\: + - just two fully connected layer. + +Then we combine each features of movie into one movie feature by a +:code:`fc_layer` with multiple inputs, and do the same thing to user features, +get one user feature. Then we calculate the cosine similarity of these two +features. + +In these network, we use several api in `trainer_config_helpers +<../../ui/api/trainer_config_helpers/index.html>`_. 
There are + +* Data Layer, `data_layer + <../../ui/api/trainer_config_helpers/layers.html#id1>`_ +* Fully Connected Layer, `fc_layer + <../../ui/api/trainer_config_helpers/layers.html#fc-layer>`_ +* Embedding Layer, `embedding_layer + <../../ui/api/trainer_config_helpers/layers.html#embedding-layer>`_ +* Context Projection Layer, `context_projection + <../../ui/api/trainer_config_helpers/layers.html#context-projection>`_ +* Pooling Layer, `pooling_layer + <../../ui/api/trainer_config_helpers/layers.html#pooling-layer>`_ +* Cosine Similarity Layer, `cos_sim + <../../ui/api/trainer_config_helpers/layers.html#cos-sim>`_ +* Text Convolution Pooling Layer, `text_conv_pool + <../../ui/api/trainer_config_helpers/networks.html + #trainer_config_helpers.networks.text_conv_pool>`_ +* Declare Python Data Sources, `define_py_data_sources2 + <../../ui/api/trainer_config_helpers/data_sources.html>`_ + +Data Provider +''''''''''''' + +.. literalinclude:: ../../../demo/recommendation/dataprovider.py + :language: python + :lines: 15- + +The data provider just read the meta.bin and rating file, yield each sample for training. +In this :code:`dataprovider.py`, we should set\: + +* obj.slots\: The feature types and dimension. +* use_seq\: Whether this :code:`dataprovider.py` in sequence mode or not. +* process\: Return each sample of data to :code:`paddle`. + +The data provider details document see `there <../../ui/data_provider/pydataprovider2.html>`_. + +Train +````` + +After prepare data, config network, writting data provider, now we can run paddle training. + +The run.sh is shown as follow: + +.. literalinclude:: ../../../demo/recommendation/run.sh + :language: bash + :lines: 16- + +It just start a paddle training process, write the log to `log.txt`, +then print it on screen. + +Each command line argument in :code:`run.sh`, please refer to the `command line +arguments <../../ui/index.html#command-line-argument>`_ page. The short description of these arguments is shown as follow. 
+ +* config\: Tell paddle which file is neural network configuration. +* save_dir\: Tell paddle to save the model into './output' +* use_gpu\: Use gpu or not. Default is false. +* trainer_count\: The compute thread in one machine. +* test_all_data_in_one_period\: Test All Data during one test period. Otherwise, + will test a :code:`batch_size` data in one test period. +* log_period\: Print log after train :code:`log_period` batches. +* dot_period\: Print a :code:`.` after train :code:`dot_period` batches. +* num_passes\: Train at most :code:`num_passes`. + +If the training process starts successfully, the output looks as follows: + +.. code-block:: text + + I0601 08:07:22.832059 10549 TrainerInternal.cpp:157] Batch=100 samples=160000 AvgCost=4.13494 CurrentCost=4.13494 Eval: CurrentEval: + + I0601 08:07:50.672627 10549 TrainerInternal.cpp:157] Batch=200 samples=320000 AvgCost=3.80957 CurrentCost=3.48421 Eval: CurrentEval: + + I0601 08:08:18.877369 10549 TrainerInternal.cpp:157] Batch=300 samples=480000 AvgCost=3.68145 CurrentCost=3.42519 Eval: CurrentEval: + + I0601 08:08:46.863963 10549 TrainerInternal.cpp:157] Batch=400 samples=640000 AvgCost=3.6007 CurrentCost=3.35847 Eval: CurrentEval: + + I0601 08:09:15.413025 10549 TrainerInternal.cpp:157] Batch=500 samples=800000 AvgCost=3.54811 CurrentCost=3.33773 Eval: CurrentEval: + I0601 08:09:36.058670 10549 TrainerInternal.cpp:181] Pass=0 Batch=565 samples=902826 AvgCost=3.52368 Eval: + I0601 08:09:46.215489 10549 Tester.cpp:101] Test samples=97383 cost=3.32155 Eval: + I0601 08:09:46.215966 10549 GradientMachine.cpp:132] Saving parameters to ./output/model/pass-00000 + I0601 08:09:46.233397 10549 ParamUtil.cpp:99] save dir ./output/model/pass-00000 + I0601 08:09:46.233438 10549 Util.cpp:209] copy trainer_config.py to ./output/model/pass-00000 + I0601 08:09:46.233541 10549 ParamUtil.cpp:147] fileName trainer_config.py + +The model is saved in :code:`output/` directory. You can use :code:`Ctrl-C` to stop training whenever you want. 
+ +Evaluate and Predict +```````````````````` + +After training several passes, you can evaluate them and get the best pass. Just run + +.. code-block:: bash + + ./evaluate.sh + +You will see messages like this: + +.. code-block:: text + + Best pass is 00009, error is 3.06949, which means predict get error as 0.875998002281 + evaluating from pass output/pass-00009 + +Then, you can predict what any user will rate a movie. Just run + +.. code-block:: bash + + python prediction.py 'output/pass-00009/' + +Predictor will read user input, and predict scores. It has a command-line user interface as follows: + +.. code-block:: text + + Input movie_id: 9 + Input user_id: 4 + Prediction Score is 2.56 + Input movie_id: 8 + Input user_id: 2 + Prediction Score is 3.13 diff --git a/release_doc/0.9.0/doc/_sources/demo/semantic_role_labeling/index.txt b/release_doc/0.9.0/doc/_sources/demo/semantic_role_labeling/index.txt new file mode 100644 index 0000000000000000000000000000000000000000..ff3035059bd77a8688714db484e420b113c73e53 --- /dev/null +++ b/release_doc/0.9.0/doc/_sources/demo/semantic_role_labeling/index.txt @@ -0,0 +1,7 @@ +Semantic Role Labeling Tutorial +=============================== + +.. toctree:: + :maxdepth: 3 + + semantic_role_labeling.md diff --git a/release_doc/0.9.0/doc/_sources/demo/semantic_role_labeling/semantic_role_labeling.txt b/release_doc/0.9.0/doc/_sources/demo/semantic_role_labeling/semantic_role_labeling.txt new file mode 100644 index 0000000000000000000000000000000000000000..890f7314582c65e9add50664006b57aa4e0709eb --- /dev/null +++ b/release_doc/0.9.0/doc/_sources/demo/semantic_role_labeling/semantic_role_labeling.txt @@ -0,0 +1,183 @@ +# Semantic Role labeling Tutorial # + +Semantic role labeling (SRL) is a form of shallow semantic parsing whose goal is to discover the predicate-argument structure of each predicate in a given input sentence. 
SRL is useful as an intermediate step in a wide range of natural language processing tasks, such as information extraction, automatic document categorization and question answering. An instance is as follows [1]: + + [ A0 He ] [ AM-MOD would ][ AM-NEG n’t ] [ V accept] [ A1 anything of value ] from [A2 those he was writing about ]. + +- V: verb +- A0: acceptor +- A1: thing accepted +- A2: accepted-from +- A3: Attribute +- AM-MOD: modal +- AM-NEG: negation + +Given the verb "accept", the chunks in the sentence would play certain semantic roles. Here, the label scheme is from Penn Proposition Bank. + +To this date, most of the successful SRL systems are built on top of some form of parsing results where pre-defined feature templates over the syntactic structure are used. This tutorial will present an end-to-end system using deep bidirectional long short-term memory (DB-LSTM)[2] for solving the SRL task, which largely outperforms the previous state-of-the-art systems. The system regards the SRL task as a sequence labelling problem. + +## Data Description +The relevant paper[2] takes the data set in CoNLL-2005&2012 Shared Task for training and testing. According to the data license, the demo adopts the test data set of CoNLL-2005, which can be reached on the website. + +To download and process the original data, users just need to execute the following command: + +```bash +cd data +./get_data.sh +``` +Several new files appear in the `data` directory as follows. +```bash +conll05st-release:the test data set of CoNll-2005 shared task +test.wsj.words:the Wall Street Journal data sentences +test.wsj.props: the propositional arguments +src.dict:the dictionary of words in sentences +tgt.dict:the labels dictionary +feature: the extracted features from data set +``` + +## Training +### DB-LSTM +Please refer to the Sentiment Analysis demo to learn more about the long short-term memory unit. 
+ +Unlike Bidirectional-LSTM that used in Sentiment Analysis demo, the DB-LSTM adopts another way to stack LSTM layer. First a standard LSTM processes the sequence in forward direction. The input and output of this LSTM layer are taken by the next LSTM layer as input, processed in reversed direction. These two standard LSTM layers compose a pair of LSTM. Then we stack LSTM layers pair after pair to obtain the deep LSTM model. + +The following figure shows a temporal expanded 2-layer DB-LSTM network. +
+![pic](./network_arch.png) +
+ +### Features +Two input features play an essential role in this pipeline: predicate (pred) and argument (argu). Two other features: predicate context (ctx-p) and region mark (mr) are also adopted. Because a single predicate word can not exactly describe the predicate information, especially when the same words appear more than one times in a sentence. With the predicate context, the ambiguity can be largely eliminated. Similarly, we use region mark mr = 1 to denote the argument position if it locates in the predicate context region, or mr = 0 if does not. These four simple features are all we need for our SRL system. Features of one sample with context size set to 1 is showed as following[2]: +
+![pic](./feature.jpg) +
+ +In this sample, the coresponding labelled sentence is: + +[ A1 A record date ] has [ AM-NEG n't ] been [ V set ] . + +In the demo, we adopt the feature template as above, consists of : `argument`, `predicate`, `ctx-p (p=-1,0,1)`, `mark` and use `B/I/O` scheme to label each argument. These features and labels are stored in `feature` file, and separated by `\t`. + +### Data Provider + +`dataprovider.py` is the python file to wrap data. `hook()` function is to define the data slots for network. The Six features and label are all IndexSlots. +``` +def hook(settings, word_dict, label_dict, **kwargs): + settings.word_dict = word_dict + settings.label_dict = label_dict + #all inputs are integral and sequential type + settings.slots = [ + integer_value_sequence(len(word_dict)), + integer_value_sequence(len(word_dict)), + integer_value_sequence(len(word_dict)), + integer_value_sequence(len(word_dict)), + integer_value_sequence(len(word_dict)), + integer_value_sequence(2), + integer_value_sequence(len(label_dict))] +``` +The corresponding data iterator is as following: +``` +@provider(use_seq=True, init_hook=hook) +def process(obj, file_name): + with open(file_name, 'r') as fdata: + for line in fdata: + sentence, predicate, ctx_n1, ctx_0, ctx_p1, mark, label = line.strip().split('\t') + words = sentence.split() + sen_len = len(words) + word_slot = [obj.word_dict.get(w, UNK_IDX) for w in words] + + predicate_slot = [obj.word_dict.get(predicate, UNK_IDX)] * sen_len + ctx_n1_slot = [obj.word_dict.get(ctx_n1, UNK_IDX) ] * sen_len + ctx_0_slot = [obj.word_dict.get(ctx_0, UNK_IDX) ] * sen_len + ctx_p1_slot = [obj.word_dict.get(ctx_p1, UNK_IDX) ] * sen_len + + marks = mark.split() + mark_slot = [int(w) for w in marks] + + label_list = label.split() + label_slot = [obj.label_dict.get(w) for w in label_list] + + yield word_slot, predicate_slot, ctx_n1_slot, ctx_0_slot, ctx_p1_slot, mark_slot, label_slot +``` +The `process`function yield 7 lists which are six features and labels. 
+ +### Neural Network Config +`db_lstm.py` is the neural network config file to load the dictionaries and define the data provider module and network architecture during the training procedure. + +Seven `data_layer` load instances from data provider. Six features are transformed into embeddings respectively, and mixed by `mixed_layer`. Deep bidirectional LSTM layers extract features for the softmax layer. The objective function is cross entropy of labels. + +### Run Training +The script for training is `train.sh`, users just need to execute: +```bash + ./train.sh +``` +The content in `train.sh`: +``` +paddle train \ + --config=./db_lstm.py \ + --save_dir=./output \ + --trainer_count=4 \ + --log_period=10 \ + --num_passes=500 \ + --use_gpu=false \ + --show_parameter_stats_period=10 \ + --test_all_data_in_one_period=1 \ +2>&1 | tee 'train.log' +``` + +- \--config=./db_lstm.py : network config file. +- \--save_dir=./output: output path to save models. +- \--trainer_count=4 : set thread number (or GPU count). +- \--log_period=10 : print log every 10 batches. +- \--num_passes=500: set pass number, one pass in PaddlePaddle means training all samples in dataset one time. +- \--use_gpu=false: use CPU to train, set true, if you install GPU version of PaddlePaddle and want to use GPU to train. +- \--show_parameter_stats_period=10: show parameter statistics every 10 batches. +- \--test_all_data_in_one_period=1: test all data in every testing. + + +After training, the models will be saved in directory `output`. 
+ +### Run testing +The script for testing is `test.sh`, users just need to execute: +```bash + ./test.sh +``` +The main part in `test.sh` is: +``` +paddle train \ + --config=./db_lstm.py \ + --model_list=$model_list \ + --job=test \ + --config_args=is_test=1 \ +``` + + - \--config=./db_lstm.py: network config file + - \--model_list=$model_list.list: model list file + - \--job=test: indicate the test job + - \--config_args=is_test=1: flag to indicate test + + +### Run prediction +The script for prediction is `predict.sh`, users just need to execute: +```bash + ./predict.sh + +``` +In `predict.sh`, users should provide the network config file, model path, label file, word dictionary file, feature file +``` +python predict.py + -c $config_file + -w $model_path + -l $label_file + -d $dict_file + -i $input_file +``` + +`predict.py` is the main executable python script, which includes functions: load model, load data, data prediction. The network model will output the probability distribution of labels. In the demo, we take the label with maximum probability as the result. Users can also implement beam search or Viterbi decoding upon the probability distribution matrix. + +After prediction, the result is saved in `predict.res`. + +## Reference +[1] Martha Palmer, Dan Gildea, and Paul Kingsbury. The Proposition Bank: An Annotated Corpus of Semantic Roles , Computational Linguistics, 31(1), 2005. + +[2] Zhou, Jie, and Wei Xu. "End-to-end learning of semantic role labeling using recurrent neural networks." Proceedings of the Annual Meeting of the Association for Computational Linguistics. 2015. 
diff --git a/release_doc/0.9.0/doc/_sources/demo/sentiment_analysis/index.txt b/release_doc/0.9.0/doc/_sources/demo/sentiment_analysis/index.txt new file mode 100644 index 0000000000000000000000000000000000000000..9ee6d3a177c19de9fabf7b7e86c7c371bc094736 --- /dev/null +++ b/release_doc/0.9.0/doc/_sources/demo/sentiment_analysis/index.txt @@ -0,0 +1,9 @@ +Sentiment Analysis Tutorial +=========================== + +.. toctree:: + :maxdepth: 3 + :glob: + + Training Locally + internal/cluster_train.md diff --git a/release_doc/0.9.0/doc/_sources/demo/sentiment_analysis/sentiment_analysis.txt b/release_doc/0.9.0/doc/_sources/demo/sentiment_analysis/sentiment_analysis.txt new file mode 100644 index 0000000000000000000000000000000000000000..385f49891dcd840c525f7d1c3aaf7f08a7e4903f --- /dev/null +++ b/release_doc/0.9.0/doc/_sources/demo/sentiment_analysis/sentiment_analysis.txt @@ -0,0 +1,325 @@ +# Sentiment Analysis Tutorial + +Sentiment analysis has many applications. A basic task in sentiment analysis is classifying the polarity of a given text at the document, sentence or feature/aspect level. One simple example is to classify the customer reviews in a shopping website, a tourism website, and group buying websites like Amazon, TaoBao, Tmall etc. + +Sentiment analysis is also used to monitor social media based on a large amount of reviews or blogs. For example, the researchers analyzed several surveys on consumer confidence and political opinion, and found they correlate to sentiment word frequencies in contemporaneous Twitter messages [1]. Another example is to forecast stock movements through analyzing the text content of a daily Twitter blog [2]. + +On the other hand, grabbing the user comments of products and analyzing their sentiment are useful to understand user preferences for companies, products, even competing products. 
+ +This tutorial will guide you through the process of training a Long Short Term Memory (LSTM) Network to classify the sentiment of sentences from [Large Movie Review Dataset](http://ai.stanford.edu/~amaas/data/sentiment/), sometimes known as the [Internet Movie Database (IMDB)](http://ai.stanford.edu/~amaas/papers/wvSent_acl2011.pdf). This dataset contains movie reviews along with their associated binary sentiment polarity labels, namely positive and negative. So randomly guessing yields 50% accuracy. + +## Data Preparation + +### IMDB Data Introduction + +Before training models, we need to preprocess the data and build a dictionary. First, you can use following script to download IMDB dataset and [Moses](http://www.statmt.org/moses/) tool, which is a statistical machine translation system. We provide a data preprocessing script, which is capable of handling not only IMDB data, but also other user-defined data. In order to use the pre-written script, it needs to move labeled train and test samples to another path, which has been done in `get_imdb.sh`. + +``` +cd demo/sentiment/data +./get_imdb.sh +``` +If the data is obtained successfuly, you will see the following files at ```./demo/sentiment/data```: + +``` +aclImdb get_imdb.sh imdb mosesdecoder-master +``` + +* aclImdb: raw dataset downloaded from website. +* imdb: only contains train and test data. +* mosesdecoder-master: Moses tool. + +IMDB dataset contains 25,000 highly polar movie reviews for training, and 25,000 for testing. A negative review has a score ≤ 4 out of 10, and a positive review has a score ≥ 7 out of 10. After running `./get_imdb.sh`, we can find the dataset has the following structure in `aclImdb`. + +``` +imdbEr.txt imdb.vocab README test train +``` +* train: train sets. +* test : test sets. +* imdb.vocab: dictionary. +* imdbEr.txt: expected rating for each token in imdb.vocab. +* README: data documentation. 
+ +Both the train and test set directories contain: + +``` +labeledBow.feat neg pos unsup unsupBow.feat urls_neg.txt urls_pos.txt urls_unsup.txt +``` + +* pos: positive samples, contains 12,500 txt files, each file is one movie review. +* neg: negative samples, contains 12,500 txt files, each file is one movie review. +* unsup: unlabeled samples, contains 50,000 txt files. +* urls_xx.txt: urls of each review. +* xxBow.feat: already-tokenized bag of words (BoW) features. + +### IMDB Data Preparation + +In this demo, we only use the labeled train and test sets and do not use imdb.vocab as dictionary. By default, the dictionary is built on the train set. The train set is shuffled and the test set is not. `tokenizer.perl` in Moses tool is used to tokenize the words and punctuation. Simply execute the following command to preprocess data. + +``` +cd demo/sentiment/ +./preprocess.sh +``` +preprocess.sh: + +``` +data_dir="./data/imdb" +python preprocess.py -i data_dir +``` + +* data_dir: input data directory. +* preprocess.py: preprocess script. + +If running successfully, you will see `demo/sentiment/data/pre-imdb` directory as follows: + +``` +dict.txt labels.list test.list test_part_000 train.list train_part_000 +``` +* test\_part\_000 and train\_part\_000: all labeled test and train sets. Train sets have been shuffled. +* train.list and test.list: train and test file lists. +* dict.txt: dictionary generated on train sets by default. +* labels.list: neg 0, pos 1, means label 0 is negative review, label 1 is positive review. + +### User-defined Data Preparation + +If you perform another sentiment classification task, you can prepare data as follows. We have provided the scripts to build dictionary and preprocess data. So just organize data as follows. + +``` +dataset +|----train +| |----class1 +| | |----text_files +| |----class2 +| | |----text_files +| | ... +|----test +| |----class1 +| | |----text_files +| |----class2 +| | |----text_files +| | ... +``` +* dataset: 1st directory. 
+* train, test: 2nd directory. +* class1,class2,...: 3rd directory. +* text_files: samples with text file format. + +All samples with text file format under the same folder are of the same category. Each text file contains one or more samples and each line is one sample. In order to shuffle fully, the preprocessing is a little different for data with multiple lines in one text file, which needs to set `-m True` in `preprocess.sh`. And tokenizer.perl is used by default. If you don't need it, only set `-t False` in `preprocess.sh`. + +## Training + +In this task, we use Recurrent Neural Network (RNN) of LSTM architecture to train the sentiment analysis model. The LSTM model was introduced primarily in order to overcome the problem of vanishing gradients. The LSTM network resembles a standard recurrent neural network with a hidden layer, but each ordinary node in the hidden layer is replaced by a memory cell. Each memory cell contains four main elements: an input gate, a neuron with a self-recurrent connection, a forget gate and an output gate. More details can be found in the literature [4]. The biggest advantage of the LSTM architecture is that it learns to memorize information over long time intervals without the loss of short time memory. At each time step with a new coming word, historical information stored in the memory block is updated to iteratively learn the sequence representation. 
![LSTM](./lstm.png)
+
Figure 1. LSTM [3]
+ +Sentiment analysis is among the most typical problems in natural language understanding. It aims at predicting the attitude expressed in a sequence. Usually, only some key words, like adjectives and adverbs words, play a major role in predicting the sentiment of sequences or paragraphs. However, some review or comment contexts are very long, such as IMDB dataset. We use LSTM to perform this task for its improved design with the gate mechanism. First, it is able to summarize the representation from word level to context level with variable context length which is adapted by the gate values. Second, it can utilize the expanded context at the sentence level, while most methods are good at utilizing n-gram level knowledge. Third, it learns the paragraph representation directly rather than combining the context level information. This results in this end-to-end framework. + +In this demo we provide two network, namely bidirectional-LSTM and three layers of stacked-LSTM. + +#### Bidirectional-LSTM + +One is a bidirectional LSTM network, connected by fully connected layer and softmax, as shown in Figure 2. + +
![BiLSTM](./bi_lstm.jpg)
+
Figure 2. Bidirectional-LSTM
+ +#### Stacked-LSTM +Another is three-layer LSTM structure in Figure 3. The bottom of the figure is word embedding. Next, three LSTM-Hidden layers are connected and the second LSTM is reversed. Then extract the maximum hidden vectors of all time step of hidden and LSTM layer as the representation for the entire sequence. Finally, a fully connected feed forward layer with softmax activation is used to perform the classification task. This network is refered to paper [5]. + +
![StackedLSTM](./stacked_lstm.jpg)
+
Figure 3. Stacked-LSTM for sentiment analysis
+ +**Config** + +Switch into `demo/sentiment` directory, `trainer_config.py` file is an example of the config, containing algorithm and newtork configure. The first line imports predefined networks from `sentiment_net.py`. + +trainer_config.py: + +```python +from sentiment_net import * + +data_dir = "./data/pre-imdb" +# whether this config is used for test +is_test = get_config_arg('is_test', bool, False) +# whether this config is used for prediction +is_predict = get_config_arg('is_predict', bool, False) +dict_dim, class_dim = sentiment_data(data_dir, is_test, is_predict) + +################## Algorithm Config ##################### + +settings( + batch_size=128, + learning_rate=2e-3, + learning_method=AdamOptimizer(), + regularization=L2Regularization(8e-4), + gradient_clipping_threshold=25 +) + +#################### Network Config ###################### +stacked_lstm_net(dict_dim, class_dim=class_dim, + stacked_num=3, is_predict=is_predict) +#bidirectional_lstm_net(dict_dim, class_dim=class_dim, is_predict=is_predict) +``` + +* **Data Definition**: + * get\_config\_arg(): get arguments setted by `--config_args=xx` in commandline argument. + * Define TrainData and TestData provider, here using Python interface (PyDataProviderWrapper) of PaddlePaddle to load data. For details, you can refer to the document of PyDataProvider. + +* **Algorithm Configuration**: + * use sgd algorithm. + * use adam optimization. + * set batch size of 128. + * set average sgd window. + * set global learning rate. +* **Network Configuration**: + * dict_dim: get dictionary dimension. + * class_dim: set category number, IMDB has two label, namely positive and negative label. + * `stacked_lstm_net`: predefined network as shown in Figure 3, use this network by default. + * `bidirectional_lstm_net`: predefined network as shown in Figure 2. + +**Training** + +Install PaddlePaddle first if necessary. Then you can use script `train.sh` as follows to launch local training. 
+ +``` +cd demo/sentiment/ +./train.sh +``` + +train.sh: + +``` +config=trainer_config.py +output=./model_output +paddle train --config=$config \ + --save_dir=$output \ + --job=train \ + --use_gpu=false \ + --trainer_count=4 \ + --num_passes=10 \ + --log_period=20 \ + --dot_period=20 \ + --show_parameter_stats_period=100 \ + --test_all_data_in_one_period=1 \ + 2>&1 | tee 'train.log' +``` + +* \--config=$config: set network config. +* \--save\_dir=$output: set output path to save models. +* \--job=train: set job mode to train. +* \--use\_gpu=false: use CPU to train, set true, if you install GPU version of PaddlePaddle and want to use GPU to train. +* \--trainer\_count=4: set thread number (or GPU count). +* \--num\_passes=10: set pass number, one pass in PaddlePaddle means training all samples in dataset one time. +* \--log\_period=20: print log every 20 batches. +* \--show\_parameter\_stats\_period=100: show parameter statistic every 100 batches. +* \--test\_all_data\_in\_one\_period=1: test all data every testing. + +If the run succeeds, the output log is saved in path of `demo/sentiment/train.log` and model is saved in path of `demo/sentiment/model_output/`. The output log is explained as follows. + +``` +Batch=20 samples=2560 AvgCost=0.681644 CurrentCost=0.681644 Eval: classification_error_evaluator=0.36875 CurrentEval: classification_error_evaluator=0.36875 +... +Pass=0 Batch=196 samples=25000 AvgCost=0.418964 Eval: classification_error_evaluator=0.1922 +Test samples=24999 cost=0.39297 Eval: classification_error_evaluator=0.149406 +``` +- Batch=xx: means passing xx batches. +- samples=xx: means passing xx samples. +- AvgCost=xx: averaged cost from 0-th batch to current batch. +- CurrentCost=xx: current cost of latest log_period batches. +- Eval: classification\_error\_evaluator=xx: means classification error from 0-th batch to current batch. +- CurrentEval: classification\_error\_evaluator: current classification error of the latest log_period batches. 
+- Pass=0: Going through all training set one time is called one pass. 0 means going through training set first time. + +By default, we use the `stacked_lstm_net` network, which converges at a faster rate than `bidirectional_lstm_net` when passing same sample number. If you want to use bidirectional LSTM, just remove comment in the last line and comment `stacked_lstm_net`. + +## Testing + +Testing means evaluating the labeled validation set using trained model. + +``` +cd demo/sentiment +./test.sh +``` + +test.sh: + +```bash +function get_best_pass() { + cat $1 | grep -Pzo 'Test .*\n.*pass-.*' | \ + sed -r 'N;s/Test.* error=([0-9]+\.[0-9]+).*\n.*pass-([0-9]+)/\1 \2/g' | \ + sort | head -n 1 +} + +log=train.log +LOG=`get_best_pass $log` +LOG=(${LOG}) +evaluate_pass="model_output/pass-${LOG[1]}" + +echo 'evaluating from pass '$evaluate_pass + +model_list=./model.list +touch $model_list | echo $evaluate_pass > $model_list +net_conf=trainer_config.py +paddle train --config=$net_conf \ + --model_list=$model_list \ + --job=test \ + --use_gpu=false \ + --trainer_count=4 \ + --config_args=is_test=1 \ + 2>&1 | tee 'test.log' +``` + +The function `get_best_pass` gets the best model by classification error rate for testing. In this example, We use test dataset of IMDB as validation by default. Unlike training, it needs to specify `--job=test` and model path, namely `--model_list=$model_list` here. If running successfully, the log is saved in path of `demo/sentiment/test.log`. For example, in our test, the best model is `model_output/pass-00002`, the classification error is 0.115645 as follows. + +``` +Pass=0 samples=24999 AvgCost=0.280471 Eval: classification_error_evaluator=0.115645 +``` + +## Prediction + +`predict.py` provides a predicting interface. You should install python api of PaddlePaddle before using it. One example to predict unlabeled review of IMDB is as follows. 
Simply running: + +``` +cd demo/sentiment +./predict.sh +``` +predict.sh: + +``` +#Note the default model is pass-00002, you should make sure the model path +#exists or change the model path. +model=model_output/pass-00002/ +config=trainer_config.py +label=data/pre-imdb/labels.list +python predict.py \ + -n $config\ + -w $model \ + -b $label \ + -d data/pre-imdb/dict.txt \ + -i data/aclImdb/test/pos/10014_7.txt +``` + +* `predict.py`: predicting interface. +* -n $config: set network config. +* -w $model: set model path. +* -b $label: set dictionary about corresponding relation between integer label and string label. +* -d data/pre-imdb/dict.txt: set dictionary. +* -i data/aclImdb/test/pos/10014_7.txt: set one example file to predict. + +Note you should make sure the default model path `model_output/pass-00002` +exists or change the model path. + +Predicting result of this example: + +``` +Loading parameters from model_output/pass-00002/ +./data/aclImdb/test/pos/10014_7.txt: predicting label is pos +``` +We sincerely appreciate your interest and welcome your contributions. + +## Reference +[1] Brendan O'Connor, Ramnath Balasubramanyan, Bryan R. Routledge, and Noah A. Smith. 2010. [From Tweets to Polls: Linking Text Sentiment to Public Opinion Time Series](http://homes.cs.washington.edu/~nasmith/papers/oconnor+balasubramanyan+routledge+smith.icwsm10.pdf). In ICWSM-2010.
+[2] Johan Bollen, Huina Mao, Xiaojun Zeng. 2011. [Twitter mood predicts the stock market](http://arxiv.org/abs/1010.3003), Journal of Computational Science.
+[3] Alex Graves, Marcus Liwicki, Santiago Fernández, Roman Bertolami, Horst Bunke, and Jürgen Schmidhuber. 2009. [A novel connectionist system for unconstrained handwriting recognition. IEEE Transactions on Pattern Analysis and Machine Intelligence](http://www.cs.toronto.edu/~graves/tpami_2009.pdf), 31(5):855–868.
+[4] Zachary C. Lipton, [A Critical Review of Recurrent Neural Networks for Sequence Learning](http://arxiv.org/abs/1506.00019v1), arXiv:1506.00019.
+[5] Jie Zhou and Wei Xu; [End-to-end Learning of Semantic Role Labeling Using Recurrent Neural Networks](http://www.aclweb.org/anthology/P/P15/P15-1109.pdf); ACL-IJCNLP 2015.
diff --git a/release_doc/0.9.0/doc/_sources/demo/text_generation/index.txt b/release_doc/0.9.0/doc/_sources/demo/text_generation/index.txt new file mode 100644 index 0000000000000000000000000000000000000000..82da5524197ac8d4652f0e30f446b5a88bf1629d --- /dev/null +++ b/release_doc/0.9.0/doc/_sources/demo/text_generation/index.txt @@ -0,0 +1,9 @@ +Text Generation Tutorial +======================== + +.. toctree:: + :maxdepth: 3 + :glob: + + Training Locally + internal/cluster_train.md diff --git a/release_doc/0.9.0/doc/_sources/demo/text_generation/text_generation.txt b/release_doc/0.9.0/doc/_sources/demo/text_generation/text_generation.txt new file mode 100644 index 0000000000000000000000000000000000000000..d63f5cb6074c5768f9cff7937c5f0771c2619642 --- /dev/null +++ b/release_doc/0.9.0/doc/_sources/demo/text_generation/text_generation.txt @@ -0,0 +1,338 @@ +# Text generation Tutorial # + +Sequence to sequence has been proven to be a powerful model for language generation. It can be used for machine translation, query rewriting, image captioning, etc. + +This tutorial guides you through training a sequence to sequence model for neural machine translation (NMT) network that translates French to English. + +We follow the paper [Neural Machine Translation by Jointly Learning to Align and Translate](http://arxiv.org/abs/1409.0473) , which details the model architecture and training procedure for good performance on WMT-14 dataset. This tutorial reproduces this result in PaddlePaddle. + +We thank @caoying for the pull request that defines the model architecture and solver configurations. + +## Data Preparation ## +### Download and Extract ### +Download the WMT-14 dataset from [http://www-lium.univ-lemans.fr/~schwenk/cslm\_joint\_paper/](http://www-lium.univ-lemans.fr/~schwenk/cslm_joint_paper/), extract it, and divide Develop and Test data into separate folder. 
+ +- **Train data**: [bitexts (after selection)](http://www-lium.univ-lemans.fr/~schwenk/cslm_joint_paper/data/bitexts.tgz) +- **Develop and Test data**: [dev+test data](http://www-lium.univ-lemans.fr/~schwenk/cslm_joint_paper/data/dev+test.tgz) + +To do this, simply run the following commands in linux, otherwise, you need to download, extract, divide, and rename the file suffix respectively. + +```bash +cd demo/seqToseq/data +./wmt14_data.sh +``` + +We should find that the dataset `wmt14` has three folders as shown in the following table. + ++++++ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
folder nameFrench-English parallel corpora filenumber of total filesize
train_dataccb2_pc30.src, ccb2_pc30.trg, etctwelve3.55G
test_datantst1213.src, ntst1213.trgtwo1636k
gen_datantst14.src, ntst14.trgtwo864k
+
+ +- Each folder has French-English parallel corpora +- **XXX.src** are source French files; **XXX.trg** are target English files. +- The number of lines of **XXX.src** and **XXX.trg** should be the same. +- Each line is a French/English sentence. +- There is a one-to-one correspondence between the sentence at the i-th line of **XXX.src** and **XXX.trg**. + +### User Defined Dataset ### + +If you need to do other sequence-to-sequence tasks, such as Paraphrasing, you only need to organize the data as follows, and place them in `demo/seqToseq/data`: + + dataset + train + file1.src file1.trg + file2.src file2.trg + ...... + test + file1.src file1.trg + file2.src file2.trg + ...... + gen + file1.src file1.trg + file2.src file2.trg + ...... +- 1st directory: dataset folder name +- 2nd directory: folder of train, test, and gen. The names of these three folders are fixed. +- 3rd file: Source-Target parallel corpora files. + - **XXX.src** are source files, **XXX.trg** are target files. + - Each line of the file must be a sequence. + - There should be a one-to-one correspondence between the i-th sequence of **XXX.src** and **XXX.trg**. + +## Data Preprocess ## +### Preprocessing Workflow ### +- Concat each Source-Target parallel corpora to be one file: + - concat each **XXX.src** and **XXX.trg** to be **XXX**. 
+ - the i-th line of **XXX** = the i-th line of **XXX.src** + '\t' + the i-th line of **XXX.trg** +- Build source and target dictionary of train data, each dictionary has DICTSIZE words: + - the most frequent (DICTSIZE-3) words + - 3 special token: + - ``: the start of a sequence + - ``: the end of a sequence + - ``: a word not included in dictionary + +### Preprocessing Command and Result +The general command for preprocessing the dataset is: + +```python +cd demo/seqToseq/ +python preprocess.py -i INPUT [-d DICTSIZE] [-m] +``` + +- `-i INPUT`: the path of input original dataset +- `-d DICTSIZE`: the specified word count of dictionary, if not set, dictionary will contain all the words in input dataset +- `-m --mergeDict`: merge source and target dictionary, thus, two dictionaries have the same context + +And you will see messages like this: + + concat parallel corpora for dataset + build source dictionary for train data + build target dictionary for train data + dictionary size is XXX + +Here, you can simply run the command: + +```python +python preprocess.py -i data/wmt14 -d 30000 +``` + +It will take several minutes, and store the preprocessed dataset in `demo/seqToseq/data/pre-wmt14`, the directory has following structure. + + train test gen train.list test.list gen.list src.dict trg.dict + +- **train, test, gen**: folder contains French-English parallel corpora of train data, test data and gen data respectively. Each line of file in folder contains two parts, the former is a French sequence, and the latter is a corresponding English sequence. 
+- **train.list, test.list, gen.list**: text contains a file list in train folder, test folder and gen folder respectively +- **src.dict, trg.dict**: source (French) / target (English) dictionary, each dictionary has 30000 words: the most frequent 29997 words and 3 special token + +## Model Training ## +### Introduction ### + +Neural machine translation (NMT) aims at building a single neural network that can be jointly tuned to maximize translation performance. Recently proposed NMT models often belong to a family of encoder–decoder models. Encoder-Decoder models encode a source sentence into a fixed-length vector from which a decoder generates a target sentence. + +In this task, we use an extension to the encoder–decoder model which learns to align and translate jointly. Each time the model generates a word in a translation, it searches for a set of positions in the source sentence for the most relevant information. The decoder predicts a target word based on the context vectors associated with these source positions and all the previous generated target words. For more detailed explanation, readers can refer to paper [Neural Machine Translation by Jointly Learning to Align and Translate](http://arxiv.org/abs/1409.0473). + +The most distinguishing feature of this model is that it doesn't encode an input sentence into a single fixed-length vector. Instead, it encodes the input sentence into a sequence of vectors, where one vector corresponds to an input element. A subset of these vectors is chosen adaptively while decoding the translated sentence. This frees a NMT model from having to squash all the information of a source sentence, regardless of its length, into a fixed-length vector. The improvement of this model is more apparent for longer sentences, but the improvement can be observed for sentences of any length. +
![](./encoder-decoder-attention-model.png)
+
Figure 1. Encoder-Decoder-Attention-Model
+ +### Training Model in PaddlePaddle ### +We need to create a model config file before training. Here is an example `demo/seqToseq/translation/train.conf`. The first three lines import python function for defining network, and define the job_mode and attention_mode. + +```python +from seqToseq_net import * +is_generating = False + +### Data Definiation +train_conf = seq_to_seq_data(data_dir = "./data/pre-wmt14", + is_generating = is_generating) + +### Algorithm Configuration +settings( + learning_method = AdamOptimizer(), + batch_size = 50, + learning_rate = 5e-4) + +### Network Architecture +gru_encoder_decoder(train_conf, is_generating) +``` + +1. **Data Definiation**: We define a SeqToSeq train and test data in our example. It returns train_conf as the configuration, following is its input arguments: + - data_dir: directory of train data and test data + - is\_generating: whether this config is used for generating, here is false +2. **Algorithm Configuration**: We use the SGD training algorithm (default), ADAM learning method in our example, specify batch_size as 50, and learning rate as 5e-4. +3. **Network Architecture**: We use an attention version of GRU Encoder-Decoder network in our example. It consists a bidirectional GRU as an encoder and a decoder that emulates searching through a source sentence during decoding a translation. 
+ +### Training Command and Result### +After writing the model config, we can train the model by running the command: + +```bash +cd demo/seqToseq/translation +./train.sh +``` + +The `train.sh` is shown as follows: + +```bash +paddle train \ +--config='translation/train.conf' \ +--save_dir='translation/model' \ +--use_gpu=false \ +--num_passes=16 \ +--show_parameter_stats_period=100 \ +--trainer_count=4 \ +--log_period=10 \ +--dot_period=5 \ +2>&1 | tee 'translation/train.log' +``` +- config: set config of neural network +- save_dir: set output path to save models +- use_gpu: whether to use GPU to train, here use CPU +- num_passes: set number of passes. One pass in paddle means training all samples in dataset one time +- show_parameter_stats_period: here show parameter statistic every 100 batches +- trainer_count: set number of CPU threads or GPU devices +- log_period: here print log every 10 batches +- dot_period: here print '.' every 5 batches + +The training loss function is printed every 10 batch by default, and you will see messages like this: + + I0719 19:16:45.952062 15563 TrainerInternal.cpp:160] Batch=10 samples=500 AvgCost=198.475 CurrentCost=198.475 Eval: classification_error_evaluator=0.737155 CurrentEval: classification_error_evaluator=0.737155 + I0719 19:17:56.707319 15563 TrainerInternal.cpp:160] Batch=20 samples=1000 AvgCost=157.479 CurrentCost=116.483 Eval: classification_error_evaluator=0.698392 CurrentEval: classification_error_evaluator=0.659065 + ..... +- AvgCost: Average Cost from 0th batch to current batch +- CurrentCost: Cost in current batch +- classification\_error\_evaluator(Eval): False prediction rate for each word from 0th evaluation to current evaluation +- classification\_error\_evaluator(CurrentEval): False prediction rate for each word in current evaluation + +And when the classification\_error\_evaluator is less than 0.35, the model is trained sucessfully. 
+ +## Text Generation ## +### Introduction ### + +Generally speaking, the NMT model is conditioned on the encodings of the source sentence, and then to predict the next target word by given the current target word. In the training process, the current word is always knowns as the ground truth, by contrast. In the generating process, the current word is the output of the decoder in last time step, which is accessed to from a memory in PaddlePaddle. + +Besides, we use Beam Search to generate sequences. Beam search uses breadth-first search to build its search tree. At each level of the tree, it generates all successors of the states at the current level, sorting them in increasing order of heuristic cost. However, it only stores a predetermined number of best states at each level (called the beam size). + +### Pretrained model ### +We trained the model on a cluster with 50 nodes, each node has two 6-core CPUs. We trained 16 passes in 5 days, where each pass takes 7 hours. The model_dir has 16 sub-folder, each of which contains the whole model parameters with 202MB size. And we find pass-00012 model has the highest BLEU 27.77 (see paper [BLEU: a Method for Automatic Evaluation of Machine Translation](http://www.aclweb.org/anthology/P02-1040.pdf)). To download and extract this model, simply run the following commands in linux. + +```bash +cd demo/seqToseq/data +./wmt14_model.sh +``` + +### Generating Model in PaddlePaddle ### +We need to create a model config file before translating French sequence. Here is an example `demo/seqToseq/translation/gen.conf`, the first three lines import python function for defining network, and define the job\_mode and attention\_mode. 
+ +```python +from seqToseq_net import * +is_generating = True + +################## Data Definiation ##################### +gen_conf = seq_to_seq_data(data_dir = "./data/pre-wmt14", + is_generating = is_generating, + gen_result = "./translation/gen_result") + +############## Algorithm Configuration ################## +settings( + learning_method = AdamOptimizer(), + batch_size = 1, + learning_rate = 0) + +################# Network configure ##################### +gru_encoder_decoder(gen_conf, is_generating) +``` + +1. **Data Definition**: We define a SeqToSeq gen data in our example. It returns gen_conf as the configuration, the following are its input arguments: + - data\_dir: directory of gen data + - is\_generating: whether this config is used for generating, here it is true + - gen\_result: file to store the generation result +2. **Algorithm Configuration**: We use SGD training algorithm in generation, and specify batch_size as 1 (each time generate one sequence), and learning rate as 0. +3. **Network Architecture**: Essentially the same as the training model. 
+ +### Generating Command and Result ### +After writing the model config, we can do text translation from French to English by running the command: + +```bash +cd demo/seqToseq/translation +./gen.sh +``` + +The `gen.sh` is shown as follows, unlike training, there are some different arguments to specify: + +```bash +paddle train \ +--job=test \ +--config='translation/gen.conf' \ +--save_dir='data/wmt14_model' \ +--use_gpu=true \ +--num_passes=13 \ +--test_pass=12 \ +--trainer_count=1 \ +2>&1 | tee 'translation/gen.log' +``` +- job: set job mode to test +- save_dir: the path of saved models +- num_passes and test_pass: loading model parameters from test_pass to (num_passes - 1), here only loads `data/wmt14_model/pass-00012` + +You will see messages like this: + + I0706 14:48:31.178915 31441 GradientMachine.cpp:143] Loading parameters from data/wmt14_model/pass-00012 + I0706 14:48:40.012039 31441 Tester.cpp:125] Batch=100 samples=100 AvgCost=0 + I0706 14:48:48.898632 31441 Tester.cpp:125] Batch=200 samples=200 AvgCost=0 + ... + +And the generating result in `demo/seqToseq/translation/gen_result` likes: + + 0 + 0 -11.1314 The about the width of the seats while large controls are at stake + 1 -11.1519 The on the width of the seats while large controls are at stake + 2 -11.5988 The about the width of the seats while large controls are at stake . + + 1 + 0 -24.4149 The dispute is between the major aircraft manufacturers about the width of the tourist seats on the flights , paving the way for a confrontation during the month of the Dubai . + 1 -26.9524 The dispute is between the major aircraft manufacturers about the width of the tourist seats on the flights , paving the way for a confrontation during the month of Dubai ' s . + 2 -27.9574 The dispute is between the major aircraft manufacturers about the width of the tourist seats on the flights , paving the way for a confrontation during the month of Dubai ' s Dubai . + ... 
+ +- This is the beam search result, where beam size is 3 +- '0' in 1st-line and '1' in 6th-line mean the sequence-id in gen data +- Other six lines list the beam search results + - The 2nd-column is the score of beam search (from large to small) + - The 3rd-colunm is the generating English sequence +- There is 2 special tokens: + - ``: the end of a sequence + - ``: a word not included in dictionary + +### Bleu Evalutaion ### +Human evaluations of machine translation are extensive but expensive. Paper [BLEU: a Method for Automatic Evaluation of Machine Translation](http://www.aclweb.org/anthology/P02-1040.pdf) presents a method as an automated understudy to skilled human judges which substitutes for them when there is need for quick or frequent evaluations. [Moses](http://www.statmt.org/moses/) is a statistical machine translation system, and we use [multi-bleu.perl](https://github.com/moses-smt/mosesdecoder/blob/master/scripts/generic/multi-bleu.perl) of it to do Bleu Evalution. To download this script, simply run the following command: + +```bash +cd demo/seqToseq/translation +./moses_bleu.sh +``` + +Since the standard translation is alrealy downloaded as `data/wmt14/gen/ntst14.trg`, we can do Bleu Evalution by running the command: + +```bash +cd demo/seqToseq/translation +./eval_bleu.sh FILE BEAMSIZE +``` + +- FILE: the generation result file +- BEAMSIZE: expand width in beam search diff --git a/release_doc/0.9.0/doc/_sources/dev/new_layer/index.txt b/release_doc/0.9.0/doc/_sources/dev/new_layer/index.txt new file mode 100644 index 0000000000000000000000000000000000000000..37dac3a14dedf2aaa99335e1b0ebe110dc746174 --- /dev/null +++ b/release_doc/0.9.0/doc/_sources/dev/new_layer/index.txt @@ -0,0 +1,7 @@ +Writing New Layers +================== + +.. 
toctree:: + :maxdepth: 3 + + new_layer.rst diff --git a/release_doc/0.9.0/doc/_sources/dev/new_layer/new_layer.txt b/release_doc/0.9.0/doc/_sources/dev/new_layer/new_layer.txt new file mode 100644 index 0000000000000000000000000000000000000000..bd4a4c46c87f6429338b4d220a80b6265a1f253f --- /dev/null +++ b/release_doc/0.9.0/doc/_sources/dev/new_layer/new_layer.txt @@ -0,0 +1,390 @@ +Writing New Layers +================== + +This tutorial will guide you to write customized layers in PaddlePaddle. We will utilize fully connected layer as an example to guide you through the following steps for writing a new layer. + +- Derive equations for the forward and backward part of the layer. +- Implement C++ class for the layer. +- Write gradient check unit test to make sure the gradients are correctly computed. +- Implement Python wrapper for the layer. + +Derive Equations +================ + +First we need to derive equations of the *forward* and *backward* part of the layer. The forward part computes the output given an input. The backward part computes the gradients of the input and the parameters given the the gradients of the output. + +The illustration of a fully connected layer is shown in the following figure. In a fully connected layer, all output nodes are connected to all the input nodes. + +.. image:: FullyConnected.jpg + :align: center + :scale: 60 % + +The *forward part* of a layer transforms an input into the corresponding output. +Fully connected layer takes a dense input vector with dimension :math:`D_i`. It uses a transformation matrix :math:`W` with size :math:`D_i \times D_o` to project :math:`x` into a :math:`D_o` dimensional vector, and add a bias vector :math:`b` with dimension :math:`D_o` to the vector. + +.. math:: + + y = f(W^T x + b) + +where :math:`f(.)` is an nonlinear *activation* function, such as sigmoid, tanh, and Relu. + +The transformation matrix :math:`W` and bias vector :math:`b` are the *parameters* of the layer. 
The *parameters* of a layer are learned during training in the *backward pass*. The backward pass computes the gradients of the output function with respect to all parameters and inputs. The optimizer can use chain rule to compute the gradients of the loss function with respect to each parameter. + +Suppose our loss function is :math:`c(y)`, then + +.. math:: + + \frac{\partial c(y)}{\partial x} = \frac{\partial c(y)}{\partial y} \frac{\partial y}{\partial x} + +Suppose :math:`z = f(W^T x + b)`, then + +.. math:: + + \frac{\partial y}{\partial z} = \frac{\partial f(z)}{\partial z} + +This derivative can be automatically computed by our base layer class. + +Then, for fully connected layer, we need to compute: + +.. math:: + + \frac{\partial z}{\partial x} = W, \frac{\partial z_j}{\partial W_{ij}} = x_i, \frac{\partial z}{\partial b} = \mathbf 1 + +where :math:`\mathbf 1` is an all one vector, :math:`W_{ij}` is the number at the i-th row and j-th column of the matrix :math:`W`, :math:`z_j` is the j-th component of the vector :math:`z`, and :math:`x_i` is the i-th component of the vector :math:`x`. + +Finally we can use chain rule to calculate :math:`\frac{\partial z}{\partial x}`, and :math:`\frac{\partial z}{\partial W}`. The details of the computation will be given in the next section. + +Implement C++ Class +=================== + +The C++ class of the layer implements the initialization, forward, and backward part of the layer. The fully connected layer is at :code:`paddle/gserver/layers/FullyConnectedLayer.h` and :code:`paddle/gserver/layers/FullyConnectedLayer.cpp`. We list simplified version of the code below. + +It needs to derive the base class :code:`paddle::BaseLayer`, and it needs to override the following functions: + +- constructor and destructor. +- :code:`init` function. It is used to initialize the parameters and settings. +- :code:`forward`. It implements the forward part of the layer. +- :code:`backward`. 
It implements the backward part of the layer. +- :code:`prefetch`. It is utilized to determine the rows corresponding parameter matrix to prefetch from parameter server. You do not need to override this function if your layer does not need remote sparse update. (most layers do not need to support remote sparse update) + + +The header file is listed below: + +.. code-block:: c++ + + namespace paddle { + /** + * A layer has full connections to all neurons in the previous layer. + * It computes an inner product with a set of learned weights, and + * (optionally) adds biases. + * + * The config file api is fc_layer. + */ + + class FullyConnectedLayer : public Layer { + protected: + WeightList weights_; + std::unique_ptr biases_; + + public: + explicit FullyConnectedLayer(const LayerConfig& config) + : Layer(config) {} + ~FullyConnectedLayer() {} + + bool init(const LayerMap& layerMap, const ParameterMap& parameterMap); + + Weight& getWeight(int idx) { return *weights_[idx]; } + + void prefetch(); + void forward(PassType passType); + void backward(const UpdateCallback& callback = nullptr); + }; + } // namespace paddle + +It defines the parameters as class variables. We use :code:`Weight` class as abstraction of parameters. It supports multi-thread update. The details of this class will be described in details in the implementations. + +- :code:`weights_` is a list of weights for the transformation matrices. The current implementation can have more than one inputs. Thus, it has a list of weights. One weight corresponds to an input. +- :code:`biases_` is a weight for the bias vector. + +The fully connected layer does not have layer configuration hyper-parameters. If there are some layer hyper-parameters, a common practice is to store it in :code:`LayerConfig& config`, and put it into a class variable in the constructor. + +The following code snippet implements the :code:`init` function. 
+ +- First, every :code:`init` function must call the :code:`init` function of the base class :code:`Layer::init(layerMap, parameterMap);`. This statement will initialize the required variables and connections for each layer. +- The it initializes all the weights matrices :math:`W`. The current implementation can have more than one inputs. Thus, it has a list of weights. +- Finally, it initializes the bias. + + +.. code-block:: c++ + + bool FullyConnectedLayer::init(const LayerMap& layerMap, + const ParameterMap& parameterMap) { + /* Initialize the basic parent class */ + Layer::init(layerMap, parameterMap); + + /* initialize the weightList */ + CHECK(inputLayers_.size() == parameters_.size()); + for (size_t i = 0; i < inputLayers_.size(); i++) { + // Option the parameters + size_t height = inputLayers_[i]->getSize(); + size_t width = getSize(); + + // create a new weight + if (parameters_[i]->isSparse()) { + CHECK_LE(parameters_[i]->getSize(), width * height); + } else { + CHECK_EQ(parameters_[i]->getSize(), width * height); + } + Weight* w = new Weight(height, width, parameters_[i]); + + // append the new weight to the list + weights_.emplace_back(w); + } + + /* initialize biases_ */ + if (biasParameter_.get() != NULL) { + biases_ = std::unique_ptr(new Weight(1, getSize(), biasParameter_)); + } + + return true; + } + +The implementation of the forward part has the following steps. + +- Every layer must call :code:`Layer::forward(passType);` at the beginning of its :code:`forward` function. +- Then it allocates memory for the output using :code:`reserveOutput(batchSize, size);`. This step is necessary because we support the batches to have different batch sizes. :code:`reserveOutput` will change the size of the output accordingly. For the sake of efficiency, we will allocate new memory if we want to expand the matrix, but we will reuse the existing memory block if we want to shrink the matrix. +- Then it computes :math:`\sum_i W_i x + b` using Matrix operations. 
:code:`getInput(i).value` retrieve the matrix of the i-th input. Each input is a :math:`batchSize \times dim` matrix, where each row represents an single input in a batch. For a complete lists of supported matrix operations, please refer to :code:`paddle/math/Matrix.h` and :code:`paddle/math/BaseMatrix.h`. +- Finally it applies the activation function using :code:`forwardActivation();`. It will automatically applies the corresponding activation function specifies in the network configuration. + + +.. code-block:: c++ + + void FullyConnectedLayer::forward(PassType passType) { + Layer::forward(passType); + + /* malloc memory for the output_ if necessary */ + int batchSize = getInput(0).getBatchSize(); + int size = getSize(); + + { + // Settup the size of the output. + reserveOutput(batchSize, size); + } + + MatrixPtr outV = getOutputValue(); + + // Apply the the transformation matrix to each input. + for (size_t i = 0; i != inputLayers_.size(); ++i) { + auto input = getInput(i); + CHECK(input.value) << "The input of 'fc' layer must be matrix"; + i == 0 ? outV->mul(input.value, weights_[i]->getW(), 1, 0) + : outV->mul(input.value, weights_[i]->getW(), 1, 1); + } + + /* add the bias-vector */ + if (biases_.get() != NULL) { + outV->addBias(*(biases_->getW()), 1); + } + + /* activation */ { + forwardActivation(); + } + } + +The implementation of the backward part has the following steps. + +- :code:`backwardActivation()` computes the gradients of the activation. The gradients will be multiplies in place to the gradients of the output, which can be retrieved using :code:`getOutputGrad()`. +- Compute the gradients of bias. Notice that we an use :code:`biases_->getWGrad()` to get the gradient matrix of the corresponding parameter. After the gradient of one parameter is updated, it **MUST** call :code:`getParameterPtr()->incUpdate(callback);`. This is utilize for parameter update over multiple threads or multiple machines. 
+- Then it computes the gradients of the transformation matrices and inputs, and it calls :code:`incUpdate` for the corresponding parameter. This gives the framework the chance to know whether it has gathered all the gradient to one parameter so that it can do some overlapping work (e.g., network communication) + + +.. code-block:: c++ + + void FullyConnectedLayer::backward(const UpdateCallback& callback) { + /* Do derivation for activations.*/ { + backwardActivation(); + } + + if (biases_ && biases_->getWGrad()) { + biases_->getWGrad()->collectBias(*getOutputGrad(), 1); + + /* Increasing the number of gradient */ + biases_->getParameterPtr()->incUpdate(callback); + } + + bool syncFlag = hl_get_sync_flag(); + + for (size_t i = 0; i != inputLayers_.size(); ++i) { + /* Calculate the W-gradient for the current layer */ + if (weights_[i]->getWGrad()) { + MatrixPtr input_T = getInputValue(i)->getTranspose(); + MatrixPtr oGrad = getOutputGrad(); + { + weights_[i]->getWGrad()->mul(input_T, oGrad, 1, 1); + } + } + + + /* Calculate the input layers error */ + MatrixPtr preGrad = getInputGrad(i); + if (NULL != preGrad) { + MatrixPtr weights_T = weights_[i]->getW()->getTranspose(); + preGrad->mul(getOutputGrad(), weights_T, 1, 1); + } + + { + weights_[i]->getParameterPtr()->incUpdate(callback); + } + } + } + +The :code:`prefetch` function specifies the rows that need to be fetched from parameter server during training. It is only useful for remote sparse training. In remote sparse training, the full parameter matrix is stored distributedly at the parameter server. When the layer uses a batch for training, only a subset of locations of the input is non-zero in this batch. Thus, this layer only needs the rows of the transformation matrix corresponding to the locations of these non-zero entries. The :code:`prefetch` function specifies the ids of these rows. + +Most of the layers do not need remote sparse training function. You do not need to override this function in this case. 
+ +.. code-block:: c++ + + void FullyConnectedLayer::prefetch() { + for (size_t i = 0; i != inputLayers_.size(); ++i) { + auto* sparseParam = + dynamic_cast(weights_[i]->getW().get()); + if (sparseParam) { + MatrixPtr input = getInputValue(i); + sparseParam->addRows(input); + } + } + } + +Finally, you can use :code:`REGISTER_LAYER(fc, FullyConnectedLayer);` to register the layer. :code:`fc` is the identifier of the layer, and :code:`FullyConnectedLayer` is the class name of the layer. + +.. code-block:: c++ + + namespace paddle { + REGISTER_LAYER(fc, FullyConnectedLayer); + } + +If the :code:`cpp` file is put into :code:`paddle/gserver/layers`, it will be automatically added to the compilation list. + + +Write Gradient Check Unit Test +=============================== + +An easy way to verify the correctness of new layer's implementation is to write a gradient check unit test. Gradient check unit test utilizes finite difference method to verify the gradient of a layer. It modifies the input with a small perturbation :math:`\Delta x` and observes the changes of output :math:`\Delta y`, the gradient can be computed as :math:`\frac{\Delta y}{\Delta x }`. This gradient can be compared with the gradient computed by the :code:`backward` function of the layer to ensure the correctness of the gradient computation. Notice that the gradient check only tests the correctness of the gradient computation, it does not necessarily guarantee the correctness of the implementation of the :code:`forward` and :code:`backward` function. You need to write more sophisticated unit tests to make sure your layer is implemented correctly. + +All the gradient check unit tests are located in :code:`paddle/gserver/tests/test_LayerGrad.cpp`. You are recommended to put your test into a new test file if you are planning to write a new layer. The gradient test of the gradient check unit test of the fully connected layer is listed below. It has the following steps. + ++ Create layer configuration. 
A layer configuration can include the following attributes: + - size of the bias parameter. (4096 in our example) + - type of the layer. (fc in our example) + - size of the layer. (4096 in our example) + - activation type. (softmax in our example) + - dropout rate. (0.1 in our example) ++ configure the input of the layer. In our example, we have only one input. + - type of the input (:code:`INPUT_DATA`) in our example. It can be one of the following types: + - :code:`INPUT_DATA`: dense vector. + - :code:`INPUT_LABEL`: integer. + - :code:`INPUT_DATA_TARGET`: dense vector, but it is not used to compute the gradient. + - :code:`INPUT_SEQUENCE_DATA`: dense vector with sequence information. + - :code:`INPUT_HASSUB_SEQUENCE_DATA`: dense vector with both sequence and sub-sequence information. + - :code:`INPUT_SEQUENCE_LABEL`: integer with sequence information. + - :code:`INPUT_SPARSE_NON_VALUE_DATA`: 0-1 sparse data. + - :code:`INPUT_SPARSE_FLOAT_VALUE_DATA`: float sparse data. + - name of the input. (:code:`layer_0` in our example) + - size of the input. (8192 in our example) + - number of non-zeros, only useful for sparse inputs. + - format of sparse data, only useful for sparse inputs. ++ each input needs to call :code:`config.layerConfig.add_inputs();` once. ++ call :code:`testLayerGrad` to perform gradient checks. It has the following arguments. + - layer and input configurations. (:code:`config` in our example) + - type of the input. (:code:`fc` in our example) + - batch size of the gradient check. (100 in our example) + - whether the input is transposed. Most layers need to set it to :code:`false`. (:code:`false` in our example) + - whether to use weights. Some layers or activations perform normalization so that the sum of their output is a constant. For example, the sum of output of a softmax activation is one. In this case, we cannot correctly compute the gradients using regular gradient check techniques. 
A weighted sum of the output, which is not a constant, is utilized to compute the gradients. (:code:`true` in our example, because the activation of a fully connected layer can be softmax) + +.. code-block:: c++ + + void testFcLayer(string format, size_t nnz) { + // Create layer configuration. + TestConfig config; + config.biasSize = 4096; + config.layerConfig.set_type("fc"); + config.layerConfig.set_size(4096); + config.layerConfig.set_active_type("sigmoid"); + config.layerConfig.set_drop_rate(0.1); + // Setup inputs. + config.inputDefs.push_back( + {INPUT_DATA, "layer_0", 8192, nnz, ParaSparse(format)}); + config.layerConfig.add_inputs(); + LOG(INFO) << config.inputDefs[0].sparse.sparse << " " + << config.inputDefs[0].sparse.format; + for (auto useGpu : {false, true}) { + testLayerGrad(config, "fc", 100, /* trans */ false, useGpu, + /* weight */ true); + } + } + +If you are creating a new file for the test, such as :code:`paddle/gserver/tests/testFCGrad.cpp`, you need to add the file to :code:`paddle/gserver/tests/CMakeLists.txt`. An example is given below. All the unit tests will run when you execute the command :code:`make tests`. Notice that some layers might need high accuracy for the gradient check unit tests to work well. You need to configure :code:`WITH_DOUBLE` to `ON` when configuring cmake. + +.. code-block:: bash + + add_unittest_without_exec(test_FCGrad + test_FCGrad.cpp + LayerGradUtil.cpp + TestUtil.cpp) + + add_test(NAME test_FCGrad + COMMAND test_FCGrad) + + +Implement Python Wrapper +======================== + +Implementing Python wrapper allows us to use the added layer in configuration files. All the Python wrappers are in file :code:`python/paddle/trainer/config_parser.py`. An example of the Python wrapper for fully connected layer is listed below. It has the following steps: + +- Use :code:`@config_layer('fc')` at the decorator for all the Python wrapper class. :code:`fc` is the identifier of the layer. 
+- Implement the :code:`__init__` constructor function. + - It first calls the :code:`super(FCLayer, self).__init__(name, 'fc', size, inputs=inputs, **xargs)` base constructor function. :code:`FCLayer` is the Python wrapper class name, and :code:`fc` is the layer identifier name. They must be correct in order for the wrapper to work. + - Then it computes the size and format (whether sparse) of each transformation matrix as well as the size. + +.. code-block:: python + + @config_layer('fc') + class FCLayer(LayerBase): + def __init__( + self, + name, + size, + inputs, + bias=True, + **xargs): + super(FCLayer, self).__init__(name, 'fc', size, inputs=inputs, **xargs) + for input_index in xrange(len(self.inputs)): + input_layer = self.get_input_layer(input_index) + psize = self.config.size * input_layer.size + dims = [input_layer.size, self.config.size] + format = self.inputs[input_index].format + sparse = format == "csr" or format == "csc" + if sparse: + psize = self.inputs[input_index].nnz + self.create_input_parameter(input_index, psize, dims, sparse, format) + self.create_bias_parameter(bias, self.config.size) + +In network configuration, the layer can be specified using the following code snippet. The arguments of this class are: + +- :code:`name` is the name identifier of the layer instance. +- :code:`type` is the type of the layer, specified using layer identifier. +- :code:`size` is the output size of the layer. +- :code:`bias` specifies whether this layer instance has bias. +- :code:`inputs` specifies a list of layer instance names as inputs. + +.. code-block:: python + + Layer( + name = "fc1", + type = "fc", + size = 64, + bias = True, + inputs = [Input("pool3")] + ) + +You are also recommended to implement a helper for the Python wrapper, which makes it easier to write models. You can refer to :code:`python/paddle/trainer_config_helpers/layers.py` for examples. 
diff --git a/release_doc/0.9.0/doc/_sources/index.txt b/release_doc/0.9.0/doc/_sources/index.txt new file mode 100644 index 0000000000000000000000000000000000000000..cbd08ba52abe529aec84f6b1c2e35300496878a5 --- /dev/null +++ b/release_doc/0.9.0/doc/_sources/index.txt @@ -0,0 +1,23 @@ +PaddlePaddle Documentation +========================== + +User Guide +---------- +* [Introduction](introduction/index.md) +* [Quick Start](demo/quick_start/index_en.md) +* [Build and Installation](build/index.rst) +* [Contribute Code](build/contribute_to_paddle.md) +* [User Interface](ui/index.md) +* [Model Config Interface](ui/api/trainer_config_helpers/index.rst) +* [Example and Demo](demo/index.md) +* [Cluster Train](cluster/index.md) + +Development Guide +----------------- +* [Layer Documents](layer.md) +* [Writing New Layers](dev/new_layer/index.rst) +* [Source Code Documents](source/index.md) + +Algorithm Tutorial +------------------ +* [RNN Configuration](algorithm/rnn/rnn.rst) diff --git a/release_doc/0.9.0/doc/_sources/introduction/index.txt b/release_doc/0.9.0/doc/_sources/introduction/index.txt new file mode 100644 index 0000000000000000000000000000000000000000..01f52031a1d0247cd0b885218c17001f23685239 --- /dev/null +++ b/release_doc/0.9.0/doc/_sources/introduction/index.txt @@ -0,0 +1,100 @@ +# Introduction + +PaddlePaddle is a deep learning platform open-sourced by Baidu. With PaddlePaddle, you can easily train a classic neural network within a couple lines of configuration, or you can build sophisticated models that provide state-of-the-art performance on difficult learning tasks like sentiment analysis, machine translation, image caption and so on. + +## 1. 
A Classic Problem + +Now, to give you a hint of what using PaddlePaddle looks like, let's start with a fundamental learning problem - **simple linear regression** : you have observed a set of two-dimensional data points of `X` and `Y`, where `X` is an explanatory variable and `Y` is the corresponding dependent variable, and you want to recover the underlying correlation between `X` and `Y`. Linear regression can be used in many practical scenarios. For example, `X` can be a variable about house size, and `Y` a variable about house price. You can build a model that captures the relationship between them by observing real estate markets. + +## 2. Prepare the Data + +Suppose the true relationship can be characterized as `Y = 2X + 0.3`; let's see how to recover this pattern only from observed data. Here is a piece of Python code that feeds synthetic data to PaddlePaddle. The code is pretty self-explanatory, the only extra thing you need to add for PaddlePaddle is a definition of input data types. + +```python +# dataprovider.py +from paddle.trainer.PyDataProvider2 import * +import random + +# define data types of input: 2 real numbers +@provider(input_types=[dense_vector(1), dense_vector(1)],use_seq=False) +def process(settings, input_file): + for i in xrange(2000): + x = random.random() + yield [x], [2*x+0.3] +``` + +## 3. Train a NeuralNetwork in PaddlePaddle + +To recover this relationship between `X` and `Y`, we use a neural network with one layer of linear activation units and a square error cost layer. Don't worry if you are not familiar with these terminologies, it's just saying that we are starting from a random line `Y' = wX + b` , then we gradually adapt `w` and `b` to minimize the difference between `Y'` and `Y`. Here is what it looks like in PaddlePaddle: + +```python +# trainer_config.py +from paddle.trainer_config_helpers import * + +# 1. read data. 
Suppose you saved above python code as dataprovider.py +data_file = 'empty.list' +with open(data_file, 'w') as f: f.writelines(' ') +define_py_data_sources2(train_list=data_file, test_list=None, + module='dataprovider', obj='process',args={}) + +# 2. learning algorithm +settings(batch_size=12, learning_rate=1e-3, learning_method=MomentumOptimizer()) + +# 3. Network configuration +x = data_layer(name='x', size=1) +y = data_layer(name='y', size=1) +y_predict = fc_layer(input=x, param_attr=ParamAttr(name='w'), size=1, act=LinearActivation(), bias_attr=ParamAttr(name='b')) +cost = regression_cost(input=y_predict, label=y) +outputs(cost) +``` + +Some of the most fundamental usages of PaddlePaddle are demonstrated: + +- The first part shows how to feed data into PaddlePaddle. In general cases, PaddlePaddle reads raw data from a list of files, and then do some user-defined process to get real input. In this case, we only need to create a placeholder file since we are generating synthetic data on the fly. + +- The second part describes learning algorithm. It defines in what ways adjustments are made to model parameters. PaddlePaddle provides a rich set of optimizers, but a simple momentum based optimizer will suffice here, and it processes 12 data points each time. + +- Finally, the network configuration. It usually is as simple as "stacking" layers. Three kinds of layers are used in this configuration: + - **Data Layer**: a network always starts with one or more data layers. They provide input data to the rest of the network. In this problem, two data layers are used respectively for `X` and `Y`. + - **FC Layer**: FC layer is short for Fully Connected Layer, which connects all the input units to current layer and does the actual computation specified as activation function. Computation layers like this are the fundamental building blocks of a deeper model. + - **Cost Layer**: in training phase, cost layers are usually the last layers of the network. 
They measure the performance of the current model, and provide guidance to adjust parameters. + +Now that everything is ready, you can train the network with a simple command line call: + ``` + paddle train --config=trainer_config.py --save_dir=./output --num_passes=30 + ``` + +This means that PaddlePaddle will train this network on the synthetic dataset for 30 passes, and save all the models under path `./output`. You will see from the messages printed out during training phase that the model cost is decreasing as time goes by, which indicates we are getting a closer guess. + + +## 4. Evaluate the Model + +Usually, a different dataset that was left out during the training phase should be used to evaluate the models. However, we are lucky enough to know the real answer: `w=2, b=0.3`, thus a better option is to check out model parameters directly. + +In PaddlePaddle, training is just to get a collection of model parameters, which are `w` and `b` in this case. Each parameter is saved in an individual file in the popular `numpy` array format. Here is the code that reads parameters from the last pass. + +```python +import numpy as np +import os + +def load(file_name): + with open(file_name, 'rb') as f: + f.read(16) # skip header for float type. + return np.fromfile(f, dtype=np.float32) + +print 'w=%.6f, b=%.6f' % (load('output/pass-00029/w'), load('output/pass-00029/b')) +# w=1.999743, b=0.300137 +``` + +
![](./parameters.png)
+ +Although starts from a random guess, you can see that value of `w` changes quickly towards 2 and `b` changes quickly towards 0.3. In the end, the predicted line is almost identical with real answer. + +There, you have recovered the underlying pattern between `X` and `Y` only from observed data. + + +## 5. Where to Go from Here + +- Build and Installation +- Quick Start +- Example and Demo diff --git a/release_doc/0.9.0/doc/_sources/layer.txt b/release_doc/0.9.0/doc/_sources/layer.txt new file mode 100644 index 0000000000000000000000000000000000000000..45f2e2bad542ff5c29c89201b356728cf7ca8c1c --- /dev/null +++ b/release_doc/0.9.0/doc/_sources/layer.txt @@ -0,0 +1,4 @@ +# Layer Documents + +* [Layer Source Code Document](source/gserver/layers/index.rst) +* [Layer Python API Document](ui/api/trainer_config_helpers/layers_index.rst) diff --git a/release_doc/0.9.0/doc/_sources/source/api/api.txt b/release_doc/0.9.0/doc/_sources/source/api/api.txt new file mode 100644 index 0000000000000000000000000000000000000000..6fc450202df73f5ca99c2c52f257243aa37c90d4 --- /dev/null +++ b/release_doc/0.9.0/doc/_sources/source/api/api.txt @@ -0,0 +1,5 @@ +API +======== + +.. doxygenfile:: paddle/api/PaddleAPI.h +.. doxygenfile:: paddle/api/Internal.h diff --git a/release_doc/0.9.0/doc/_sources/source/cuda/cuda/cuda.txt b/release_doc/0.9.0/doc/_sources/source/cuda/cuda/cuda.txt new file mode 100644 index 0000000000000000000000000000000000000000..52f17c2b2e48aec8e6fc8d5a7e4f443ad72d96a6 --- /dev/null +++ b/release_doc/0.9.0/doc/_sources/source/cuda/cuda/cuda.txt @@ -0,0 +1,39 @@ +Cuda +============= + +Dynamic Link Libs +-------------------------- + +hl_dso_loader.h +`````````````````` +.. doxygenfile:: paddle/cuda/include/hl_dso_loader.h + +GPU Resources +---------------- + +hl_cuda.ph +`````````````` +.. doxygenfile:: paddle/cuda/include/hl_cuda.ph + +hl_cuda.h +`````````````` +.. 
doxygenfile:: paddle/cuda/include/hl_cuda.h + +CUDA Wrapper +-------------- + +hl_cuda_cublas.h +`````````````````````` +.. doxygenfile:: paddle/cuda/include/hl_cuda_cublas.h + +hl_cuda_cudnn.h +`````````````````````` +.. doxygenfile:: paddle/cuda/include/hl_cuda_cudnn.h + +hl_cuda_cudnn.h +`````````````````````` +.. doxygenfile:: paddle/cuda/include/hl_cuda_cudnn.ph + + + + diff --git a/release_doc/0.9.0/doc/_sources/source/cuda/cuda/index.txt b/release_doc/0.9.0/doc/_sources/source/cuda/cuda/index.txt new file mode 100644 index 0000000000000000000000000000000000000000..5fa38ff0fc8cea2b97262ea5493dea27b322dc1c --- /dev/null +++ b/release_doc/0.9.0/doc/_sources/source/cuda/cuda/index.txt @@ -0,0 +1,7 @@ +CUDA +==================== + +.. toctree:: + :maxdepth: 3 + + cuda.rst diff --git a/release_doc/0.9.0/doc/_sources/source/cuda/matrix/index.txt b/release_doc/0.9.0/doc/_sources/source/cuda/matrix/index.txt new file mode 100644 index 0000000000000000000000000000000000000000..63f95eb46618fd43a1140e4d857ae7e2fc89a6ae --- /dev/null +++ b/release_doc/0.9.0/doc/_sources/source/cuda/matrix/index.txt @@ -0,0 +1,7 @@ +Matrix +==================== + +.. toctree:: + :maxdepth: 3 + + matrix.rst diff --git a/release_doc/0.9.0/doc/_sources/source/cuda/matrix/matrix.txt b/release_doc/0.9.0/doc/_sources/source/cuda/matrix/matrix.txt new file mode 100644 index 0000000000000000000000000000000000000000..dd4f06599c5af29a0278617ffd1bd9f6ae6b222e --- /dev/null +++ b/release_doc/0.9.0/doc/_sources/source/cuda/matrix/matrix.txt @@ -0,0 +1,61 @@ +Matrix +======= + +Base Matrix +------------- + +hl_matrix.h +`````````````````` +.. doxygenfile:: paddle/cuda/include/hl_matrix.h + +hl_matrix_base.h +`````````````````` +.. doxygenfile:: paddle/cuda/include/hl_matrix_base.cuh + +hl_matrix_apply.cuh +`````````````````````` +.. doxygenfile:: paddle/cuda/include/hl_matrix_apply.cuh + +hl_matrix_ops.cuh +`````````````````````` +.. 
doxygenfile:: paddle/cuda/include/hl_matrix_ops.cuh + +hl_matrix_type.cuh +`````````````````````` +.. doxygenfile:: paddle/cuda/include/hl_matrix_type.cuh + +hl_sse_matrix_kernel.cuh +`````````````````````````` +.. doxygenfile:: paddle/cuda/include/hl_sse_matrix_kernel.cuh + +hl_batch_transpose.h +`````````````````````````` +.. doxygenfile:: paddle/cuda/include/hl_batch_transpose.h + +Sparse Matrix +-------------- + +hl_sparse.h +`````````````````` +.. doxygenfile:: paddle/cuda/include/hl_sparse.h + +hl_sparse.ph +`````````````````````` +.. doxygenfile:: paddle/cuda/include/hl_sparse.ph + +Others +--------------- + +hl_aggregate.h +`````````````````` +.. doxygenfile:: paddle/cuda/include/hl_aggregate.h + +hl_table_apply.h +`````````````````` +.. doxygenfile:: paddle/cuda/include/hl_table_apply.h + +hl_top_k.h +`````````````````` +.. doxygenfile:: paddle/cuda/include/hl_top_k.h + + diff --git a/release_doc/0.9.0/doc/_sources/source/cuda/rnn/index.txt b/release_doc/0.9.0/doc/_sources/source/cuda/rnn/index.txt new file mode 100644 index 0000000000000000000000000000000000000000..4913e47ba1cbc1c2b93fe3e128626a8e66aedc62 --- /dev/null +++ b/release_doc/0.9.0/doc/_sources/source/cuda/rnn/index.txt @@ -0,0 +1,7 @@ +RNN +==================== + +.. toctree:: + :maxdepth: 3 + + rnn.rst diff --git a/release_doc/0.9.0/doc/_sources/source/cuda/rnn/rnn.txt b/release_doc/0.9.0/doc/_sources/source/cuda/rnn/rnn.txt new file mode 100644 index 0000000000000000000000000000000000000000..ce8ed96692bcb79eec0e5e6ae52a8bf5f6573418 --- /dev/null +++ b/release_doc/0.9.0/doc/_sources/source/cuda/rnn/rnn.txt @@ -0,0 +1,36 @@ +Neural Networks +================== + +Base +------- +.. doxygenfile:: paddle/cuda/include/hl_gpu.h +.. doxygenfile:: paddle/cuda/include/hl_cnn.h +.. doxygenfile:: paddle/cuda/include/hl_functions.h +.. doxygenfile:: paddle/cuda/include/hl_avx_functions.h +.. doxygenfile:: paddle/cuda/include/hl_device_functions.cuh +.. 
doxygenfile:: paddle/cuda/include/hl_gpu_functions.cuh + +Activation Functions +----------------------- +.. doxygenfile:: paddle/cuda/include/hl_activation_functions.h + +RNN Related APIs +----------------- + +.. doxygenfile:: paddle/cuda/include/hl_recurrent_apply.cuh +.. doxygenfile:: paddle/cuda/include/hl_sequence.h + +LSTM Model +`````````````` +.. doxygenfile:: paddle/cuda/include/hl_lstm.h +.. doxygenfile:: paddle/cuda/include/hl_cpu_lstm.cuh +.. doxygenfile:: paddle/cuda/include/hl_gpu_lstm.cuh +.. doxygenfile:: paddle/cuda/include/hl_lstm_ops.cuh + +GRU Model +```````````````` +.. doxygenfile:: paddle/cuda/include/hl_gru_ops.cuh +.. doxygenfile:: paddle/cuda/include/hl_cpu_gru.cuh +.. doxygenfile:: paddle/cuda/include/hl_gpu_gru.cuh + + diff --git a/release_doc/0.9.0/doc/_sources/source/cuda/utils/index.txt b/release_doc/0.9.0/doc/_sources/source/cuda/utils/index.txt new file mode 100644 index 0000000000000000000000000000000000000000..7a84cbe27dd21e326add1a0a1774cbaa089e195f --- /dev/null +++ b/release_doc/0.9.0/doc/_sources/source/cuda/utils/index.txt @@ -0,0 +1,7 @@ +Utils +==================== + +.. toctree:: + :maxdepth: 3 + + utils.rst diff --git a/release_doc/0.9.0/doc/_sources/source/cuda/utils/utils.txt b/release_doc/0.9.0/doc/_sources/source/cuda/utils/utils.txt new file mode 100644 index 0000000000000000000000000000000000000000..1ea3e5404aa5fc792075aa09c7fd7a1986332c79 --- /dev/null +++ b/release_doc/0.9.0/doc/_sources/source/cuda/utils/utils.txt @@ -0,0 +1,23 @@ +Utilities +=========== + +HPPL Base +------------ + +hl_base.h +`````````````` +.. doxygenfile:: paddle/cuda/include/hl_base.h + +Timer +----------- + +hl_time.h +`````````````` +.. doxygenfile:: paddle/cuda/include/hl_time.h + +Thread Resource +----------- + +hl_thread.ph +`````````````` +.. 
doxygenfile:: paddle/cuda/include/hl_thread.ph diff --git a/release_doc/0.9.0/doc/_sources/source/gserver/activations/index.txt b/release_doc/0.9.0/doc/_sources/source/gserver/activations/index.txt new file mode 100644 index 0000000000000000000000000000000000000000..ccdae41128cd6b4edddda0ac44a825082d7495c9 --- /dev/null +++ b/release_doc/0.9.0/doc/_sources/source/gserver/activations/index.txt @@ -0,0 +1,5 @@ +Activations +============= + +.. doxygenclass:: paddle::ActivationFunction + :members: diff --git a/release_doc/0.9.0/doc/_sources/source/gserver/dataprovider/dataproviders.txt b/release_doc/0.9.0/doc/_sources/source/gserver/dataprovider/dataproviders.txt new file mode 100644 index 0000000000000000000000000000000000000000..e8aa4bc35634a0c6ede192a15b276564f7a2c13e --- /dev/null +++ b/release_doc/0.9.0/doc/_sources/source/gserver/dataprovider/dataproviders.txt @@ -0,0 +1,83 @@ +Data Providers +================ + +Base DataProvider +------------------ +.. doxygenclass:: paddle::DataProvider + :members: + +DataProviderGroup +------------------- +.. doxygenclass:: paddle::DataProviderGroup + :members: + +MultiDataProvider +------------------- +.. doxygenclass:: paddle::MultiDataProvider + :members: + +PyDataProvider +=================== + +IFieldScanner +------------- +.. doxygenclass:: paddle::IFieldScanner + :members: + +DenseScanner +------------- +.. doxygenclass:: paddle::DenseScanner + :members: + +IndexScanner +------------- +.. doxygenclass:: paddle::IndexScanner + :members: + +SparseNonValueScanner +--------------------- +.. doxygenclass:: paddle::SparseNonValueScanner + :members: + +SparseValueScanner +------------------ +.. doxygenclass:: paddle::SparseValueScanner + :members: + +SequenceScanner +------------------ +.. doxygenclass:: paddle::SparseValueScanner + :members: + +IPyDataProviderCache +-------------------- +.. doxygenclass:: paddle::IPyDataProviderCache + :members: + +NoCacheStrategy +--------------- +.. 
doxygenclass:: paddle::NoCacheStrategy + :members: + +CacheOnePassInMemory +-------------------- +.. doxygenclass:: paddle::CacheOnePassInMemory + :members: + +IPyDataProvider +--------------- +.. doxygenclass:: paddle::PyDataProvider2 + :members: + +Proto Data Provider +=================== + +ProtoDataProvider +---------------- +.. doxygenclass:: paddle::ProtoDataProvider + :members: + +ProtoSequenceDataProvider +---------------- +.. doxygenclass:: paddle::ProtoSequenceDataProvider + :members: diff --git a/release_doc/0.9.0/doc/_sources/source/gserver/dataprovider/index.txt b/release_doc/0.9.0/doc/_sources/source/gserver/dataprovider/index.txt new file mode 100644 index 0000000000000000000000000000000000000000..4f6077f1224f90f693515d3414da4d96dc652345 --- /dev/null +++ b/release_doc/0.9.0/doc/_sources/source/gserver/dataprovider/index.txt @@ -0,0 +1,7 @@ +Data Providers Documents +========================== + +.. toctree:: + :maxdepth: 3 + + dataproviders.rst diff --git a/release_doc/0.9.0/doc/_sources/source/gserver/evaluators/evaluators.txt b/release_doc/0.9.0/doc/_sources/source/gserver/evaluators/evaluators.txt new file mode 100644 index 0000000000000000000000000000000000000000..0c5cc85e7dff31693bdc9d2ee44ef470a0fc5f90 --- /dev/null +++ b/release_doc/0.9.0/doc/_sources/source/gserver/evaluators/evaluators.txt @@ -0,0 +1,102 @@ +Base Evaluator +============== + +Evaluator +--------- +.. doxygenclass:: paddle::Evaluator + :members: + + +Utils +===== + +SumEvaluator +------------ +.. doxygenclass:: paddle::SumEvaluator + :members: + +ColumnSumEvaluator +------------------ +.. doxygenclass:: paddle::ColumnSumEvaluator + :members: + +Classification +============== + +ClassificationErrorEvaluator +--------------------------- +.. doxygenclass:: paddle::ClassificationErrorEvaluator + :members: + +SequenceClassificationErrorEvaluator +------------------------------------ +.. 
doxygenclass:: paddle::SequenceClassificationErrorEvaluator + :members: + +AucEvaluator +------------- +.. doxygenclass:: paddle::AucEvaluator + :members: + +PrecisionRecallEvaluator +------------------------ +.. doxygenclass:: paddle::PrecisionRecallEvaluator + :members: + +ChunkEvaluator +-------------- +.. doxygenclass:: paddle::ChunkEvaluator + :members: + +CTCEvaluator +------------ +.. doxygenclass:: paddle::CTCErrorEvaluator + :members: + + +Rank +==== + +PnpairEvaluator +------------- +.. doxygenclass:: paddle::PnpairEvaluator + :members: + +AucEvaluator +------------- +.. doxygenclass:: paddle::RankAucEvaluator + :members: + + +Printer +======= + +ValuePrinter +------------- +.. doxygenclass:: paddle::ValuePrinter + :members: + +GradientPrinter +--------------- +.. doxygenclass:: paddle::GradientPrinter + :members: + +MaxIdPrinter +------------ +.. doxygenclass:: paddle::MaxIdPrinter + :members: + +MaxFramePrinter +--------------- +.. doxygenclass:: paddle::MaxFramePrinter + :members: + +SequenceTextPrinter +------------------ +.. doxygenclass:: paddle::SequenceTextPrinter + :members: + +ClassificationErrorPrinter +-------------------------- +.. doxygenclass:: paddle::ClassificationErrorPrinter + :members: diff --git a/release_doc/0.9.0/doc/_sources/source/gserver/evaluators/index.txt b/release_doc/0.9.0/doc/_sources/source/gserver/evaluators/index.txt new file mode 100644 index 0000000000000000000000000000000000000000..298de3e1a32d36b9102f5ad64cc1b968f418041b --- /dev/null +++ b/release_doc/0.9.0/doc/_sources/source/gserver/evaluators/index.txt @@ -0,0 +1,7 @@ +Evaluators +========== + +.. 
toctree:: + :maxdepth: 3 + + evaluators.rst diff --git a/release_doc/0.9.0/doc/_sources/source/gserver/gradientmachines/gradientmachines.txt b/release_doc/0.9.0/doc/_sources/source/gserver/gradientmachines/gradientmachines.txt new file mode 100644 index 0000000000000000000000000000000000000000..3607664c850cdf4df4e10151b05f15e275adceaf --- /dev/null +++ b/release_doc/0.9.0/doc/_sources/source/gserver/gradientmachines/gradientmachines.txt @@ -0,0 +1,40 @@ +Gradient Machines +================ + +GradientMachine +--------------------- +.. doxygenclass:: paddle::GradientMachine + :members: + +GradientMachineModel +-------------------- +.. doxygenclass:: paddle::IGradientMachineMode + :members: + +MultiGradientMachine +--------------------- +.. doxygenclass:: paddle::MultiGradientMachine + :members: + +TrainerThread +````````````` +.. doxygenclass:: paddle::TrainerThread + :members: + +Recurrent Gradient Machines +--------------------------- +.. doxygenclass:: paddle::RecurrentGradientMachine + :members: + +Networks +======== + +NeuralNetwork +------------- +.. doxygenclass:: paddle::NeuralNetwork + :members: + +ParallelNeuralNetwork +--------------------- +.. doxygenclass:: paddle::ParallelNeuralNetwork + :members: diff --git a/release_doc/0.9.0/doc/_sources/source/gserver/gradientmachines/index.txt b/release_doc/0.9.0/doc/_sources/source/gserver/gradientmachines/index.txt new file mode 100644 index 0000000000000000000000000000000000000000..997c29a102f53c165c70ff11cd9650b83bcecf44 --- /dev/null +++ b/release_doc/0.9.0/doc/_sources/source/gserver/gradientmachines/index.txt @@ -0,0 +1,7 @@ +Gradient Machines Documents +============================= + +.. 
toctree:: + :maxdepth: 3 + + gradientmachines.rst diff --git a/release_doc/0.9.0/doc/_sources/source/gserver/layers/index.txt b/release_doc/0.9.0/doc/_sources/source/gserver/layers/index.txt new file mode 100644 index 0000000000000000000000000000000000000000..559c5436b10a5977ac347611639b32d43f1ed123 --- /dev/null +++ b/release_doc/0.9.0/doc/_sources/source/gserver/layers/index.txt @@ -0,0 +1,7 @@ +Layers Documents +==================== + +.. toctree:: + :maxdepth: 3 + + layer.rst diff --git a/release_doc/0.9.0/doc/_sources/source/gserver/layers/layer.txt b/release_doc/0.9.0/doc/_sources/source/gserver/layers/layer.txt new file mode 100644 index 0000000000000000000000000000000000000000..4b8e149505f0695ad2fa4be967a50d1a0ac48b43 --- /dev/null +++ b/release_doc/0.9.0/doc/_sources/source/gserver/layers/layer.txt @@ -0,0 +1,542 @@ +Base +====== + +Layer +----- +.. doxygenclass:: paddle::Layer + :members: + +Projection +---------- +.. doxygenclass:: paddle::Projection + :members: + +Operator +-------- +.. doxygenclass:: paddle::Operator + :members: + +Data Layer +=========== + +.. doxygenclass:: paddle::DataLayer + :members: + +Fully Connected Layers +====================== + +FullyConnectedLayer +------------------- +.. doxygenclass:: paddle::FullyConnectedLayer + :members: + +SelectiveFullyConnectedLayer +---------------------------- +.. doxygenclass:: paddle::SelectiveFullyConnectedLayer + :members: + +Conv Layers +=========== + +ConvBaseLayer +------------- +.. doxygenclass:: paddle::ConvBaseLayer + :members: + +ConvOperator +------------ +.. doxygenclass:: paddle::ConvOperator + :members: + +ConvShiftLayer +-------------- +.. doxygenclass:: paddle::ConvShiftLayer + :members: + +CudnnConvLayer +-------------- +.. doxygenclass:: paddle::CudnnConvLayer + :members: + +ExpandConvLayer +--------------- +.. doxygenclass:: paddle::ExpandConvLayer + :members: + +ContextProjection +----------------- +.. 
doxygenclass:: paddle::ContextProjection + :members: + +Pooling Layers +============== + +PoolLayer +--------- +.. doxygenclass:: paddle::PoolLayer + :members: + +PoolProjectionLayer +------------------- +.. doxygenclass:: paddle::PoolProjectionLayer + :members: + +CudnnPoolLayer +-------------- +.. doxygenclass:: paddle::CudnnPoolLayer + :members: + +Norm Layers +=========== + +NormLayer +--------- +.. doxygenclass:: paddle::NormLayer + :members: + +CMRProjectionNormLayer +---------------------- +.. doxygenclass:: paddle::CMRProjectionNormLayer + :members: + +DataNormLayer +------------- +.. doxygenclass:: paddle::DataNormLayer + :members: + +ResponseNormLayer +----------------- +.. doxygenclass:: paddle::ResponseNormLayer + :members: + +BatchNormBaseLayer +------------------ +.. doxygenclass:: paddle::BatchNormBaseLayer + :members: + +BatchNormalizationLayer +----------------------- +.. doxygenclass:: paddle::BatchNormalizationLayer + :members: + +CudnnBatchNormLayer +----------------------- +.. doxygenclass:: paddle::CudnnBatchNormLayer + :members: + +SumToOneNormLayer +----------------- +.. doxygenclass:: paddle::SumToOneNormLayer + :members: + +Activation Layer +================ + +ParameterReluLayer +------------------ +.. doxygenclass:: paddle::ParameterReluLayer + :members: + +Recurrent Layers +================ + +RecurrentLayer +-------------- +.. doxygenclass:: paddle::RecurrentLayer + :members: + +SequenceToBatch +--------------- +.. doxygenclass:: paddle::SequenceToBatch + :members: + +LSTM +---- +LstmLayer +````````` +.. doxygenclass:: paddle::LstmLayer + :members: + +LstmStepLayer +````````````` +.. doxygenclass:: paddle::LstmStepLayer + :members: + +LstmCompute +``````````` +.. doxygenclass:: paddle::LstmCompute + :members: + +MDLSTM +------ +MDLstmLayer +``````````` +.. doxygenclass:: paddle::MDLstmLayer + :members: + +CoordIterator +````````````` +.. 
doxygenclass:: paddle::CoordIterator + :members: + +GRU +--- +GatedRecurrentLayer +``````````````````` +.. doxygenclass:: paddle::GatedRecurrentLayer + :members: + +GruStepLayer +```````````` +.. doxygenclass:: paddle::GruStepLayer + :members: + +GruCompute +`````````` +.. doxygenclass:: paddle::GruCompute + :members: + +Recurrent Layer Group +===================== + +AgentLayer +---------- +.. doxygenclass:: paddle::AgentLayer + :members: + +SequenceAgentLayer +------------------ +.. doxygenclass:: paddle::SequenceAgentLayer + :members: + +GatherAgentLayer +---------------- +.. doxygenclass:: paddle::GatherAgentLayer + :members: + +SequenceGatherAgentLayer +------------------------ +.. doxygenclass:: paddle::SequenceGatherAgentLayer + :members: + +ScatterAgentLayer +----------------- +.. doxygenclass:: paddle::ScatterAgentLayer + :members: + +SequenceScatterAgentLayer +------------------------- +.. doxygenclass:: paddle::SequenceScatterAgentLayer + :members: + +GetOutputLayer +-------------- +.. doxygenclass:: paddle::GetOutputLayer + :members: + +Mixed Layer +=========== +.. doxygenclass:: paddle::MixedLayer + :members: + +DotMulProjection +---------------- +.. doxygenclass:: paddle::DotMulProjection + :members: + +DotMulOperator +-------------- +.. doxygenclass:: paddle::DotMulOperator + :members: + +FullMatrixProjection +-------------------- +.. doxygenclass:: paddle::FullMatrixProjection + :members: + +IdentityProjection +------------------ +.. doxygenclass:: paddle::IdentityProjection + :members: + +IdentityOffsetProjection +------------------------ +.. doxygenclass:: paddle::IdentityOffsetProjection + :members: + +TableProjection +--------------- +.. doxygenclass:: paddle::TableProjection + :members: + +TransposedFullMatrixProjection +------------------------------ +.. doxygenclass:: paddle::TransposedFullMatrixProjection + :members: + +Aggregate Layers +================ + +Aggregate +--------- +AverageLayer +```````````` +.. 
doxygenclass:: paddle::AverageLayer + :members: + +MaxLayer +```````` +.. doxygenclass:: paddle::MaxLayer + :members: + +SequenceLastInstanceLayer +````````````````````````` +.. doxygenclass:: paddle::SequenceLastInstanceLayer + :members: + +Concat +------ +ConcatenateLayer +```````````````` +.. doxygenclass:: paddle::ConcatenateLayer + :members: + +ConcatenateLayer2 +````````````````` +.. doxygenclass:: paddle::ConcatenateLayer2 + :members: + +SequenceConcatLayer +``````````````````` +.. doxygenclass:: paddle::SequenceConcatLayer + :members: + +Subset +------ +SubSequenceLayer +```````````````` +.. doxygenclass:: paddle::SubSequenceLayer + :members: + +Reshaping Layers +================ + +BlockExpandLayer +---------------- +.. doxygenclass:: paddle::BlockExpandLayer + :members: + +ExpandLayer +----------- +.. doxygenclass:: paddle::ExpandLayer + :members: + +FeatureMapExpandLayer +--------------------- +.. doxygenclass:: paddle::FeatureMapExpandLayer + :members: + +ResizeLayer +----------- +.. doxygenclass:: paddle::ResizeLayer + :members: + +SequenceReshapeLayer +-------------------- +.. doxygenclass:: paddle::SequenceReshapeLayer + :members: + +Math Layers +=========== + +AddtoLayer +---------- +.. doxygenclass:: paddle::AddtoLayer + :members: + +ConvexCombinationLayer +---------------------- +.. doxygenclass:: paddle::ConvexCombinationLayer + :members: + +InterpolationLayer +------------------ +.. doxygenclass:: paddle::InterpolationLayer + :members: + +MultiplexLayer +-------------- +.. doxygenclass:: paddle::MultiplexLayer + :members: + +OuterProdLayer +-------------- +.. doxygenclass:: paddle::OuterProdLayer + :members: + +PowerLayer +---------- +.. doxygenclass:: paddle::PowerLayer + :members: + +ScalingLayer +------------ +.. doxygenclass:: paddle::ScalingLayer + :members: + +SlopeInterceptLayer +------------------- +.. doxygenclass:: paddle::SlopeInterceptLayer + :members: + +TensorLayer +------------ +.. 
doxygenclass:: paddle::TensorLayer + :members: + +TransLayer +---------- +.. doxygenclass:: paddle::TransLayer + :members: + +Sampling Layers +=============== + +MultinomialSampler +------------------ +.. doxygenclass:: paddle::MultinomialSampler + :members: + +MaxIdLayer +---------- +.. doxygenclass:: paddle::MaxIdLayer + :members: + +SamplingIdLayer +--------------- +.. doxygenclass:: paddle::SamplingIdLayer + :members: + +Cost Layers +=========== + +CostLayer +----------- +.. doxygenclass:: paddle::CostLayer + :members: + +HuberTwoClass +````````````` +.. doxygenclass:: paddle::HuberTwoClass + :members: + +LambdaCost +``````````` +.. doxygenclass:: paddle::LambdaCost + :members: + +MultiBinaryLabelCrossEntropy +```````````````````````````` +.. doxygenclass:: paddle::MultiBinaryLabelCrossEntropy + :members: + +MultiClassCrossEntropy +``````````````````````` +.. doxygenclass:: paddle::MultiClassCrossEntropy + :members: + +MultiClassCrossEntropyWithSelfNorm +`````````````````````````````````` +.. doxygenclass:: paddle::MultiClassCrossEntropyWithSelfNorm + :members: + +RankingCost +``````````` +.. doxygenclass:: paddle::RankingCost + :members: + +SoftBinaryClassCrossEntropy +``````````````````````````` +.. doxygenclass:: paddle::SoftBinaryClassCrossEntropy + :members: + +SumOfSquaresCostLayer +````````````````````` +.. doxygenclass:: paddle::SumOfSquaresCostLayer + :members: + +SumCostLayer +````````````````````` +.. doxygenclass:: paddle::SumCostLayer + :members: + +CosSimLayer +----------- +.. doxygenclass:: paddle::CosSimLayer + :members: + +CosSimVecMatLayer +----------------- +.. doxygenclass:: paddle::CosSimVecMatLayer + :members: + +CRFDecodingLayer +---------------- +.. doxygenclass:: paddle::CRFDecodingLayer + :members: + +CRFLayer +-------- +.. doxygenclass:: paddle::CRFLayer + :members: + +CTCLayer +-------- +.. doxygenclass:: paddle::CTCLayer + :members: + +HierarchicalSigmoidLayer +------------------------ +.. 
doxygenclass:: paddle::HierarchicalSigmoidLayer + :members: + +LinearChainCRF +-------------- +.. doxygenclass:: paddle::LinearChainCRF + :members: + +LinearChainCTC +-------------- +.. doxygenclass:: paddle::LinearChainCTC + :members: + +NCELayer +-------- +.. doxygenclass:: paddle::NCELayer + :members: + +Validation Layers +----------------- + +ValidationLayer +``````````````` +.. doxygenclass:: paddle::ValidationLayer + :members: + +AucValidation +````````````` +.. doxygenclass:: paddle::AucValidation + :members: + +PnpairValidation +```````````````` +.. doxygenclass:: paddle::PnpairValidation + :members: + +Check Layers +============ + +EosIdCheckLayer +--------------- +.. doxygenclass:: paddle::EosIdCheckLayer + :members: diff --git a/release_doc/0.9.0/doc/_sources/source/index.txt b/release_doc/0.9.0/doc/_sources/source/index.txt new file mode 100644 index 0000000000000000000000000000000000000000..55fcdeb3dfcedd8589bf7986682708a957c05746 --- /dev/null +++ b/release_doc/0.9.0/doc/_sources/source/index.txt @@ -0,0 +1,49 @@ +# Source Code Documents + +## cuda + +- [CUDA](cuda/cuda/index.rst) +- [Matrix](cuda/matrix/index.rst) +- [RNN](cuda/rnn/index.rst) +- [Utils](cuda/utils/index.rst) + +## gserver + +- [Activations](gserver/activations/index.rst) +- [Data Providers](gserver/dataprovider/index.rst) +- [Evaluators](gserver/evaluators/index.rst) +- [Gradient Machines](gserver/gradientmachines/index.rst) +- [Layers](gserver/layers/index.rst) + +## math + +- [Matrix](math/matrix/index.rst) +- [Utils](math/utils/index.rst) + +## parameter + +- [Parameter](parameter/parameter/index.rst) +- [Update](parameter/update/index.rst) +- [Optimizer](parameter/optimizer/index.rst) + +## pserver + +- [Client](pserver/client/index.rst) +- [Network](pserver/network/index.rst) +- [Server](pserver/server/index.rst) + +## trainer + +- [Trainer](trainer/trainer.rst) + +## api + +- [API](api/api.rst) + +## utils + +- [CustomStackTrace](utils/customStackTrace.rst) +- [Enumeration 
wrapper](utils/enum.rst) +- [Lock](utils/lock.rst) +- [Queue](utils/queue.rst) +- [Thread](utils/thread.rst) diff --git a/release_doc/0.9.0/doc/_sources/source/math/matrix/index.txt b/release_doc/0.9.0/doc/_sources/source/math/matrix/index.txt new file mode 100644 index 0000000000000000000000000000000000000000..68410f2a27b68c87087f2c17de351495ac6a6cd0 --- /dev/null +++ b/release_doc/0.9.0/doc/_sources/source/math/matrix/index.txt @@ -0,0 +1,7 @@ +Matrix Documents +==================== + +.. toctree:: + :maxdepth: 3 + + matrix.rst diff --git a/release_doc/0.9.0/doc/_sources/source/math/matrix/matrix.txt b/release_doc/0.9.0/doc/_sources/source/math/matrix/matrix.txt new file mode 100644 index 0000000000000000000000000000000000000000..b12e3934f4705d4a2b7d3d790873701ddfe27d9f --- /dev/null +++ b/release_doc/0.9.0/doc/_sources/source/math/matrix/matrix.txt @@ -0,0 +1,20 @@ +Matrix +======= + +Base +-------- +.. doxygenfile:: paddle/math/BaseMatrix.h + +Sparse Matrix +---------------- +.. doxygenfile:: paddle/math/Matrix.h +.. doxygenfile:: paddle/math/Vector.h +.. doxygenfile:: paddle/math/MathUtils.h +.. doxygenfile:: paddle/math/SparseMatrix.h +.. doxygenfile:: paddle/math/SparseRowMatrix.h +.. doxygenfile:: paddle/math/CpuSparseMatrix.h + +Others +---------- +.. doxygenfile:: paddle/math/MathFunctions.h +.. doxygenfile:: paddle/math/SIMDFunctions.h diff --git a/release_doc/0.9.0/doc/_sources/source/math/utils/index.txt b/release_doc/0.9.0/doc/_sources/source/math/utils/index.txt new file mode 100644 index 0000000000000000000000000000000000000000..e5fe335da29b957706ed52662682d11c425e5908 --- /dev/null +++ b/release_doc/0.9.0/doc/_sources/source/math/utils/index.txt @@ -0,0 +1,7 @@ +Utils Documents +==================== + +.. 
toctree:: + :maxdepth: 3 + + utils.rst diff --git a/release_doc/0.9.0/doc/_sources/source/math/utils/utils.txt b/release_doc/0.9.0/doc/_sources/source/math/utils/utils.txt new file mode 100644 index 0000000000000000000000000000000000000000..3df721a47b93bce950185f2d6ffe22d4a801af30 --- /dev/null +++ b/release_doc/0.9.0/doc/_sources/source/math/utils/utils.txt @@ -0,0 +1,9 @@ +Utils +======= + +Memory Handle +-------------- +.. doxygenfile:: paddle/math/MemoryHandle.h +.. doxygenfile:: paddle/math/Allocator.h +.. doxygenfile:: paddle/math/PoolAllocator.h +.. doxygenfile:: paddle/math/Storage.h diff --git a/release_doc/0.9.0/doc/_sources/source/parameter/optimizer/index.txt b/release_doc/0.9.0/doc/_sources/source/parameter/optimizer/index.txt new file mode 100644 index 0000000000000000000000000000000000000000..3338af5608a03ee853e3a5f16d2483b810215514 --- /dev/null +++ b/release_doc/0.9.0/doc/_sources/source/parameter/optimizer/index.txt @@ -0,0 +1,7 @@ +Parameter Documents +==================== + +.. toctree:: + :maxdepth: 3 + + optimizer.rst diff --git a/release_doc/0.9.0/doc/_sources/source/parameter/optimizer/optimizer.txt b/release_doc/0.9.0/doc/_sources/source/parameter/optimizer/optimizer.txt new file mode 100644 index 0000000000000000000000000000000000000000..3d9e49217eb17541c14d8d64715278e62c99d2b4 --- /dev/null +++ b/release_doc/0.9.0/doc/_sources/source/parameter/optimizer/optimizer.txt @@ -0,0 +1,7 @@ +Optimizer +============ + +.. doxygenfile:: paddle/parameter/FirstOrderOptimizer.h +.. doxygenfile:: paddle/parameter/AverageOptimizer.h +.. doxygenfile:: paddle/parameter/ParameterOptimizer.h +.. 
doxygenfile:: paddle/parameter/OptimizerWithRegularizer.h diff --git a/release_doc/0.9.0/doc/_sources/source/parameter/parameter/index.txt b/release_doc/0.9.0/doc/_sources/source/parameter/parameter/index.txt new file mode 100644 index 0000000000000000000000000000000000000000..e7ed70ec4c87b3613cd8450f1e7fca1fb974afca --- /dev/null +++ b/release_doc/0.9.0/doc/_sources/source/parameter/parameter/index.txt @@ -0,0 +1,7 @@ +Parameter Documents +==================== + +.. toctree:: + :maxdepth: 3 + + parameter.rst diff --git a/release_doc/0.9.0/doc/_sources/source/parameter/parameter/parameter.txt b/release_doc/0.9.0/doc/_sources/source/parameter/parameter/parameter.txt new file mode 100644 index 0000000000000000000000000000000000000000..2b7afdb4093753598d73c686b1dc81b970d199d5 --- /dev/null +++ b/release_doc/0.9.0/doc/_sources/source/parameter/parameter/parameter.txt @@ -0,0 +1,16 @@ +Parameter +============= + +Weight +-------- +.. doxygenfile:: paddle/parameter/Weight.h + +Regularizer +------------ +.. doxygenfile:: paddle/parameter/Regularizer.h + +Parameter +------------- +.. doxygenfile:: paddle/parameter/Argument.h +.. doxygenfile:: paddle/parameter/Parameter.h +.. doxygenfile:: paddle/parameter/ParallelParameter.h diff --git a/release_doc/0.9.0/doc/_sources/source/parameter/update/index.txt b/release_doc/0.9.0/doc/_sources/source/parameter/update/index.txt new file mode 100644 index 0000000000000000000000000000000000000000..1bbd73319396e7b8ea32c78e0fe3569919bacf2d --- /dev/null +++ b/release_doc/0.9.0/doc/_sources/source/parameter/update/index.txt @@ -0,0 +1,7 @@ +Parameter Documents +==================== + +.. 
toctree:: + :maxdepth: 3 + + update.rst diff --git a/release_doc/0.9.0/doc/_sources/source/parameter/update/update.txt b/release_doc/0.9.0/doc/_sources/source/parameter/update/update.txt new file mode 100644 index 0000000000000000000000000000000000000000..c417602f0338dbd84ae2bd2ca4eb09330202a0e8 --- /dev/null +++ b/release_doc/0.9.0/doc/_sources/source/parameter/update/update.txt @@ -0,0 +1,7 @@ +Update +========== + +.. doxygenfile:: paddle/parameter/ParameterUpdaterBase.h +.. doxygenfile:: paddle/parameter/ParameterUpdaterHook.h +.. doxygenfile:: paddle/parameter/ParameterUpdateFunctions.h + diff --git a/release_doc/0.9.0/doc/_sources/source/pserver/client/client.txt b/release_doc/0.9.0/doc/_sources/source/pserver/client/client.txt new file mode 100644 index 0000000000000000000000000000000000000000..fc7ed90d3dc8beb0baa30d63ccc956fbba2a4e4c --- /dev/null +++ b/release_doc/0.9.0/doc/_sources/source/pserver/client/client.txt @@ -0,0 +1,14 @@ +Client +========= + +.. doxygenclass:: paddle::BaseClient + :members: + :protected-members: + :private-members: + :undoc-members: + +.. doxygenclass:: paddle::ParameterClient2 + :members: + :protected-members: + :private-members: + :undoc-members: diff --git a/release_doc/0.9.0/doc/_sources/source/pserver/client/index.txt b/release_doc/0.9.0/doc/_sources/source/pserver/client/index.txt new file mode 100644 index 0000000000000000000000000000000000000000..dc924c9ca8e7b9965638fd299dc2f5e78591c91b --- /dev/null +++ b/release_doc/0.9.0/doc/_sources/source/pserver/client/index.txt @@ -0,0 +1,7 @@ +Client Documents +==================== + +.. 
toctree:: + :maxdepth: 3 + + client.rst diff --git a/release_doc/0.9.0/doc/_sources/source/pserver/network/index.txt b/release_doc/0.9.0/doc/_sources/source/pserver/network/index.txt new file mode 100644 index 0000000000000000000000000000000000000000..2fdf95e17d339d69de8e027d92cbb385e2bd51ec --- /dev/null +++ b/release_doc/0.9.0/doc/_sources/source/pserver/network/index.txt @@ -0,0 +1,7 @@ +Network Documents +==================== + +.. toctree:: + :maxdepth: 3 + + network.rst diff --git a/release_doc/0.9.0/doc/_sources/source/pserver/network/network.txt b/release_doc/0.9.0/doc/_sources/source/pserver/network/network.txt new file mode 100644 index 0000000000000000000000000000000000000000..e000ff8dbbdc37e9d638d18d20a8ba53e21dd245 --- /dev/null +++ b/release_doc/0.9.0/doc/_sources/source/pserver/network/network.txt @@ -0,0 +1,42 @@ +Network +========== + +Socket Server +---------------- +.. doxygenclass:: paddle::SocketServer + :members: + :protected-members: + :private-members: + :undoc-members: + +Socket Worker +---------------- +.. doxygenclass:: paddle::SocketWorker + :members: + :protected-members: + :private-members: + :undoc-members: + +Socket Client +---------------- +.. doxygenclass:: paddle::SocketClient + :members: + :protected-members: + :private-members: + :undoc-members: + +Socket Channel +--------------- +.. doxygenclass:: paddle::SocketChannel + :members: + :protected-members: + :private-members: + :undoc-members: + +Message Reader +--------------- +.. doxygenclass:: paddle::MsgReader + :members: + :protected-members: + :private-members: + :undoc-members: diff --git a/release_doc/0.9.0/doc/_sources/source/pserver/server/index.txt b/release_doc/0.9.0/doc/_sources/source/pserver/server/index.txt new file mode 100644 index 0000000000000000000000000000000000000000..09e3530bfeaf56ebbadb1694a69a036813e8970f --- /dev/null +++ b/release_doc/0.9.0/doc/_sources/source/pserver/server/index.txt @@ -0,0 +1,7 @@ +Server Documents +==================== + +.. 
toctree:: + :maxdepth: 3 + + server.rst diff --git a/release_doc/0.9.0/doc/_sources/source/pserver/server/server.txt b/release_doc/0.9.0/doc/_sources/source/pserver/server/server.txt new file mode 100644 index 0000000000000000000000000000000000000000..f3110fdd731d246ce4211d05e32ddd98584bdbb7 --- /dev/null +++ b/release_doc/0.9.0/doc/_sources/source/pserver/server/server.txt @@ -0,0 +1,14 @@ +Server +========== + +.. doxygenclass:: paddle::ProtoServer + :members: + :protected-members: + :private-members: + :undoc-members: + +.. doxygenclass:: paddle::ParameterServer2 + :members: + :protected-members: + :private-members: + :undoc-members: diff --git a/release_doc/0.9.0/doc/_sources/source/trainer/trainer.txt b/release_doc/0.9.0/doc/_sources/source/trainer/trainer.txt new file mode 100644 index 0000000000000000000000000000000000000000..12c24597e7f99cd489204602ae25a89d7b960630 --- /dev/null +++ b/release_doc/0.9.0/doc/_sources/source/trainer/trainer.txt @@ -0,0 +1,32 @@ +Trainer +======= + +TrainerStats +------------ + +.. doxygenclass:: paddle::TrainerStats + :members: + +RemoteParameterUpdater +----------------------- + +.. doxygenclass:: paddle::RemoteParameterUpdater + :members: + +ConcurrentRemoteParameterUpdater +--------------------------------- + +.. doxygenclass:: paddle::ConcurrentRemoteParameterUpdater + :members: + +SparseRemoteParameterUpdater +---------------------------- + +.. doxygenclass:: paddle::SparseRemoteParameterUpdater + :members: + +SparseRemoteParameterUpdaterComposite +------------------------------------- + +.. 
doxygenclass:: paddle::SparseRemoteParameterUpdaterComposite + :members: diff --git a/release_doc/0.9.0/doc/_sources/source/utils/customStackTrace.txt b/release_doc/0.9.0/doc/_sources/source/utils/customStackTrace.txt new file mode 100644 index 0000000000000000000000000000000000000000..a4e6f05a406f33256548fc0ef32bbbf3daff1536 --- /dev/null +++ b/release_doc/0.9.0/doc/_sources/source/utils/customStackTrace.txt @@ -0,0 +1,9 @@ +CustomStackTrace +================ + + +class CustomStackTrace +---------------------- + +.. doxygenclass:: paddle::CustomStackTrace + :members: diff --git a/release_doc/0.9.0/doc/_sources/source/utils/enum.txt b/release_doc/0.9.0/doc/_sources/source/utils/enum.txt new file mode 100644 index 0000000000000000000000000000000000000000..17166d35f7cfa63e51058cc5f86165b1e22bbe1e --- /dev/null +++ b/release_doc/0.9.0/doc/_sources/source/utils/enum.txt @@ -0,0 +1,9 @@ +enumeration_wrapper +=================== + + +namespace paddle::enumeration_wrapper +------------------------------------- + +.. doxygennamespace:: paddle::enumeration_wrapper + diff --git a/release_doc/0.9.0/doc/_sources/source/utils/lock.txt b/release_doc/0.9.0/doc/_sources/source/utils/lock.txt new file mode 100644 index 0000000000000000000000000000000000000000..0b027e403f49fc1720904cf4b502d81e4148e1e3 --- /dev/null +++ b/release_doc/0.9.0/doc/_sources/source/utils/lock.txt @@ -0,0 +1,37 @@ +Thread +====== + + +class Thread +------------ + +.. doxygenclass:: paddle::Thread + :members: + + +class ThreadWorker +------------------ + +.. doxygenclass:: paddle::ThreadWorker + :members: + + +class SyncThreadPool +-------------------- + +.. doxygenclass:: paddle::SyncThreadPool + :members: + + +class MultiThreadWorker +----------------------- + +.. doxygenclass:: paddle::MultiThreadWorker + :members: + + +class AsyncThreadPool +--------------------- + +.. 
doxygenclass:: paddle::AsyncThreadPool + :members: diff --git a/release_doc/0.9.0/doc/_sources/source/utils/queue.txt b/release_doc/0.9.0/doc/_sources/source/utils/queue.txt new file mode 100644 index 0000000000000000000000000000000000000000..72a464ca67288d0d0e24980d59c3bbc85f111081 --- /dev/null +++ b/release_doc/0.9.0/doc/_sources/source/utils/queue.txt @@ -0,0 +1,16 @@ +Queue +===== + + +class Queue +------------ + +.. doxygenclass:: paddle::Queue + :members: + + +class BlockingQueue +------------------- + +.. doxygenclass:: paddle::BlockingQueue + :members: diff --git a/release_doc/0.9.0/doc/_sources/source/utils/thread.txt b/release_doc/0.9.0/doc/_sources/source/utils/thread.txt new file mode 100644 index 0000000000000000000000000000000000000000..2eb67dde6a945cc8e250989f7fc8cefed942950e --- /dev/null +++ b/release_doc/0.9.0/doc/_sources/source/utils/thread.txt @@ -0,0 +1,40 @@ +Lock +==== + + +class RWLock +------------ + +.. doxygenclass:: paddle::RWLock + :members: + +class ReadLockGuard +------------------- + +.. doxygenclass:: paddle::ReadLockGuard + :members: + +class SpinLock +-------------- + +.. doxygenclass:: paddle::SpinLock + :members: + +class Semaphore +--------------- + +.. doxygenclass:: paddle::Semaphore + :members: + +class ThreadBarrier +------------------- + +.. doxygenclass:: paddle::ThreadBarrier + :members: + +class LockedCondition +--------------------- + +.. doxygenclass:: paddle::LockedCondition + :members: + diff --git a/release_doc/0.9.0/doc/_sources/ui/api/trainer_config_helpers/activations.txt b/release_doc/0.9.0/doc/_sources/ui/api/trainer_config_helpers/activations.txt new file mode 100644 index 0000000000000000000000000000000000000000..269e6491e7ebe3899c3fb24fca756a393043473b --- /dev/null +++ b/release_doc/0.9.0/doc/_sources/ui/api/trainer_config_helpers/activations.txt @@ -0,0 +1,108 @@ +=========== +Activations +=========== + +BaseActivation +============== + +.. 
automodule:: paddle.trainer_config_helpers.activations + :members: BaseActivation + :noindex: + +AbsActivation +=============== + +.. automodule:: paddle.trainer_config_helpers.activations + :members: AbsActivation + :noindex: + +ExpActivation +=============== + +.. automodule:: paddle.trainer_config_helpers.activations + :members: ExpActivation + :noindex: + +IdentityActivation +================== + +.. automodule:: paddle.trainer_config_helpers.activations + :members: IdentityActivation + :noindex: + +LinearActivation +================== + +.. automodule:: paddle.trainer_config_helpers.activations + :members: LinearActivation + :noindex: + +LogActivation +================== + +.. automodule:: paddle.trainer_config_helpers.activations + :members: LogActivation + :noindex: + +SquareActivation +================ + +.. automodule:: paddle.trainer_config_helpers.activations + :members: SquareActivation + :noindex: + +SigmoidActivation +================= + +.. automodule:: paddle.trainer_config_helpers.activations + :members: SigmoidActivation + :noindex: + +SoftmaxActivation +================= + +.. automodule:: paddle.trainer_config_helpers.activations + :members: SoftmaxActivation + :noindex: + +SequenceSoftmaxActivation +========================= + +.. automodule:: paddle.trainer_config_helpers.activations + :members: SequenceSoftmaxActivation + :noindex: + +ReluActivation +============== + +.. automodule:: paddle.trainer_config_helpers.activations + :members: ReluActivation + :noindex: + +BReluActivation +=============== + +.. automodule:: paddle.trainer_config_helpers.activations + :members: BReluActivation + :noindex: + +SoftReluActivation +================== + +.. automodule:: paddle.trainer_config_helpers.activations + :members: SoftReluActivation + :noindex: + +TanhActivation +============== + +.. automodule:: paddle.trainer_config_helpers.activations + :members: TanhActivation + :noindex: + +STanhActivation +=============== + +.. 
automodule:: paddle.trainer_config_helpers.activations + :members: STanhActivation + :noindex: diff --git a/release_doc/0.9.0/doc/_sources/ui/api/trainer_config_helpers/attrs.txt b/release_doc/0.9.0/doc/_sources/ui/api/trainer_config_helpers/attrs.txt new file mode 100644 index 0000000000000000000000000000000000000000..44919aba90df0b9da7c311a62339052c16c44ad1 --- /dev/null +++ b/release_doc/0.9.0/doc/_sources/ui/api/trainer_config_helpers/attrs.txt @@ -0,0 +1,5 @@ +Parameter and Extra Layer Attribute +=================================== + +.. automodule:: paddle.trainer_config_helpers.attrs + :members: diff --git a/release_doc/0.9.0/doc/_sources/ui/api/trainer_config_helpers/data_sources.txt b/release_doc/0.9.0/doc/_sources/ui/api/trainer_config_helpers/data_sources.txt new file mode 100644 index 0000000000000000000000000000000000000000..44ea59df43762508e86c7b867fcf136d84c8351e --- /dev/null +++ b/release_doc/0.9.0/doc/_sources/ui/api/trainer_config_helpers/data_sources.txt @@ -0,0 +1,5 @@ +DataSources +=========== + +.. automodule:: paddle.trainer_config_helpers.data_sources + :members: diff --git a/release_doc/0.9.0/doc/_sources/ui/api/trainer_config_helpers/evaluators.txt b/release_doc/0.9.0/doc/_sources/ui/api/trainer_config_helpers/evaluators.txt new file mode 100644 index 0000000000000000000000000000000000000000..d6a79c13e2316b0fd3d53eb47960a767bcf8abdb --- /dev/null +++ b/release_doc/0.9.0/doc/_sources/ui/api/trainer_config_helpers/evaluators.txt @@ -0,0 +1,106 @@ +========== +Evaluators +========== + +Base +==== +.. automodule:: paddle.trainer_config_helpers.evaluators + :members: evaluator_base + :noindex: + +Classification +============== + +classification_error_evaluator +------------------------------ +.. automodule:: paddle.trainer_config_helpers.evaluators + :members: classification_error_evaluator + :noindex: + +auc_evaluator +------------- +.. 
automodule:: paddle.trainer_config_helpers.evaluators + :members: auc_evaluator + :noindex: + +ctc_error_evaluator +------------------- +.. automodule:: paddle.trainer_config_helpers.evaluators + :members: ctc_error_evaluator + :noindex: + +chunk_evaluator +--------------- +.. automodule:: paddle.trainer_config_helpers.evaluators + :members: chunk_evaluator + :noindex: + +precision_recall_evaluator +-------------------------- +.. automodule:: paddle.trainer_config_helpers.evaluators + :members: precision_recall_evaluator + :noindex: + +Rank +==== + +pnpair_evaluator +---------------- +.. automodule:: paddle.trainer_config_helpers.evaluators + :members: pnpair_evaluator + :noindex: + +Utils +===== + +sum_evaluator +------------- +.. automodule:: paddle.trainer_config_helpers.evaluators + :members: sum_evaluator + :noindex: + +column_sum_evaluator +-------------------- +.. automodule:: paddle.trainer_config_helpers.evaluators + :members: column_sum_evaluator + :noindex: + +Print +===== + +classification_error_printer_evaluator +-------------------------------------- +.. automodule:: paddle.trainer_config_helpers.evaluators + :members: classification_error_printer_evaluator + :noindex: + +gradient_printer_evaluator +-------------------------- +.. automodule:: paddle.trainer_config_helpers.evaluators + :members: gradient_printer_evaluator + :noindex: + +maxid_printer_evaluator +----------------------- +.. automodule:: paddle.trainer_config_helpers.evaluators + :members: maxid_printer_evaluator + :noindex: + +maxframe_printer_evaluator +--------------------------- +.. automodule:: paddle.trainer_config_helpers.evaluators + :members: maxframe_printer_evaluator + :noindex: + +seqtext_printer_evaluator +------------------------- +.. automodule:: paddle.trainer_config_helpers.evaluators + :members: seqtext_printer_evaluator + :noindex: + +value_printer_evaluator +----------------------- +.. 
automodule:: paddle.trainer_config_helpers.evaluators + :members: value_printer_evaluator + :noindex: + diff --git a/release_doc/0.9.0/doc/_sources/ui/api/trainer_config_helpers/index.txt b/release_doc/0.9.0/doc/_sources/ui/api/trainer_config_helpers/index.txt new file mode 100644 index 0000000000000000000000000000000000000000..8395eb75710b3e67ec0c5442f79c999bdacdff42 --- /dev/null +++ b/release_doc/0.9.0/doc/_sources/ui/api/trainer_config_helpers/index.txt @@ -0,0 +1,14 @@ +Model Config Interface +====================== + +.. toctree:: + :maxdepth: 1 + + optimizers.rst + data_sources.rst + layers.rst + activations.rst + poolings.rst + networks.rst + evaluators.rst + attrs.rst diff --git a/release_doc/0.9.0/doc/_sources/ui/api/trainer_config_helpers/layers.txt b/release_doc/0.9.0/doc/_sources/ui/api/trainer_config_helpers/layers.txt new file mode 100644 index 0000000000000000000000000000000000000000..4a02af396993207d305be488c993ce94cf20fe1d --- /dev/null +++ b/release_doc/0.9.0/doc/_sources/ui/api/trainer_config_helpers/layers.txt @@ -0,0 +1,445 @@ +====== +Layers +====== + +Base +====== + +LayerType +--------- +.. automodule:: paddle.trainer_config_helpers.layers + :members: LayerType + :noindex: + +LayerOutput +----------- +.. automodule:: paddle.trainer_config_helpers.layers + :members: LayerOutput + :noindex: + +Data layer +=========== + +data_layer +---------- +.. automodule:: paddle.trainer_config_helpers.layers + :members: data_layer + :noindex: + +Fully Connected Layers +====================== + +fc_layer +-------- +.. automodule:: paddle.trainer_config_helpers.layers + :members: fc_layer + :noindex: + +selective_fc_layer +------------------ +.. automodule:: paddle.trainer_config_helpers.layers + :members: selective_fc_layer + :noindex: + +Conv Layers +=========== + +conv_operator +------------- +.. automodule:: paddle.trainer_config_helpers.layers + :members: conv_operator + :noindex: + +conv_projection +--------------- +.. 
automodule:: paddle.trainer_config_helpers.layers + :members: conv_projection + :noindex: + +conv_shift_layer +------------------ +.. automodule:: paddle.trainer_config_helpers.layers + :members: conv_shift_layer + :noindex: + +img_conv_layer +-------------- +.. automodule:: paddle.trainer_config_helpers.layers + :members: img_conv_layer + :noindex: + +context_projection +------------------ +.. automodule:: paddle.trainer_config_helpers.layers + :members: context_projection + :noindex: + +Image Pooling Layer +=================== + +img_pool_layer +-------------- +.. automodule:: paddle.trainer_config_helpers.layers + :members: img_pool_layer + :noindex: + +spp_layer +-------------- +.. automodule:: paddle.trainer_config_helpers.layers + :members: spp_layer + :noindex: + +maxout_layer +------------ +.. automodule:: paddle.trainer_config_helpers.layers + :members: maxout_layer + :noindex: + +Norm Layer +========== + +img_cmrnorm_layer +----------------- +.. automodule:: paddle.trainer_config_helpers.layers + :members: img_cmrnorm_layer + :noindex: + +batch_norm_layer +--------------------- +.. automodule:: paddle.trainer_config_helpers.layers + :members: batch_norm_layer + :noindex: + +sum_to_one_norm_layer +--------------------- +.. automodule:: paddle.trainer_config_helpers.layers + :members: sum_to_one_norm_layer + :noindex: + +Recurrent Layers +================ + +recurrent_layer +----------------- +.. automodule:: paddle.trainer_config_helpers.layers + :members: recurrent_layer + :noindex: + +lstmemory +--------- +.. automodule:: paddle.trainer_config_helpers.layers + :members: lstmemory + :noindex: + +lstm_step_layer +--------------- +.. automodule:: paddle.trainer_config_helpers.layers + :members: lstm_step_layer + :noindex: + +grumemory +--------- +.. automodule:: paddle.trainer_config_helpers.layers + :members: grumemory + :noindex: + +gru_step_layer +--------------- +.. 
automodule:: paddle.trainer_config_helpers.layers + :members: gru_step_layer + :noindex: + +Recurrent Layer Group +===================== + +memory +------ +.. automodule:: paddle.trainer_config_helpers.layers + :members: memory + :noindex: + +recurrent_group +--------------- +.. automodule:: paddle.trainer_config_helpers.layers + :members: recurrent_group + :noindex: + +beam_search +------------ +.. automodule:: paddle.trainer_config_helpers.layers + :members: beam_search + :noindex: + +get_output_layer +----------------- +.. automodule:: paddle.trainer_config_helpers.layers + :members: get_output_layer + :noindex: + +Mixed Layer +=========== + +mixed_layer +----------- +.. automodule:: paddle.trainer_config_helpers.layers + :members: mixed_layer + :noindex: + +embedding_layer +--------------- +.. automodule:: paddle.trainer_config_helpers.layers + :members: embedding_layer + :noindex: + +scaling_projection +----------------- +.. automodule:: paddle.trainer_config_helpers.layers + :members: scaling_projection + :noindex: + +dotmul_projection +----------------- +.. automodule:: paddle.trainer_config_helpers.layers + :members: dotmul_projection + :noindex: + +dotmul_operator +--------------- +.. automodule:: paddle.trainer_config_helpers.layers + :members: dotmul_operator + :noindex: + +full_matrix_projection +---------------------- +.. automodule:: paddle.trainer_config_helpers.layers + :members: full_matrix_projection + :noindex: + +identity_projection +------------------- +.. automodule:: paddle.trainer_config_helpers.layers + :members: identity_projection + :noindex: + + +table_projection +---------------------- +.. automodule:: paddle.trainer_config_helpers.layers + :members: table_projection + :noindex: + +trans_full_matrix_projection +---------------------------- +.. automodule:: paddle.trainer_config_helpers.layers + :members: trans_full_matrix_projection + :noindex: + +Aggregate Layers +================ + +pooling_layer +------------- +.. 
automodule:: paddle.trainer_config_helpers.layers + :members: pooling_layer + :noindex: + +last_seq +-------- +.. automodule:: paddle.trainer_config_helpers.layers + :members: last_seq + :noindex: + +first_seq +--------- +.. automodule:: paddle.trainer_config_helpers.layers + :members: first_seq + :noindex: + +concat_layer +------------ +.. automodule:: paddle.trainer_config_helpers.layers + :members: concat_layer + :noindex: + +Reshaping Layers +================ + +block_expand_layer +------------------ +.. automodule:: paddle.trainer_config_helpers.layers + :members: block_expand_layer + :noindex: + +expand_layer +------------ +.. automodule:: paddle.trainer_config_helpers.layers + :members: expand_layer + :noindex: + +repeat_layer +------------ +.. automodule:: paddle.trainer_config_helpers.layers + :members: repeat_layer + :noindex: + +Math Layers +=========== + +addto_layer +----------- +.. automodule:: paddle.trainer_config_helpers.layers + :members: addto_layer + :noindex: + +linear_comb_layer +----------------- +.. automodule:: paddle.trainer_config_helpers.layers + :members: linear_comb_layer + :noindex: + +interpolation_layer +------------------- +.. automodule:: paddle.trainer_config_helpers.layers + :members: interpolation_layer + :noindex: + +bilinear_interp_layer +---------------------- +.. automodule:: paddle.trainer_config_helpers.layers + :members: bilinear_interp_layer + :noindex: + +power_layer +----------- +.. automodule:: paddle.trainer_config_helpers.layers + :members: power_layer + :noindex: + +scaling_layer +------------- +.. automodule:: paddle.trainer_config_helpers.layers + :members: scaling_layer + :noindex: + +slope_intercept_layer +---------------------- +.. automodule:: paddle.trainer_config_helpers.layers + :members: slope_intercept_layer + :noindex: + +tensor_layer +------------ +.. automodule:: paddle.trainer_config_helpers.layers + :members: tensor_layer + :noindex: + +cos_sim +------- +.. 
automodule:: paddle.trainer_config_helpers.layers + :members: cos_sim + :noindex: + +trans_layer +------------ +.. automodule:: paddle.trainer_config_helpers.layers + :members: trans_layer + :noindex: + +Sampling Layers +=============== + +maxid_layer +----------- +.. automodule:: paddle.trainer_config_helpers.layers + :members: maxid_layer + :noindex: + +sampling_id_layer +----------------- +.. automodule:: paddle.trainer_config_helpers.layers + :members: sampling_id_layer + :noindex: + +Cost Layers +=========== + +cross_entropy +------------- +.. automodule:: paddle.trainer_config_helpers.layers + :members: cross_entropy + :noindex: + +cross_entropy_with_selfnorm +--------------------------- +.. automodule:: paddle.trainer_config_helpers.layers + :members: cross_entropy_with_selfnorm + :noindex: + +multi_binary_label_cross_entropy +-------------------------------- +.. automodule:: paddle.trainer_config_helpers.layers + :members: multi_binary_label_cross_entropy + :noindex: + +huber_cost +---------- +.. automodule:: paddle.trainer_config_helpers.layers + :members: huber_cost + :noindex: + +lambda_cost +----------- +.. automodule:: paddle.trainer_config_helpers.layers + :members: lambda_cost + :noindex: + +rank_cost +--------- +.. automodule:: paddle.trainer_config_helpers.layers + :members: rank_cost + :noindex: + +crf_layer +----------------- +.. automodule:: paddle.trainer_config_helpers.layers + :members: crf_layer + :noindex: + +crf_decoding_layer +------------------- +.. automodule:: paddle.trainer_config_helpers.layers + :members: crf_decoding_layer + :noindex: + +ctc_layer +----------- +.. automodule:: paddle.trainer_config_helpers.layers + :members: ctc_layer + :noindex: + +nce_layer +----------- +.. automodule:: paddle.trainer_config_helpers.layers + :members: nce_layer + :noindex: + +hsigmoid +--------- +.. automodule:: paddle.trainer_config_helpers.layers + :members: hsigmoid + :noindex: + +sum_cost +--------- +.. 
automodule:: paddle.trainer_config_helpers.layers + :members: sum_cost + :noindex: + +Check Layer +============ + +eos_layer +------------ +.. automodule:: paddle.trainer_config_helpers.layers + :members: eos_layer + :noindex: diff --git a/release_doc/0.9.0/doc/_sources/ui/api/trainer_config_helpers/networks.txt b/release_doc/0.9.0/doc/_sources/ui/api/trainer_config_helpers/networks.txt new file mode 100644 index 0000000000000000000000000000000000000000..29c52c5ce3078f1755162dbbdd65a059d8ba9fa4 --- /dev/null +++ b/release_doc/0.9.0/doc/_sources/ui/api/trainer_config_helpers/networks.txt @@ -0,0 +1,119 @@ +======== +Networks +======== + +The networks module contains pieces of neural network that combine multiple layers. + +NLP +=== + +sequence_conv_pool +------------------ +.. automodule:: paddle.trainer_config_helpers.networks + :members: sequence_conv_pool + :noindex: + +text_conv_pool +-------------- +.. automodule:: paddle.trainer_config_helpers.networks + :members: text_conv_pool + :noindex: + +Images +====== + +img_conv_bn_pool +---------------- +.. automodule:: paddle.trainer_config_helpers.networks + :members: img_conv_bn_pool + :noindex: + +img_conv_group +-------------- +.. automodule:: paddle.trainer_config_helpers.networks + :members: img_conv_group + :noindex: + +simple_img_conv_pool +-------------------- +.. automodule:: paddle.trainer_config_helpers.networks + :members: simple_img_conv_pool + :noindex: + +vgg_16_network +--------------- +.. automodule:: paddle.trainer_config_helpers.networks + :members: vgg_16_network + :noindex: + +Recurrent +========= + +LSTM +---- + +lstmemory_unit +`````````````` +.. automodule:: paddle.trainer_config_helpers.networks + :members: lstmemory_unit + :noindex: + +lstmemory_group +``````````````` +.. automodule:: paddle.trainer_config_helpers.networks + :members: lstmemory_group + :noindex: + +simple_lstm +``````````` +.. 
automodule:: paddle.trainer_config_helpers.networks + :members: simple_lstm + :noindex: + +bidirectional_lstm +`````````````````` +.. automodule:: paddle.trainer_config_helpers.networks + :members: bidirectional_lstm + :noindex: + +GRU +--- + +gru_unit +```````` +.. automodule:: paddle.trainer_config_helpers.networks + :members: gru_unit + :noindex: + +gru_group +````````` +.. automodule:: paddle.trainer_config_helpers.networks + :members: gru_group + :noindex: + +simple_gru +`````````` +.. automodule:: paddle.trainer_config_helpers.networks + :members: simple_gru + :noindex: + +simple_attention +---------------- +.. automodule:: paddle.trainer_config_helpers.networks + :members: simple_attention + :noindex: + +Miscs +===== + +dropout_layer +-------------- +.. automodule:: paddle.trainer_config_helpers.networks + :members: dropout_layer + :noindex: + +outputs +------- +.. automodule:: paddle.trainer_config_helpers.networks + :members: outputs + :noindex: diff --git a/release_doc/0.9.0/doc/_sources/ui/api/trainer_config_helpers/optimizers.txt b/release_doc/0.9.0/doc/_sources/ui/api/trainer_config_helpers/optimizers.txt new file mode 100644 index 0000000000000000000000000000000000000000..7ca4e34156e273caf66cc71e6927bfb23bb5235e --- /dev/null +++ b/release_doc/0.9.0/doc/_sources/ui/api/trainer_config_helpers/optimizers.txt @@ -0,0 +1,57 @@ +========== +Optimizers +========== + +BaseSGDOptimizer +================ +.. automodule:: paddle.trainer_config_helpers.optimizers + :members: BaseSGDOptimizer + :noindex: + +MomentumOptimizer +================= +.. automodule:: paddle.trainer_config_helpers.optimizers + :members: MomentumOptimizer + :noindex: + +AdamOptimizer +============= +.. automodule:: paddle.trainer_config_helpers.optimizers + :members: AdamOptimizer + :noindex: + +AdamaxOptimizer +================ +.. automodule:: paddle.trainer_config_helpers.optimizers + :members: AdamaxOptimizer + :noindex: + +AdaGradOptimizer +================ +.. 
automodule:: paddle.trainer_config_helpers.optimizers + :members: AdaGradOptimizer + :noindex: + +DecayedAdaGradOptimizer +======================= +.. automodule:: paddle.trainer_config_helpers.optimizers + :members: DecayedAdaGradOptimizer + :noindex: + +AdaDeltaOptimizer +================= +.. automodule:: paddle.trainer_config_helpers.optimizers + :members: AdaDeltaOptimizer + :noindex: + +RMSPropOptimizer +================ +.. automodule:: paddle.trainer_config_helpers.optimizers + :members: RMSPropOptimizer + :noindex: + +settings +======== +.. automodule:: paddle.trainer_config_helpers.optimizers + :members: settings + :noindex: diff --git a/release_doc/0.9.0/doc/_sources/ui/api/trainer_config_helpers/poolings.txt b/release_doc/0.9.0/doc/_sources/ui/api/trainer_config_helpers/poolings.txt new file mode 100644 index 0000000000000000000000000000000000000000..66566809d26f59263597b5286c5b27e0bbc9415a --- /dev/null +++ b/release_doc/0.9.0/doc/_sources/ui/api/trainer_config_helpers/poolings.txt @@ -0,0 +1,33 @@ +======== +Poolings +======== + +BasePoolingType +=============== +.. automodule:: paddle.trainer_config_helpers.poolings + :members: BasePoolingType + :noindex: + +AvgPooling +========== +.. automodule:: paddle.trainer_config_helpers.poolings + :members: AvgPooling + :noindex: + +MaxPooling +========== +.. automodule:: paddle.trainer_config_helpers.poolings + :members: MaxPooling + :noindex: + +SumPooling +========== +.. automodule:: paddle.trainer_config_helpers.poolings + :members: SumPooling + :noindex: + +SquareRootNPooling +================== +.. 
automodule:: paddle.trainer_config_helpers.poolings + :members: SquareRootNPooling + :noindex: diff --git a/release_doc/0.9.0/doc/_sources/ui/cmd_argument/argument_outline.txt b/release_doc/0.9.0/doc/_sources/ui/cmd_argument/argument_outline.txt new file mode 100644 index 0000000000000000000000000000000000000000..d6cc2c6ed7cc1b9209d56b4348497427efe40ac3 --- /dev/null +++ b/release_doc/0.9.0/doc/_sources/ui/cmd_argument/argument_outline.txt @@ -0,0 +1,409 @@ +# Argument Outline + +It looks like there are a lot of arguments. However, most of them are for developers or alrealy set automatically in cluster submitting environment and users do not need to care about them. Here, we divide these arguments into serveral classes according to the scenario that they are used in. For example, the arguments in `common` can be used in all scenes. Some arguments can be only used in certain layers. Some are needed by multi machines training in cluster, etc. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +√ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
argslocal traincluster trainlocal testcluster test
commonjob
use_gpu
local
config
config_args
num_passes
trainer_count
version
show_layer_stat
traindot_period
test_period
saving_period
show_parameter_stats_period
init_model_path
load_missing_parameter_strategy
saving_period_by_batches
use_old_updater
enable_grad_share
grad_share_block_num
log_error_clipping
log_clipping
save_only_one
allow_inefficient_sparse_update
start_pass
train/testsave_dir
testing during trainingtest_all_data_in_one_period
average_test_period
testmodel_list
test_wait
test_pass
predict_output_dir
distribute_test
Auc/PnpairValidationpredict_file
GPUgpu_id
parallel_nn
allow_only_one_model_on_one_gpu
cudnn_dir
cuda_dir
cudnn_conv_workspace_limit_in_mb
RNNbeam_size
rnn_use_batch
prev_batch_state
diy_beam_search_prob_so
metric learningexternal
data_server_port
PServerstart_pserver
pservers
port
port_num
ports_num_for_sparse
nics
rdma_tcp
small_messages
loadsave_parameters_in_pserver
log_period_server
pserver_num_threads
sock_send_buf_size
sock_recv_buf_size
num_gradient_servers
parameter_block_size
parameter_block_size_for_sparse
Async SGDasync_count
async_lagged_ratio_min
async_lagged_ratio_default
Performance Tuninglog_barrier_abstract
log_barrier_lowest_nodes
log_barrier_show_log
check_sparse_distribution_batches
check_sparse_distribution_ratio
check_sparse_distribution_unbalance_degree
check_sparse_distribution_in_pserver
show_check_sparse_distribution_log
Data Providermemory_threshold_on_load_data
RandomNumberseed
thread_local_rand_use_global_seed
UnitTestcheckgrad_eps
Matrix/Vectorenable_parallel_vector
+ diff --git a/release_doc/0.9.0/doc/_sources/ui/cmd_argument/detail_introduction.txt b/release_doc/0.9.0/doc/_sources/ui/cmd_argument/detail_introduction.txt new file mode 100644 index 0000000000000000000000000000000000000000..07608e5edf740bd3e1242913f1d2d7589ad313aa --- /dev/null +++ b/release_doc/0.9.0/doc/_sources/ui/cmd_argument/detail_introduction.txt @@ -0,0 +1,340 @@ +# Detail Description + +## Common + +* `--job` + - Job mode, including: **train, test, checkgrad**, where checkgrad is mainly for developers and users do not need to care about. + - type: string (default: train) + +* `--config` + - Use to specfiy network configure file. + - type: string (default: null). + +* `--use_gpu` + - Whether to use GPU for training, false is cpu mode and true is gpu mode. + - type: bool (default: 1). + +* `--local` + - Whether the training is in local mode or not. True when training locally or using one node in cluster. False when using multiple machines in cluster. + - type: bool (default: 1). + +* `--trainer_count` + - Define the number of threads used in one machine. For example, trainer_count = 4, means use 4 GPU in GPU mode and 4 threads in CPU mode. Each thread (or GPU) is assigned to 1/4 samples in current batch. That is to say, if setting batch_size of 512 in trainer config, each thread train 128 samples. + - type: int32 (default: 1). + +* `--num_passes` + - When `--job=train`, means training for num_passes passes. One pass means training all samples in dataset one time. When `--job=test`, means testing data from model of test_pass to model of (num_passes - 1). + - type: int32 (default: 100). + +* `--config_args` + - arguments passed to config file. Format: key1=value1,key2=value2. + - type: string (default: null). + +* `--version` + - Whether to print version infomatrion. + - type: bool (default: 0). + +* `--show_layer_stat` + - Whether to show the statistics of each layer **per batch**. + - type: bool (default: 0). 
+ +## Train + +* `--log_period` + - Log progress every log_period batches. + - type: int32 (default: 100). + +* `--dot_period` + - Print '.' every dot_period batches. + - type: int32 (default: 1). + +* `--saving_period` + - Save parameters every saving_period passes + - type: int32 (default: 1). + +* `--save_dir` + - Directory for saving model parameters. It needs to be specified, but no need to be created in advance. + - type: string (default: null). + +* `--start_pass` + - Start training from this pass. It will load parameters from the previous pass. + - type: int32 (default: 0). + +* `--show_parameter_stats_period` + - Show parameter statistic during training every show_parameter_stats_period batches. It will not show by default. + - type: int32 (default: 0). + +* `--save_only_one` + - Save the parameters only in last pass, while the previous parameters will be removed. + - type: bool (default: 0). + +* `--load_missing_parameter_strategy` + - Specify the loading operation when model file is missing. Now support fail/rand/zere three operations. + - `fail`: program will exit. + - `rand`: uniform or normal distribution according to **initial\_strategy** in network config. Uniform range is: **[mean - std, mean + std]**, where mean and std are configures in trainer config. + - `zero`: all parameters are zero. + - type: string (default: fail). + +* `--init_model_path` + - Path of the initialization model. If it was set, start\_pass will be ignored. It can be used to specify model path in testing mode as well. + - type: string (default: null). + +* `--saving_period_by_batches` + - Save parameters every saving_period_by_batches batches in one pass. + - type: int32 (default: 0). + +* `--log_error_clipping` + - Whether to print error clipping log when setting **error_clipping_threshold** in layer config. If it is true, log will be printed in backward propagation **per batch**. This clipping effects on **gradient of output**. + - type: bool (default: 0). 
+ +* `--log_clipping` + - Enable print log clipping or not when setting **gradient_clipping_threshold** in trainer config. This clipping effects on **gradient w.r.t. (with respect to) weight**. + - type: bool (default: 0). + +* `--use_old_updater` + - Whether to use the old RemoteParameterUpdater. Default use ConcurrentRemoteParameterUpdater. It is mainly for deverlopers and users usually do not need to care about. + - type: bool (default: 0). + +* `--enable_grad_share` + - threshold for enable gradient parameter, which is shared for batch multi-cpu training. + - type: int32 (default: 100 \* 1024 \* 1024). + +* `--grad_share_block_num` + - block number of gradient parameter, which is shared for batch multi-cpu training. + - type: int32 (default: 64). + +## Test + +* `--test_pass` + - Load parameter from this pass to test. + - type: int32 (default: -1). + +* `--test_period` + - Run testing every test_period train batches. If not set, run testing each pass. + - type: int32 (default: 1000). + +* `--test_wait` + - Whether to wait for parameter per pass if not exist. If set test_data_path in submitting environment of cluster, it will launch one process to perfom testing, so we need to set test_wait=1. Note that in the cluster submitting environment, this argument has been set True by default. + - type: bool (default: 0). + +* `--model_list` + - File that saves the model list when testing. It was set automatically when using cluster submitting environment after setting model_path. + - type: string (default: "", null). + +* `--test_all_data_in_one_period` + - This argument is usually used in testing period during traning. If true, all data will be tested in one test period. Otherwise (batch_size * log_peroid) data will be tested. + - type: bool (default: 0). + +* `--predict_output_dir` + - Directory that saves the layer output. It is configured in Outputs() in network config. Default, this argument is null, meaning save nothing. 
Specify this directory if you want to save feature map of some layers in testing mode. Note that, layer outputs are values after activation function. + - type: string (default: "", null). + +* `--average_test_period` + - Do test on average parameter every `average_test_period` batches. It MUST be devided by FLAGS_log_period. Default 0 means do not test on average parameter. + - type: int32 (default: 0). + +* `--distribute_test` + - Testing in distribute environment will merge results from multiple machines. + - type: bool (default: 0). + +* `--predict_file` + - File name for saving predicted result. Default, this argument is null, meaning save nothing. Now, this argument is only used in AucValidationLayer and PnpairValidationLayer, and saves predicted result every pass. + - type: string (default: "", null). + +## GPU + +* `--gpu_id` + - Which gpu core to use. + - type: int32 (default: 0). + +* `--allow_only_one_model_on_one_gpu` + - If true, do not allow multiple models on one GPU device. + - type: bool (default: 1). + +* `--parallel_nn` + - Whether to use multi-thread to calculate one neural network or not. If false, use gpu_id specify which gpu core to use (the device property in trainer config will be ingored). If true, the gpu core is specified in trainer config (gpu_id will be ignored). + - type: bool (default: 0). + +* `--cudnn_dir` + - Choose path to dynamic load NVIDIA CuDNN library, for instance, /usr/local/cuda/lib64. [Default]: LD_LIBRARY_PATH + - type: string (default: "", null) + +* `--cuda_dir` + - Choose path to dynamic load NVIDIA CUDA library, for instance, /usr/local/cuda/lib64. [Default]: LD_LIBRARY_PATH + - type: string (default: "", null) + +* `--cudnn_conv_workspace_limit_in_mb` + - Specify cuDNN max workspace limit, in units MB, 4096MB=4GB by default. + - type: int32 (default: 4096MB=4GB) + +## NLP: RNN/LSTM/GRU +* `--rnn_use_batch` + - Whether to use batch method for calculation in simple RecurrentLayer. + - type: bool (default: 0). 
+ +* `--prev_batch_state` + - batch is continue with next batch. + - type: bool (default: 0). + +* `--beam_size` + - Beam search uses breadth-first search to build its search tree. At each level of the tree, it generates all successors of the states at the current level, sorting them in increasing order of heuristic cost. However, it only stores a predetermined number of best states at each level (called the beam size). + - type: int32 (default: 1). + +* `--diy_beam_search_prob_so` + - Specify shared dynamic library. It can be defined out of paddle by user. + - type: string (default: "", null). + +## Metric Learning +* `--external` + - Whether to use external machine for metric learning. + - type: bool (default: 0). + +* `--data_server_port` + - Listening port for dserver (data server), dserver is mainly used in metric learning. + - type: int32 (default: 21134). + +## DataProvider + +* `--memory_threshold_on_load_data` + - Stop loading data when memory is not sufficient. + - type: double (default: 1.0). + +## Unit Test + +* `--checkgrad_eps` + - parameter change size for checkgrad. + - type: double (default: 1e-05). + +## Parameter Server and Distributed Communication + +* `--start_pserver` + - Whether to start pserver (parameter server). + - type: bool (default: 0). + +* `--pservers` + - Comma separated IP addresses of pservers. It is set automatically in cluster submitting environment. + - type: string (default: "127.0.0.1"). + +* `--port` + - Listening port for pserver. + - type: int32 (default: 20134). + +* `--ports_num` + - The ports number for parameter send, increment based on default port number. + - type: int32 (default: 1). + +* `--trainer_id` + - In distributed training, each trainer must be given an unique id ranging from 0 to num_trainers-1. Trainer 0 is the master trainer. User do not need to care this flag. + - type: int32 (default: 0). + +* `--num_gradient_servers` + - Numbers of gradient servers. 
This arguments is set automatically in cluster submitting environment. + - type: int32 (default: 1). + +* `--small_messages` + - If message size is small, recommend set it True to enable quick ACK and no delay + - type: bool (default: 0). + +* `--sock_send_buf_size` + - Restrict socket send buffer size. It can reduce network congestion if set carefully. + - type: int32 (default: 1024 \* 1024 \* 40). + +* `--sock_recv_buf_size` + - Restrict socket recieve buffer size. + - type: int32 (default: 1024 \* 1024 \* 40). + +* `--parameter_block_size` + - Parameter block size for pserver, will automatically calculate a suitable value if it's not set. + - type: int32 (default: 0). + +* `--parameter_block_size_for_sparse` + - Parameter block size for sparse update pserver, will automatically calculate a suitable value if it's not set. + - type: int32 (default: 0). + +* `--log_period_server` + - Log progress every log_period_server batches at pserver end. + - type: int32 (default: 500). + +* `--loadsave_parameters_in_pserver` + - Load and save parameters in pserver. Only work when parameter set sparse_remote_update. + - type: bool (default: 0). + +* `--pserver_num_threads` + - number of threads for sync op exec. + - type: bool (default: 1). + +* `--ports_num_for_sparse` + - The ports number for parameter send, increment based on default (port + ports_num). It is used by sparse Tranning. + - type: int32 (default: 0). + +* `--nics` + - Network device name for pservers, already set in cluster submitting environment. + - type: string (default: "xgbe0,xgbe1"). + +* `--rdma_tcp` + - Use rdma or tcp transport protocol, already set in cluster submitting environment. + - type: string (default: "tcp"). + +## Async SGD +* `--async_count` + - Defined the asynchronous training length, if 0, then use synchronized training. + - type: int32 (default: 0). + +* `--async_lagged_ratio_min` + - Control the minimize value of `config_.async_lagged_grad_discard_ratio()`. 
+ - type: double (default: 1.0). + +* `--async_lagged_ratio_default` + - If async_lagged_grad_discard_ratio is not set in network config, use it as defalut value. + - type: double (default: 1.5). + +## Performance Tuning + +* `--log_barrier_abstract` + - If true, show abstract barrier performance information. + - type: bool (default: 1). + +* `--log_barrier_show_log` + - If true, always show barrier abstract even with little gap. + - type: bool (default: 0). + +* `--log_barrier_lowest_nodes` + - How many lowest node will be logged. + - type: int32 (default: 5). + +* `--check_sparse_distribution_in_pserver` + - Whether to check that the distribution of sparse parameter on all pservers is balanced. + - type: bool (default: 0). + +* `--show_check_sparse_distribution_log` + - show log details for sparse parameter distribution in pserver. + - type: bool (default: 0). + +* `--allow_inefficient_sparse_update` + - Whether to allow inefficient sparse update. + - type: bool (default: 0). + +* `--check_sparse_distribution_batches` + - Running sparse parameter distribution check every so many batches. + - type: int32 (default: 100). + +* `--check_sparse_distribution_ratio` + - If parameters dispatched to different pservers have an unbalanced distribution for check_sparse_distribution_ratio * check_sparse_distribution_batches times, crash program. + - type: double (default: 0.6). + +* `--check_sparse_distribution_unbalance_degree` + - The ratio of maximum data size / minimun data size for different pserver. + - type: double (default: 2). + +## Matrix/Vector/RandomNumber +* `--enable_parallel_vector` + - threshold for enable parallel vector. + - type: int32 (default: 0). + +* `--seed` + - random number seed. 0 for srand(time) + - type: int32 (default: 1) + +* `--thread_local_rand_use_global_seed` + - Whether to use global seed in rand of thread local. + - type: bool (default: 0). 
diff --git a/release_doc/0.9.0/doc/_sources/ui/cmd_argument/use_case.txt b/release_doc/0.9.0/doc/_sources/ui/cmd_argument/use_case.txt new file mode 100644 index 0000000000000000000000000000000000000000..a6bfba29af4f73055338c3a671bcafaa1456c7cf --- /dev/null +++ b/release_doc/0.9.0/doc/_sources/ui/cmd_argument/use_case.txt @@ -0,0 +1,183 @@ +# Use Case + +## Local Training + +These command line arguments are commonly used by local training experiments, such as image classification, natural language processing, et al. + +``` +paddle train \ + --use_gpu=1/0 \ #1:GPU,0:CPU(default:true) + --config=network_config \ + --save_dir=output \ + --trainer_count=COUNT \ #(default:1) + --test_period=M \ #(default:1000) + --test_all_data_in_one_period=true \ #(default:false) + --num_passes=N \ #(defalut:100) + --log_period=K \ #(default:100) + --dot_period=1000 \ #(default:1) + #[--show_parameter_stats_period=100] \ #(default:0) + #[--saving_period_by_batches=200] \ #(default:0) +``` +`show_parameter_stats_period` and `saving_period_by_batches` are optional according to your task. + +### 1) Pass Command Argument to Network config + +`config_args` is a useful parameter to pass arguments to network config. + +``` +--config_args=generating=1,beam_size=5,layer_num=10 \ +``` +And `get_config_arg` can be used to parse these arguments in network config as follows: + +``` +generating = get_config_arg('generating', bool, False) +beam_size = get_config_arg('beam_size', int, 3) +layer_num = get_config_arg('layer_num', int, 8) +``` + +`get_config_arg`: + +``` +get_config_arg(name, type, default_value) +``` +- name: the name specified in the `--config_args` +- type: value type, bool, int, str, float etc. +- default_value: default value if not set. 
+ +### 2) Use Model to Initialize Network + +add argument: + +``` +--init_model_path=model_path +--load_missing_parameter_strategy=rand +``` + +## Local Testing + +Method 1: + +``` +paddle train --job=test \ + --use_gpu=1/0 \ + --config=network_config \ + --trainer_count=COUNT \ + --init_model_path=model_path \ +``` +- use init\_model\_path to specify test model. +- only can test one model. + +Method 2: + +``` +paddle train --job=test \ + --use_gpu=1/0 \ + --config=network_config \ + --trainer_count=COUNT \ + --model_list=model.list \ +``` +- use model_list to specify test models +- can test several models, where model.list likes: + +``` +./alexnet_pass1 +./alexnet_pass2 +``` + +Method 3: + +``` +paddle train --job=test \ + --use_gpu=1/0 \ + --config=network_config \ + --trainer_count=COUNT \ + --save_dir=model \ + --test_pass=M \ + --num_passes=N \ +``` +This way must use model path saved by Paddle like this: `model/pass-%5d`. Testing model is from M-th pass to (N-1)-th pass. For example: M=12 and N=14 will test `model/pass-00012` and `model/pass-00013`. + +## Sparse Training + +Sparse training is usually used to accelerate calculation when input is sparse data with highly dimension. For example, dictionary dimension of input data is 1 million, but one sample just have several words. In paddle, sparse matrix multiplication is used in forward propagation and sparse updating is perfomed on weight updating after backward propagation. + +### 1) Local training + +You need to set **sparse\_update=True** in network config. Check the network config documentation for more details. + +### 2) cluster training + +Add the following argument for cluster training of a sparse model. At the same time you need to set **sparse\_remote\_update=True** in network config. Check the network config documentation for more details. + +``` +--ports_num_for_sparse=1 #(default: 0) +``` + +## parallel_nn +`parallel_nn` can be set to mixed use of GPUs and CPUs to compute layers. 
That is to say, you can deploy network to use a GPU to compute some layers and use a CPU to compute other layers. The other way is to split layers into different GPUs, which can **reduce GPU memory** or **use parallel computation to accelerate some layers**. + +If you want to use these characteristics, you need to specify device ID in network config (denote it as deviceId) and add command line argument: + +``` +--parallel_nn=true +``` +### case 1: Mixed Use of GPU and CPU +Consider the following example: + +``` +#command line: +paddle train --use_gpu=true --parallel_nn=true trainer_count=COUNT + +default_device(0) + +fc1=fc_layer(...) +fc2=fc_layer(...) +fc3=fc_layer(...,layer_attr=ExtraAttr(device=-1)) + +``` +- default_device(0): set default device ID to 0. This means that except the layers with device=-1, all layers will use a GPU, and the specific GPU used for each layer depends on trainer\_count and gpu\_id (0 by default). Here, layer l1 and l2 are computed on the GPU. + +- device=-1: use the CPU for layer l3. + +- trainer_count: + - trainer_count=1: if gpu\_id is not set, then use the first GPU to compute layers l1 and l2. Otherwise use the GPU with gpu\_id. + + - trainer_count>1: use trainer\_count GPUs to compute one layer using data parallelism. For example, trainer\_count=2 means that GPUs 0 and 1 will use data parallelism to compute layer l1 and l2. + +### Case 2: Specify Layers in Different Devices + +``` +#command line: +paddle train --use_gpu=true --parallel_nn=true --trainer_count=COUNT + +#network: +fc2=fc_layer(input=l1, layer_attr=ExtraAttr(device=0), ...) +fc3=fc_layer(input=l1, layer_attr=ExtraAttr(device=1), ...) +fc4=fc_layer(input=fc2, layer_attr=ExtraAttr(device=-1), ...) +``` +In this case, we assume that there are 4 GPUs in one machine. + +- trainer_count=1: + - Use GPU 0 to compute layer l2. + - Use GPU 1 to compute layer l3. + - Use CPU to compute layer l4. + +- trainer_count=2: + - Use GPU 0 and 1 to compute layer l2. 
+ - Use GPU 2 and 3 to compute layer l3. + - Use CPU to compute l4 in two threads. + +- trainer_count=4: + - It will fail (note, we have assumed that there are 4 GPUs in machine), because argument `allow_only_one_model_on_one_gpu` is true by default. + +**Allocation of device ID when `device!=-1`**: + +``` +(deviceId + gpu_id + threadId * numLogicalDevices_) % numDevices_ + +deviceId: specified in layer. +gpu_id: 0 by default. +threadId: thread ID, range: 0,1,..., trainer_count-1 +numDevices_: device (GPU) count in machine. +numLogicalDevices_: min(max(deviceId + 1), numDevices_) +``` diff --git a/release_doc/0.9.0/doc/_sources/ui/data_provider/index.txt b/release_doc/0.9.0/doc/_sources/ui/data_provider/index.txt new file mode 100644 index 0000000000000000000000000000000000000000..3db5b57376257b83fc2a27c518b0db663682136d --- /dev/null +++ b/release_doc/0.9.0/doc/_sources/ui/data_provider/index.txt @@ -0,0 +1,42 @@ +DataProvider Introduction +========================= +DataProvider is a module that loads training or testing data into cpu or gpu +memory for the following training or testing process. + +For simple use, users can use Python :code:`PyDataProvider` to dynamically read +the original data in any format or in any form, and then transfer them into a +data format PaddlePaddle requires. The process is extremely flexible and highly +customized, while sacrificing the efficiency only a little. This is extremely +useful when you have to dynamically generate certain kinds of data according to, +for example, the training performance. + +Besides, users also can customize a C++ :code:`DataProvider` for a more +complex usage, or for a higher efficiency. + +The following parameters are required to define in the PaddlePaddle network +configuration file (trainer_config.py): which DataProvider is chosen to be used, +and specific parameters for DataProvider, including training file list +(train.list) and testing file list (test.list). 
+ +Train.list and test.list are simply two plain text files, which define the paths +of training or testing data. It is recommended that directly placing them into +the training directory, and reference to them by using a relative path ( +relative to the PaddlePaddle program). + +Testing or evaluating will not be performed during training if the test.list is +not set or set to None. Otherwise, PaddlePaddle will evaluate the trained model +by the specified testing data while training, every testing period (a user +defined command line parameter in PaddlePaddle) to prevent over-fitting. + +Each line of train.list and test.list is an absolute or relative path (relative +to the PaddlePaddle program runtime) of data file. Fascinatingly more, each line +can also be a HDFS file path or a SQL connection string. As long as the user +assures how to access each file in DataProvider. + +Please refer to the following articles for more information about the detail +usages of DataProvider and how to implement a new DataProvider, + +.. toctree:: + + pydataprovider2.rst + write_new_dataprovider.rst diff --git a/release_doc/0.9.0/doc/_sources/ui/data_provider/pydataprovider2.txt b/release_doc/0.9.0/doc/_sources/ui/data_provider/pydataprovider2.txt new file mode 100644 index 0000000000000000000000000000000000000000..e105d3be308705d228c0b188e15742a0f7325ab6 --- /dev/null +++ b/release_doc/0.9.0/doc/_sources/ui/data_provider/pydataprovider2.txt @@ -0,0 +1,245 @@ +How to use PyDataProvider2 +========================== + +We highly recommend users to use PyDataProvider2 to provide training or testing +data to PaddlePaddle. The user only needs to focus on how to read a single +sample from the original data file by using PyDataProvider2, leaving all of the +trivial work, including, transferring data into cpu/gpu memory, shuffle, binary +serialization to PyDataProvider2. 
PyDataProvider2 uses multithreading and a +fanscinating but simple cache strategy to optimize the efficiency of the data +providing process. + +DataProvider for the non-sequential model +----------------------------------------- + +Here we use the MNIST handwriting recognition data as an example to illustrate +how to write a simple PyDataProvider. + +MNIST is a handwriting classification data set. It contains 70,000 digital +grayscale images. Labels of the training sample range from 0 to 9. All the +images have been size-normalized and centered into images with the same size +of 28 x 28 pixels. + +A small part of the original data as an example is shown as below: + +.. literalinclude:: ../../../doc_cn/ui/data_provider/mnist_train.txt + +Each line of the data contains two parts, separated by :code:`;`. The first part is +label of an image. The second part contains 28x28 pixel float values. + +Just write path of the above data into train.list. It looks like this: + +.. literalinclude:: ../../../doc_cn/ui/data_provider/train.list + +The corresponding dataprovider is shown as below: + +.. literalinclude:: ../../../doc_cn/ui/data_provider/mnist_provider.py + +The first line imports PyDataProvider2 package. +The main function is the process function, that has two parameters. +The first parameter is the settings, which is not used in this example. +The second parameter is the filename, that is exactly each line of train.list. +This parameter is passed to the process function by PaddlePaddle. + +:code:`@provider` is a Python +`Decorator `_ . +It sets some properties to DataProvider, and constructs a real PaddlePaddle +DataProvider from a very simple user implemented python function. It does not +matter if you are not familiar with `Decorator`_. You can keep it simple by +just taking :code:`@provider` as a fixed mark above the provider function you +implemented. + +`input_types`_ defines the data format that a DataProvider returns. 
+In this example, it is set to a 28x28-dimensional dense vector and an integer +scalar, whose value ranges from 0 to 9. +`input_types`_ can be set to several kinds of input formats, please refer to the +document of `input_types`_ for more details. + + +The process method is the core part to construct a real DataProvider in +PaddlePaddle. It implements how to open the text file, how to read one sample +from the original text file, convert them into `input_types`_, and give them +back to PaddlePaddle process at line 23. +Note that data yielded by the process function must follow the same order that +`input_types`_ are defined. + + +With the help of PyDataProvider2, user can focus on how to generate ONE traning +sample by using keywords :code:`yield`. +:code:`yield` is a python keyword, and a concept related to it includes +:code:`generator`. + +Only a few lines of codes need to be added into the training configuration file, +you can take this as an example. + +.. literalinclude:: ../../../doc_cn/ui/data_provider/mnist_config.py + +Here we specify training data by :code:`train.list`, and no testing data is specified. +The method which actually provide data is :code:`process`. + +User also can use another style to provide data, which defines the +:code:`data_layer`'s name explicitly when `yield`. For example, +the :code:`dataprovider` is shown as below. + +.. literalinclude:: ../../../doc_cn/ui/data_provider/mnist_provider.dict.py + :linenos: + +If user did't give the :code:`data_layer`'s name, PaddlePaddle will use +the order of :code:`data_layer` definition roughly to determine which feature to +which :code:`data_layer`. This order may be not correct, so TO DEFINE THE +:code:`data_layer`'s NAMES EXPLICITLY IS THE RECOMMANDED WAY TO PROVIDER DATA. + +Now, this simple example of using PyDataProvider is finished. +The only thing that the user should know is how to generte **one sample** from +**one data file**. 
+And PaddlePadle will do all of the rest things\: + +* Form a training batch +* Shuffle the training data +* Read data with multithreading +* Cache the training data (Optional) +* CPU-> GPU double buffering. + +Is this cool? + +DataProvider for the sequential model +------------------------------------- +A sequence model takes sequences as its input. A sequence is made up of several +timesteps. The so-called timestep, is not necessary to have something to do +with time. It can also be explained to that the order of data are taken into +consideration into model design and training. +For example, the sentence can be interpreted as a kind of sequence data in NLP +tasks. + +Here is an example on data proivider for English sentiment classification data. +The original input data are simple English text, labeled into positive or +negative sentiment (marked by 0 and 1 respectively). + +A small part of the original data as an example can be found in the path below: + +.. literalinclude:: ../../../doc_cn/ui/data_provider/sentimental_train.txt + +The corresponding data provider can be found in the path below: + +.. literalinclude:: ../../../doc_cn/ui/data_provider/sentimental_provider.py + +This data provider for sequential model is a little more complex than that +for MINST dataset. +A new initialization method is introduced here. +The method :code:`on_init` is configured to DataProvider by :code:`@provider`'s +:code:`init_hook` parameter, and it will be invoked once DataProvider is +initialized. The :code:`on_init` function has the following parameters: + +* The first parameter is the settings object. +* The rest parameters are passed by key word arguments. Some of them are passed + by PaddlePaddle, see reference for `init_hook`_. + The :code:`dictionary` object is a python dict object passed from the trainer + configuration file, and it maps word string to word id. 
+ +To pass these parameters into DataProvider, the following lines should be added +into trainer configuration file. + +.. literalinclude:: ../../../doc_cn/ui/data_provider/sentimental_config.py + +The definition is basically same as MNIST example, except: +* Load dictionary in this configuration +* Pass it as a parameter to the DataProvider + +The `input_types` is configured in method :code:`on_init`. It has the same +effect to configure them by :code:`@provider`'s :code:`input_types` parameter. +However, the :code:`input_types` is set at runtime, so we can set it to +different types according to the input data. Input of the neural network is a +sequence of word id, so set :code:`seq_type` to :code:`integer_value_sequence`. + +Durning :code:`on_init`, we save :code:`dictionary` variable to +:code:`settings`, and it will be used in :code:`process`. Note the settings +parameter for the process function and for the on_init's function are a same +object. + +The basic processing logic is the same as MNIST's :code:`process` method. Each +sample in the data file is given back to PaddlePaddle process. + +Thus, the basic usage of PyDataProvider is here. +Please refer to the following section reference for details. + +Reference +--------- + +@provider ++++++++++ + +.. autofunction:: paddle.trainer.PyDataProvider2.provider + +input_types ++++++++++++ + +PaddlePaddle has four data types, and three sequence types. +The four data types are: + +* :code:`dense_vector`: dense float vector. +* :code:`sparse_binary_vector`: sparse binary vector, most of the value is 0, and + the non zero elements are fixed to 1. +* :code:`sparse_float_vector`: sparse float vector, most of the value is 0, and some + non zero elements can be any float value. They are given by the user. +* :code:`integer`: an integer scalar, that is especially used for label or word index. + +The three sequence types are: + +* :code:`SequenceType.NO_SEQUENCE` means the sample is not a sequence. 
+* :code:`SequenceType.SEQUENCE` means the sample is a sequence. +* :code:`SequenceType.SUB_SEQUENCE` means it is a nested sequence, that each timestep of + the input sequence is also a sequence. + +Different input type has a defferenct input format. Their formats are shown +in the above table. + ++----------------------+---------------------+-----------------------------------+------------------------------------------------+ +| | NO_SEQUENCE | SEQUENCE | SUB_SEQUENCE | ++======================+=====================+===================================+================================================+ +| dense_vector | [f, f, ...] | [[f, ...], [f, ...], ...] | [[[f, ...], ...], [[f, ...], ...],...] | ++----------------------+---------------------+-----------------------------------+------------------------------------------------+ +| sparse_binary_vector | [i, i, ...] | [[i, ...], [i, ...], ...] | [[[i, ...], ...], [[i, ...], ...],...] | ++----------------------+---------------------+-----------------------------------+------------------------------------------------+ +| sparse_float_vector | [(i,f), (i,f), ...] | [[(i,f), ...], [(i,f), ...], ...] | [[[(i,f), ...], ...], [[(i,f), ...], ...],...] | ++----------------------+---------------------+-----------------------------------+------------------------------------------------+ +| integer_value | i | [i, i, ...] | [[i, ...], [i, ...], ...] | ++----------------------+---------------------+-----------------------------------+------------------------------------------------+ + +where f represents a float value, i represents an integer value. + +init_hook ++++++++++ + +init_hook is a function that is invoked once the data provoder is initialized. +Its parameters lists as follows: + +* The first parameter is a settings object, which is the same to :code:`settings` + in :code:`process` method. The object contains several attributes, including: + + * :code:`settings.input_types`: the input types. Reference `input_types`_. 
+ * :code:`settings.logger`: a logging object. + +* The rest parameters are the keyword arguments. It is made up of PaddlePaddle + pre-defined parameters and user defined parameters. + + * PaddlePaddle-defined parameters including: + + * :code:`is_train` is a bool parameter that indicates the DataProvider is used in + training or testing. + * :code:`file_list` is the list of all files. + + * User-defined parameters args can be set in training configuration. + +Note, PaddlePaddle reserves the right to add pre-defined parameters, so please +use :code:`**kwargs` in init_hook to ensure compatibility by accepting the +parameters which your init_hook does not use. + +cache ++++++ +DataProvider provides two simple cache strategies. They are: + +* :code:`CacheType.NO_CACHE` means do not cache any data, then data is read at runtime by + the user implemented python module every pass. +* :code:`CacheType.CACHE_PASS_IN_MEM` means the first pass reads data by the user + implemented python module, and the rest passes will directly read data from + memory. 
diff --git a/release_doc/0.9.0/doc/_sources/ui/index.txt b/release_doc/0.9.0/doc/_sources/ui/index.txt new file mode 100644 index 0000000000000000000000000000000000000000..9c1ba27bdc14fa9ab762ffb97424a8a5946808f9 --- /dev/null +++ b/release_doc/0.9.0/doc/_sources/ui/index.txt @@ -0,0 +1,20 @@ +# User Interface + +## Data Provider + +* [Introduction](data_provider/index.rst) +* [PyDataProvider2](data_provider/pydataprovider2.rst) + +## API Reference + +* [Model Config Interface](api/trainer_config_helpers/index.md) + +## Command Line Argument + +* [Use Case](cmd_argument/use_case.md) +* [Argument Outline](cmd_argument/argument_outline.md) +* [Detailed Descriptions](cmd_argument/detail_introduction.md) + +## Predict + +* [Python Prediction API](predict/swig_py_paddle_en.rst) diff --git a/release_doc/0.9.0/doc/_sources/ui/predict/swig_py_paddle_en.txt b/release_doc/0.9.0/doc/_sources/ui/predict/swig_py_paddle_en.txt new file mode 100644 index 0000000000000000000000000000000000000000..b743fc456914664168e1be6c7f18a419c38afa62 --- /dev/null +++ b/release_doc/0.9.0/doc/_sources/ui/predict/swig_py_paddle_en.txt @@ -0,0 +1,59 @@ +Python Prediction API +===================== + +PaddlePaddle offers a set of clean prediction interfaces for python with the help of +SWIG. The main steps of predict values in python are: + +* Parse training configurations +* Construct GradientMachine +* Prepare data +* Predict + +Here is a sample python script that shows the typical prediction process for the +MNIST classification problem. A complete sample code could be found at +:code:`src_root/doc/ui/predict/predict_sample.py`. + +.. literalinclude:: ./predict_sample.py + :language: python + :lines: 15-18,90-100,101-104 + +The module that does the most of the job is py_paddle.swig_paddle, it's +generated by SWIG and has complete documents, for more details you can use +python's :code:`help()` function. 
Let's walk through the above python script: + +* At the beginning, use :code:`swig_paddle.initPaddle()` to initialize + PaddlePaddle with command line arguments, for more about command line arguments + see `Command Line Arguments <../cmd_argument/detail_introduction.html>`_. +* Parse the configuration file that is used in training with :code:`parse_config()`. + Because data to predict with always have no label, and output of prediction work + normally is the output layer rather than the cost layer, so you should modify + the configuration file accordingly before using it in the prediction work. +* Create a neural network with + :code:`swig_paddle.GradientMachine.createFromConfigproto()`, which takes the + parsed configuration :code:`conf.model_config` as argument. Then load the + trained parameters from the model with :code:`network.loadParameters()`. +* Create a data converter object of utility class :code:`DataProviderConverter`. + - Note: As swig_paddle can only accept C++ matrices, we offer a utility + class DataProviderConverter that can accept the same input data with + PyDataProvider2, for more information please refer to document + of `PyDataProvider2 <../data_provider/pydataprovider2.html>`_. +* Do the prediction with :code:`forwardTest()`, which takes the converted + input data and outputs the activations of the output layer. + +Here is a typical output: + +.. 
code-block:: text + + [{'id': None, 'value': array([[ 5.53018653e-09, 1.12194102e-05, 1.96644767e-09, + 1.43630644e-02, 1.51111044e-13, 9.85625684e-01, + 2.08823112e-10, 2.32777140e-08, 2.00186201e-09, + 1.15501715e-08], + [ 9.99982715e-01, 1.27787406e-10, 1.72296313e-05, + 1.49316648e-09, 1.36540484e-11, 6.93137714e-10, + 2.70634608e-08, 3.48565123e-08, 5.25639710e-09, + 4.48684503e-08]], dtype=float32)}] + +:code:`value` is the output of the output layer, each row represents result of +the corresponding row in the input data, each element represents activation of +the corresponding neuron in the output layer. + diff --git a/release_doc/0.9.0/doc/_static/ajax-loader.gif b/release_doc/0.9.0/doc/_static/ajax-loader.gif new file mode 100644 index 0000000000000000000000000000000000000000..61faf8cab23993bd3e1560bff0668bd628642330 Binary files /dev/null and b/release_doc/0.9.0/doc/_static/ajax-loader.gif differ diff --git a/release_doc/0.9.0/doc/_static/basic.css b/release_doc/0.9.0/doc/_static/basic.css new file mode 100644 index 0000000000000000000000000000000000000000..0b79414a16adfcf062bf14fc3c040bc335eb79ad --- /dev/null +++ b/release_doc/0.9.0/doc/_static/basic.css @@ -0,0 +1,611 @@ +/* + * basic.css + * ~~~~~~~~~ + * + * Sphinx stylesheet -- basic theme. + * + * :copyright: Copyright 2007-2016 by the Sphinx team, see AUTHORS. + * :license: BSD, see LICENSE for details. 
+ * + */ + +/* -- main layout ----------------------------------------------------------- */ + +div.clearer { + clear: both; +} + +/* -- relbar ---------------------------------------------------------------- */ + +div.related { + width: 100%; + font-size: 90%; +} + +div.related h3 { + display: none; +} + +div.related ul { + margin: 0; + padding: 0 0 0 10px; + list-style: none; +} + +div.related li { + display: inline; +} + +div.related li.right { + float: right; + margin-right: 5px; +} + +/* -- sidebar --------------------------------------------------------------- */ + +div.sphinxsidebarwrapper { + padding: 10px 5px 0 10px; +} + +div.sphinxsidebar { + float: left; + width: 230px; + margin-left: -100%; + font-size: 90%; + word-wrap: break-word; + overflow-wrap : break-word; +} + +div.sphinxsidebar ul { + list-style: none; +} + +div.sphinxsidebar ul ul, +div.sphinxsidebar ul.want-points { + margin-left: 20px; + list-style: square; +} + +div.sphinxsidebar ul ul { + margin-top: 0; + margin-bottom: 0; +} + +div.sphinxsidebar form { + margin-top: 10px; +} + +div.sphinxsidebar input { + border: 1px solid #98dbcc; + font-family: sans-serif; + font-size: 1em; +} + +div.sphinxsidebar #searchbox input[type="text"] { + width: 170px; +} + +img { + border: 0; + max-width: 100%; +} + +/* -- search page ----------------------------------------------------------- */ + +ul.search { + margin: 10px 0 0 20px; + padding: 0; +} + +ul.search li { + padding: 5px 0 5px 20px; + background-image: url(file.png); + background-repeat: no-repeat; + background-position: 0 7px; +} + +ul.search li a { + font-weight: bold; +} + +ul.search li div.context { + color: #888; + margin: 2px 0 0 30px; + text-align: left; +} + +ul.keywordmatches li.goodmatch a { + font-weight: bold; +} + +/* -- index page ------------------------------------------------------------ */ + +table.contentstable { + width: 90%; +} + +table.contentstable p.biglink { + line-height: 150%; +} + +a.biglink { + font-size: 1.3em; +} + 
+span.linkdescr { + font-style: italic; + padding-top: 5px; + font-size: 90%; +} + +/* -- general index --------------------------------------------------------- */ + +table.indextable { + width: 100%; +} + +table.indextable td { + text-align: left; + vertical-align: top; +} + +table.indextable dl, table.indextable dd { + margin-top: 0; + margin-bottom: 0; +} + +table.indextable tr.pcap { + height: 10px; +} + +table.indextable tr.cap { + margin-top: 10px; + background-color: #f2f2f2; +} + +img.toggler { + margin-right: 3px; + margin-top: 3px; + cursor: pointer; +} + +div.modindex-jumpbox { + border-top: 1px solid #ddd; + border-bottom: 1px solid #ddd; + margin: 1em 0 1em 0; + padding: 0.4em; +} + +div.genindex-jumpbox { + border-top: 1px solid #ddd; + border-bottom: 1px solid #ddd; + margin: 1em 0 1em 0; + padding: 0.4em; +} + +/* -- general body styles --------------------------------------------------- */ + +div.body p, div.body dd, div.body li, div.body blockquote { + -moz-hyphens: auto; + -ms-hyphens: auto; + -webkit-hyphens: auto; + hyphens: auto; +} + +a.headerlink { + visibility: hidden; +} + +h1:hover > a.headerlink, +h2:hover > a.headerlink, +h3:hover > a.headerlink, +h4:hover > a.headerlink, +h5:hover > a.headerlink, +h6:hover > a.headerlink, +dt:hover > a.headerlink, +caption:hover > a.headerlink, +p.caption:hover > a.headerlink, +div.code-block-caption:hover > a.headerlink { + visibility: visible; +} + +div.body p.caption { + text-align: inherit; +} + +div.body td { + text-align: left; +} + +.field-list ul { + padding-left: 1em; +} + +.first { + margin-top: 0 !important; +} + +p.rubric { + margin-top: 30px; + font-weight: bold; +} + +img.align-left, .figure.align-left, object.align-left { + clear: left; + float: left; + margin-right: 1em; +} + +img.align-right, .figure.align-right, object.align-right { + clear: right; + float: right; + margin-left: 1em; +} + +img.align-center, .figure.align-center, object.align-center { + display: block; + margin-left: 
auto; + margin-right: auto; +} + +.align-left { + text-align: left; +} + +.align-center { + text-align: center; +} + +.align-right { + text-align: right; +} + +/* -- sidebars -------------------------------------------------------------- */ + +div.sidebar { + margin: 0 0 0.5em 1em; + border: 1px solid #ddb; + padding: 7px 7px 0 7px; + background-color: #ffe; + width: 40%; + float: right; +} + +p.sidebar-title { + font-weight: bold; +} + +/* -- topics ---------------------------------------------------------------- */ + +div.topic { + border: 1px solid #ccc; + padding: 7px 7px 0 7px; + margin: 10px 0 10px 0; +} + +p.topic-title { + font-size: 1.1em; + font-weight: bold; + margin-top: 10px; +} + +/* -- admonitions ----------------------------------------------------------- */ + +div.admonition { + margin-top: 10px; + margin-bottom: 10px; + padding: 7px; +} + +div.admonition dt { + font-weight: bold; +} + +div.admonition dl { + margin-bottom: 0; +} + +p.admonition-title { + margin: 0px 10px 5px 0px; + font-weight: bold; +} + +div.body p.centered { + text-align: center; + margin-top: 25px; +} + +/* -- tables ---------------------------------------------------------------- */ + +table.docutils { + border: 0; + border-collapse: collapse; +} + +table caption span.caption-number { + font-style: italic; +} + +table caption span.caption-text { +} + +table.docutils td, table.docutils th { + padding: 1px 8px 1px 5px; + border-top: 0; + border-left: 0; + border-right: 0; + border-bottom: 1px solid #aaa; +} + +table.field-list td, table.field-list th { + border: 0 !important; +} + +table.footnote td, table.footnote th { + border: 0 !important; +} + +th { + text-align: left; + padding-right: 5px; +} + +table.citation { + border-left: solid 1px gray; + margin-left: 1px; +} + +table.citation td { + border-bottom: none; +} + +/* -- figures --------------------------------------------------------------- */ + +div.figure { + margin: 0.5em; + padding: 0.5em; +} + +div.figure p.caption 
{ + padding: 0.3em; +} + +div.figure p.caption span.caption-number { + font-style: italic; +} + +div.figure p.caption span.caption-text { +} + + +/* -- other body styles ----------------------------------------------------- */ + +ol.arabic { + list-style: decimal; +} + +ol.loweralpha { + list-style: lower-alpha; +} + +ol.upperalpha { + list-style: upper-alpha; +} + +ol.lowerroman { + list-style: lower-roman; +} + +ol.upperroman { + list-style: upper-roman; +} + +dl { + margin-bottom: 15px; +} + +dd p { + margin-top: 0px; +} + +dd ul, dd table { + margin-bottom: 10px; +} + +dd { + margin-top: 3px; + margin-bottom: 10px; + margin-left: 30px; +} + +dt:target, .highlighted { + background-color: #fbe54e; +} + +dl.glossary dt { + font-weight: bold; + font-size: 1.1em; +} + +.field-list ul { + margin: 0; + padding-left: 1em; +} + +.field-list p { + margin: 0; +} + +.optional { + font-size: 1.3em; +} + +.sig-paren { + font-size: larger; +} + +.versionmodified { + font-style: italic; +} + +.system-message { + background-color: #fda; + padding: 5px; + border: 3px solid red; +} + +.footnote:target { + background-color: #ffa; +} + +.line-block { + display: block; + margin-top: 1em; + margin-bottom: 1em; +} + +.line-block .line-block { + margin-top: 0; + margin-bottom: 0; + margin-left: 1.5em; +} + +.guilabel, .menuselection { + font-family: sans-serif; +} + +.accelerator { + text-decoration: underline; +} + +.classifier { + font-style: oblique; +} + +abbr, acronym { + border-bottom: dotted 1px; + cursor: help; +} + +/* -- code displays --------------------------------------------------------- */ + +pre { + overflow: auto; + overflow-y: hidden; /* fixes display issues on Chrome browsers */ +} + +span.pre { + -moz-hyphens: none; + -ms-hyphens: none; + -webkit-hyphens: none; + hyphens: none; +} + +td.linenos pre { + padding: 5px 0px; + border: 0; + background-color: transparent; + color: #aaa; +} + +table.highlighttable { + margin-left: 0.5em; +} + +table.highlighttable td { + 
padding: 0 0.5em 0 0.5em; +} + +div.code-block-caption { + padding: 2px 5px; + font-size: small; +} + +div.code-block-caption code { + background-color: transparent; +} + +div.code-block-caption + div > div.highlight > pre { + margin-top: 0; +} + +div.code-block-caption span.caption-number { + padding: 0.1em 0.3em; + font-style: italic; +} + +div.code-block-caption span.caption-text { +} + +div.literal-block-wrapper { + padding: 1em 1em 0; +} + +div.literal-block-wrapper div.highlight { + margin: 0; +} + +code.descname { + background-color: transparent; + font-weight: bold; + font-size: 1.2em; +} + +code.descclassname { + background-color: transparent; +} + +code.xref, a code { + background-color: transparent; + font-weight: bold; +} + +h1 code, h2 code, h3 code, h4 code, h5 code, h6 code { + background-color: transparent; +} + +.viewcode-link { + float: right; +} + +.viewcode-back { + float: right; + font-family: sans-serif; +} + +div.viewcode-block:target { + margin: -1px -10px; + padding: 0 10px; +} + +/* -- math display ---------------------------------------------------------- */ + +img.math { + vertical-align: middle; +} + +div.body div.math p { + text-align: center; +} + +span.eqno { + float: right; +} + +/* -- printout stylesheet --------------------------------------------------- */ + +@media print { + div.document, + div.documentwrapper, + div.bodywrapper { + margin: 0 !important; + width: 100%; + } + + div.sphinxsidebar, + div.related, + div.footer, + #top-link { + display: none; + } +} \ No newline at end of file diff --git a/release_doc/0.9.0/doc/_static/classic.css b/release_doc/0.9.0/doc/_static/classic.css new file mode 100644 index 0000000000000000000000000000000000000000..d98894b3f666966797bc7444b3c43bfcfa4e1bc1 --- /dev/null +++ b/release_doc/0.9.0/doc/_static/classic.css @@ -0,0 +1,261 @@ +/* + * default.css_t + * ~~~~~~~~~~~~~ + * + * Sphinx stylesheet -- default theme. + * + * :copyright: Copyright 2007-2016 by the Sphinx team, see AUTHORS. 
+ * :license: BSD, see LICENSE for details. + * + */ + +@import url("basic.css"); + +/* -- page layout ----------------------------------------------------------- */ + +body { + font-family: sans-serif; + font-size: 100%; + background-color: #11303d; + color: #000; + margin: 0; + padding: 0; +} + +div.document { + background-color: #1c4e63; +} + +div.documentwrapper { + float: left; + width: 100%; +} + +div.bodywrapper { + margin: 0 0 0 230px; +} + +div.body { + background-color: #ffffff; + color: #000000; + padding: 0 20px 30px 20px; +} + +div.footer { + color: #ffffff; + width: 100%; + padding: 9px 0 9px 0; + text-align: center; + font-size: 75%; +} + +div.footer a { + color: #ffffff; + text-decoration: underline; +} + +div.related { + background-color: #133f52; + line-height: 30px; + color: #ffffff; +} + +div.related a { + color: #ffffff; +} + +div.sphinxsidebar { +} + +div.sphinxsidebar h3 { + font-family: 'Trebuchet MS', sans-serif; + color: #ffffff; + font-size: 1.4em; + font-weight: normal; + margin: 0; + padding: 0; +} + +div.sphinxsidebar h3 a { + color: #ffffff; +} + +div.sphinxsidebar h4 { + font-family: 'Trebuchet MS', sans-serif; + color: #ffffff; + font-size: 1.3em; + font-weight: normal; + margin: 5px 0 0 0; + padding: 0; +} + +div.sphinxsidebar p { + color: #ffffff; +} + +div.sphinxsidebar p.topless { + margin: 5px 10px 10px 10px; +} + +div.sphinxsidebar ul { + margin: 10px; + padding: 0; + color: #ffffff; +} + +div.sphinxsidebar a { + color: #98dbcc; +} + +div.sphinxsidebar input { + border: 1px solid #98dbcc; + font-family: sans-serif; + font-size: 1em; +} + + + +/* -- hyperlink styles ------------------------------------------------------ */ + +a { + color: #355f7c; + text-decoration: none; +} + +a:visited { + color: #355f7c; + text-decoration: none; +} + +a:hover { + text-decoration: underline; +} + + + +/* -- body styles ----------------------------------------------------------- */ + +div.body h1, +div.body h2, +div.body h3, +div.body h4, 
+div.body h5, +div.body h6 { + font-family: 'Trebuchet MS', sans-serif; + background-color: #f2f2f2; + font-weight: normal; + color: #20435c; + border-bottom: 1px solid #ccc; + margin: 20px -20px 10px -20px; + padding: 3px 0 3px 10px; +} + +div.body h1 { margin-top: 0; font-size: 200%; } +div.body h2 { font-size: 160%; } +div.body h3 { font-size: 140%; } +div.body h4 { font-size: 120%; } +div.body h5 { font-size: 110%; } +div.body h6 { font-size: 100%; } + +a.headerlink { + color: #c60f0f; + font-size: 0.8em; + padding: 0 4px 0 4px; + text-decoration: none; +} + +a.headerlink:hover { + background-color: #c60f0f; + color: white; +} + +div.body p, div.body dd, div.body li, div.body blockquote { + text-align: justify; + line-height: 130%; +} + +div.admonition p.admonition-title + p { + display: inline; +} + +div.admonition p { + margin-bottom: 5px; +} + +div.admonition pre { + margin-bottom: 5px; +} + +div.admonition ul, div.admonition ol { + margin-bottom: 5px; +} + +div.note { + background-color: #eee; + border: 1px solid #ccc; +} + +div.seealso { + background-color: #ffc; + border: 1px solid #ff6; +} + +div.topic { + background-color: #eee; +} + +div.warning { + background-color: #ffe4e4; + border: 1px solid #f66; +} + +p.admonition-title { + display: inline; +} + +p.admonition-title:after { + content: ":"; +} + +pre { + padding: 5px; + background-color: #eeffcc; + color: #333333; + line-height: 120%; + border: 1px solid #ac9; + border-left: none; + border-right: none; +} + +code { + background-color: #ecf0f3; + padding: 0 1px 0 1px; + font-size: 0.95em; +} + +th { + background-color: #ede; +} + +.warning code { + background: #efc2c2; +} + +.note code { + background: #d6d6d6; +} + +.viewcode-back { + font-family: sans-serif; +} + +div.viewcode-block:target { + background-color: #f4debf; + border-top: 1px solid #ac9; + border-bottom: 1px solid #ac9; +} + +div.code-block-caption { + color: #efefef; + background-color: #1c4e63; +} \ No newline at end of file diff 
--git a/release_doc/0.9.0/doc/_static/comment-bright.png b/release_doc/0.9.0/doc/_static/comment-bright.png new file mode 100644 index 0000000000000000000000000000000000000000..551517b8c83b76f734ff791f847829a760ad1903 Binary files /dev/null and b/release_doc/0.9.0/doc/_static/comment-bright.png differ diff --git a/release_doc/0.9.0/doc/_static/comment-close.png b/release_doc/0.9.0/doc/_static/comment-close.png new file mode 100644 index 0000000000000000000000000000000000000000..09b54be46da3f0d4a5061da289dc91d8a2cdbc9c Binary files /dev/null and b/release_doc/0.9.0/doc/_static/comment-close.png differ diff --git a/release_doc/0.9.0/doc/_static/comment.png b/release_doc/0.9.0/doc/_static/comment.png new file mode 100644 index 0000000000000000000000000000000000000000..92feb52b8824c6b0f59b658b1196c61de9162a95 Binary files /dev/null and b/release_doc/0.9.0/doc/_static/comment.png differ diff --git a/release_doc/0.9.0/doc/_static/doctools.js b/release_doc/0.9.0/doc/_static/doctools.js new file mode 100644 index 0000000000000000000000000000000000000000..816349563588e87ca99c7cf2d6e54268e52e761d --- /dev/null +++ b/release_doc/0.9.0/doc/_static/doctools.js @@ -0,0 +1,287 @@ +/* + * doctools.js + * ~~~~~~~~~~~ + * + * Sphinx JavaScript utilities for all documentation. + * + * :copyright: Copyright 2007-2016 by the Sphinx team, see AUTHORS. + * :license: BSD, see LICENSE for details. 
+ * + */ + +/** + * select a different prefix for underscore + */ +$u = _.noConflict(); + +/** + * make the code below compatible with browsers without + * an installed firebug like debugger +if (!window.console || !console.firebug) { + var names = ["log", "debug", "info", "warn", "error", "assert", "dir", + "dirxml", "group", "groupEnd", "time", "timeEnd", "count", "trace", + "profile", "profileEnd"]; + window.console = {}; + for (var i = 0; i < names.length; ++i) + window.console[names[i]] = function() {}; +} + */ + +/** + * small helper function to urldecode strings + */ +jQuery.urldecode = function(x) { + return decodeURIComponent(x).replace(/\+/g, ' '); +}; + +/** + * small helper function to urlencode strings + */ +jQuery.urlencode = encodeURIComponent; + +/** + * This function returns the parsed url parameters of the + * current request. Multiple values per key are supported, + * it will always return arrays of strings for the value parts. + */ +jQuery.getQueryParameters = function(s) { + if (typeof s == 'undefined') + s = document.location.search; + var parts = s.substr(s.indexOf('?') + 1).split('&'); + var result = {}; + for (var i = 0; i < parts.length; i++) { + var tmp = parts[i].split('=', 2); + var key = jQuery.urldecode(tmp[0]); + var value = jQuery.urldecode(tmp[1]); + if (key in result) + result[key].push(value); + else + result[key] = [value]; + } + return result; +}; + +/** + * highlight a given string on a jquery object by wrapping it in + * span elements with the given class name. 
+ */ +jQuery.fn.highlightText = function(text, className) { + function highlight(node) { + if (node.nodeType == 3) { + var val = node.nodeValue; + var pos = val.toLowerCase().indexOf(text); + if (pos >= 0 && !jQuery(node.parentNode).hasClass(className)) { + var span = document.createElement("span"); + span.className = className; + span.appendChild(document.createTextNode(val.substr(pos, text.length))); + node.parentNode.insertBefore(span, node.parentNode.insertBefore( + document.createTextNode(val.substr(pos + text.length)), + node.nextSibling)); + node.nodeValue = val.substr(0, pos); + } + } + else if (!jQuery(node).is("button, select, textarea")) { + jQuery.each(node.childNodes, function() { + highlight(this); + }); + } + } + return this.each(function() { + highlight(this); + }); +}; + +/* + * backward compatibility for jQuery.browser + * This will be supported until firefox bug is fixed. + */ +if (!jQuery.browser) { + jQuery.uaMatch = function(ua) { + ua = ua.toLowerCase(); + + var match = /(chrome)[ \/]([\w.]+)/.exec(ua) || + /(webkit)[ \/]([\w.]+)/.exec(ua) || + /(opera)(?:.*version|)[ \/]([\w.]+)/.exec(ua) || + /(msie) ([\w.]+)/.exec(ua) || + ua.indexOf("compatible") < 0 && /(mozilla)(?:.*? rv:([\w.]+)|)/.exec(ua) || + []; + + return { + browser: match[ 1 ] || "", + version: match[ 2 ] || "0" + }; + }; + jQuery.browser = {}; + jQuery.browser[jQuery.uaMatch(navigator.userAgent).browser] = true; +} + +/** + * Small JavaScript module for the documentation. + */ +var Documentation = { + + init : function() { + this.fixFirefoxAnchorBug(); + this.highlightSearchWords(); + this.initIndexTable(); + + }, + + /** + * i18n support + */ + TRANSLATIONS : {}, + PLURAL_EXPR : function(n) { return n == 1 ? 
0 : 1; }, + LOCALE : 'unknown', + + // gettext and ngettext don't access this so that the functions + // can safely bound to a different name (_ = Documentation.gettext) + gettext : function(string) { + var translated = Documentation.TRANSLATIONS[string]; + if (typeof translated == 'undefined') + return string; + return (typeof translated == 'string') ? translated : translated[0]; + }, + + ngettext : function(singular, plural, n) { + var translated = Documentation.TRANSLATIONS[singular]; + if (typeof translated == 'undefined') + return (n == 1) ? singular : plural; + return translated[Documentation.PLURALEXPR(n)]; + }, + + addTranslations : function(catalog) { + for (var key in catalog.messages) + this.TRANSLATIONS[key] = catalog.messages[key]; + this.PLURAL_EXPR = new Function('n', 'return +(' + catalog.plural_expr + ')'); + this.LOCALE = catalog.locale; + }, + + /** + * add context elements like header anchor links + */ + addContextElements : function() { + $('div[id] > :header:first').each(function() { + $('\u00B6'). + attr('href', '#' + this.id). + attr('title', _('Permalink to this headline')). + appendTo(this); + }); + $('dt[id]').each(function() { + $('\u00B6'). + attr('href', '#' + this.id). + attr('title', _('Permalink to this definition')). + appendTo(this); + }); + }, + + /** + * workaround a firefox stupidity + * see: https://bugzilla.mozilla.org/show_bug.cgi?id=645075 + */ + fixFirefoxAnchorBug : function() { + if (document.location.hash) + window.setTimeout(function() { + document.location.href += ''; + }, 10); + }, + + /** + * highlight the search words provided in the url in the text + */ + highlightSearchWords : function() { + var params = $.getQueryParameters(); + var terms = (params.highlight) ? 
params.highlight[0].split(/\s+/) : []; + if (terms.length) { + var body = $('div.body'); + if (!body.length) { + body = $('body'); + } + window.setTimeout(function() { + $.each(terms, function() { + body.highlightText(this.toLowerCase(), 'highlighted'); + }); + }, 10); + $('') + .appendTo($('#searchbox')); + } + }, + + /** + * init the domain index toggle buttons + */ + initIndexTable : function() { + var togglers = $('img.toggler').click(function() { + var src = $(this).attr('src'); + var idnum = $(this).attr('id').substr(7); + $('tr.cg-' + idnum).toggle(); + if (src.substr(-9) == 'minus.png') + $(this).attr('src', src.substr(0, src.length-9) + 'plus.png'); + else + $(this).attr('src', src.substr(0, src.length-8) + 'minus.png'); + }).css('display', ''); + if (DOCUMENTATION_OPTIONS.COLLAPSE_INDEX) { + togglers.click(); + } + }, + + /** + * helper function to hide the search marks again + */ + hideSearchWords : function() { + $('#searchbox .highlight-link').fadeOut(300); + $('span.highlighted').removeClass('highlighted'); + }, + + /** + * make the url absolute + */ + makeURL : function(relativeURL) { + return DOCUMENTATION_OPTIONS.URL_ROOT + '/' + relativeURL; + }, + + /** + * get the current relative url + */ + getCurrentURL : function() { + var path = document.location.pathname; + var parts = path.split(/\//); + $.each(DOCUMENTATION_OPTIONS.URL_ROOT.split(/\//), function() { + if (this == '..') + parts.pop(); + }); + var url = parts.join('/'); + return path.substring(url.lastIndexOf('/') + 1, path.length - 1); + }, + + initOnKeyListeners: function() { + $(document).keyup(function(event) { + var activeElementType = document.activeElement.tagName; + // don't navigate when in search box or textarea + if (activeElementType !== 'TEXTAREA' && activeElementType !== 'INPUT' && activeElementType !== 'SELECT') { + switch (event.keyCode) { + case 37: // left + var prevHref = $('link[rel="prev"]').prop('href'); + if (prevHref) { + window.location.href = prevHref; + return 
false; + } + case 39: // right + var nextHref = $('link[rel="next"]').prop('href'); + if (nextHref) { + window.location.href = nextHref; + return false; + } + } + } + }); + } +}; + +// quick alias for translations +_ = Documentation.gettext; + +$(document).ready(function() { + Documentation.init(); +}); \ No newline at end of file diff --git a/release_doc/0.9.0/doc/_static/down-pressed.png b/release_doc/0.9.0/doc/_static/down-pressed.png new file mode 100644 index 0000000000000000000000000000000000000000..7c30d004b71b32bb2fc06b3bd4dc8278baab0946 Binary files /dev/null and b/release_doc/0.9.0/doc/_static/down-pressed.png differ diff --git a/release_doc/0.9.0/doc/_static/down.png b/release_doc/0.9.0/doc/_static/down.png new file mode 100644 index 0000000000000000000000000000000000000000..f48098a43b0c36342db9e1a9a7372e79b2484a59 Binary files /dev/null and b/release_doc/0.9.0/doc/_static/down.png differ diff --git a/release_doc/0.9.0/doc/_static/file.png b/release_doc/0.9.0/doc/_static/file.png new file mode 100644 index 0000000000000000000000000000000000000000..254c60bfbe2715ae2edca48ebccfd074deb8031d Binary files /dev/null and b/release_doc/0.9.0/doc/_static/file.png differ diff --git a/release_doc/0.9.0/doc/_static/jquery-1.11.1.js b/release_doc/0.9.0/doc/_static/jquery-1.11.1.js new file mode 100644 index 0000000000000000000000000000000000000000..d4b67f7e6c1a94df167f31657769717a71581066 --- /dev/null +++ b/release_doc/0.9.0/doc/_static/jquery-1.11.1.js @@ -0,0 +1,10308 @@ +/*! + * jQuery JavaScript Library v1.11.1 + * http://jquery.com/ + * + * Includes Sizzle.js + * http://sizzlejs.com/ + * + * Copyright 2005, 2014 jQuery Foundation, Inc. 
and other contributors + * Released under the MIT license + * http://jquery.org/license + * + * Date: 2014-05-01T17:42Z + */ + +(function( global, factory ) { + + if ( typeof module === "object" && typeof module.exports === "object" ) { + // For CommonJS and CommonJS-like environments where a proper window is present, + // execute the factory and get jQuery + // For environments that do not inherently posses a window with a document + // (such as Node.js), expose a jQuery-making factory as module.exports + // This accentuates the need for the creation of a real window + // e.g. var jQuery = require("jquery")(window); + // See ticket #14549 for more info + module.exports = global.document ? + factory( global, true ) : + function( w ) { + if ( !w.document ) { + throw new Error( "jQuery requires a window with a document" ); + } + return factory( w ); + }; + } else { + factory( global ); + } + +// Pass this if window is not defined yet +}(typeof window !== "undefined" ? window : this, function( window, noGlobal ) { + +// Can't do this because several apps including ASP.NET trace +// the stack via arguments.caller.callee and Firefox dies if +// you try to trace through "use strict" call chains. 
(#13335) +// Support: Firefox 18+ +// + +var deletedIds = []; + +var slice = deletedIds.slice; + +var concat = deletedIds.concat; + +var push = deletedIds.push; + +var indexOf = deletedIds.indexOf; + +var class2type = {}; + +var toString = class2type.toString; + +var hasOwn = class2type.hasOwnProperty; + +var support = {}; + + + +var + version = "1.11.1", + + // Define a local copy of jQuery + jQuery = function( selector, context ) { + // The jQuery object is actually just the init constructor 'enhanced' + // Need init if jQuery is called (just allow error to be thrown if not included) + return new jQuery.fn.init( selector, context ); + }, + + // Support: Android<4.1, IE<9 + // Make sure we trim BOM and NBSP + rtrim = /^[\s\uFEFF\xA0]+|[\s\uFEFF\xA0]+$/g, + + // Matches dashed string for camelizing + rmsPrefix = /^-ms-/, + rdashAlpha = /-([\da-z])/gi, + + // Used by jQuery.camelCase as callback to replace() + fcamelCase = function( all, letter ) { + return letter.toUpperCase(); + }; + +jQuery.fn = jQuery.prototype = { + // The current version of jQuery being used + jquery: version, + + constructor: jQuery, + + // Start with an empty selector + selector: "", + + // The default length of a jQuery object is 0 + length: 0, + + toArray: function() { + return slice.call( this ); + }, + + // Get the Nth element in the matched element set OR + // Get the whole matched element set as a clean array + get: function( num ) { + return num != null ? + + // Return just the one element from the set + ( num < 0 ? 
this[ num + this.length ] : this[ num ] ) : + + // Return all the elements in a clean array + slice.call( this ); + }, + + // Take an array of elements and push it onto the stack + // (returning the new matched element set) + pushStack: function( elems ) { + + // Build a new jQuery matched element set + var ret = jQuery.merge( this.constructor(), elems ); + + // Add the old object onto the stack (as a reference) + ret.prevObject = this; + ret.context = this.context; + + // Return the newly-formed element set + return ret; + }, + + // Execute a callback for every element in the matched set. + // (You can seed the arguments with an array of args, but this is + // only used internally.) + each: function( callback, args ) { + return jQuery.each( this, callback, args ); + }, + + map: function( callback ) { + return this.pushStack( jQuery.map(this, function( elem, i ) { + return callback.call( elem, i, elem ); + })); + }, + + slice: function() { + return this.pushStack( slice.apply( this, arguments ) ); + }, + + first: function() { + return this.eq( 0 ); + }, + + last: function() { + return this.eq( -1 ); + }, + + eq: function( i ) { + var len = this.length, + j = +i + ( i < 0 ? len : 0 ); + return this.pushStack( j >= 0 && j < len ? [ this[j] ] : [] ); + }, + + end: function() { + return this.prevObject || this.constructor(null); + }, + + // For internal use only. + // Behaves like an Array's method, not like a jQuery method. 
+ push: push, + sort: deletedIds.sort, + splice: deletedIds.splice +}; + +jQuery.extend = jQuery.fn.extend = function() { + var src, copyIsArray, copy, name, options, clone, + target = arguments[0] || {}, + i = 1, + length = arguments.length, + deep = false; + + // Handle a deep copy situation + if ( typeof target === "boolean" ) { + deep = target; + + // skip the boolean and the target + target = arguments[ i ] || {}; + i++; + } + + // Handle case when target is a string or something (possible in deep copy) + if ( typeof target !== "object" && !jQuery.isFunction(target) ) { + target = {}; + } + + // extend jQuery itself if only one argument is passed + if ( i === length ) { + target = this; + i--; + } + + for ( ; i < length; i++ ) { + // Only deal with non-null/undefined values + if ( (options = arguments[ i ]) != null ) { + // Extend the base object + for ( name in options ) { + src = target[ name ]; + copy = options[ name ]; + + // Prevent never-ending loop + if ( target === copy ) { + continue; + } + + // Recurse if we're merging plain objects or arrays + if ( deep && copy && ( jQuery.isPlainObject(copy) || (copyIsArray = jQuery.isArray(copy)) ) ) { + if ( copyIsArray ) { + copyIsArray = false; + clone = src && jQuery.isArray(src) ? src : []; + + } else { + clone = src && jQuery.isPlainObject(src) ? src : {}; + } + + // Never move original objects, clone them + target[ name ] = jQuery.extend( deep, clone, copy ); + + // Don't bring in undefined values + } else if ( copy !== undefined ) { + target[ name ] = copy; + } + } + } + } + + // Return the modified object + return target; +}; + +jQuery.extend({ + // Unique for each copy of jQuery on the page + expando: "jQuery" + ( version + Math.random() ).replace( /\D/g, "" ), + + // Assume jQuery is ready without the ready module + isReady: true, + + error: function( msg ) { + throw new Error( msg ); + }, + + noop: function() {}, + + // See test/unit/core.js for details concerning isFunction. 
+ // Since version 1.3, DOM methods and functions like alert + // aren't supported. They return false on IE (#2968). + isFunction: function( obj ) { + return jQuery.type(obj) === "function"; + }, + + isArray: Array.isArray || function( obj ) { + return jQuery.type(obj) === "array"; + }, + + isWindow: function( obj ) { + /* jshint eqeqeq: false */ + return obj != null && obj == obj.window; + }, + + isNumeric: function( obj ) { + // parseFloat NaNs numeric-cast false positives (null|true|false|"") + // ...but misinterprets leading-number strings, particularly hex literals ("0x...") + // subtraction forces infinities to NaN + return !jQuery.isArray( obj ) && obj - parseFloat( obj ) >= 0; + }, + + isEmptyObject: function( obj ) { + var name; + for ( name in obj ) { + return false; + } + return true; + }, + + isPlainObject: function( obj ) { + var key; + + // Must be an Object. + // Because of IE, we also have to check the presence of the constructor property. + // Make sure that DOM nodes and window objects don't pass through, as well + if ( !obj || jQuery.type(obj) !== "object" || obj.nodeType || jQuery.isWindow( obj ) ) { + return false; + } + + try { + // Not own constructor property must be Object + if ( obj.constructor && + !hasOwn.call(obj, "constructor") && + !hasOwn.call(obj.constructor.prototype, "isPrototypeOf") ) { + return false; + } + } catch ( e ) { + // IE8,9 Will throw exceptions on certain host objects #9897 + return false; + } + + // Support: IE<9 + // Handle iteration over inherited properties before own properties. + if ( support.ownLast ) { + for ( key in obj ) { + return hasOwn.call( obj, key ); + } + } + + // Own properties are enumerated firstly, so to speed up, + // if last one is own, then all properties are own. + for ( key in obj ) {} + + return key === undefined || hasOwn.call( obj, key ); + }, + + type: function( obj ) { + if ( obj == null ) { + return obj + ""; + } + return typeof obj === "object" || typeof obj === "function" ? 
+ class2type[ toString.call(obj) ] || "object" : + typeof obj; + }, + + // Evaluates a script in a global context + // Workarounds based on findings by Jim Driscoll + // http://weblogs.java.net/blog/driscoll/archive/2009/09/08/eval-javascript-global-context + globalEval: function( data ) { + if ( data && jQuery.trim( data ) ) { + // We use execScript on Internet Explorer + // We use an anonymous function so that context is window + // rather than jQuery in Firefox + ( window.execScript || function( data ) { + window[ "eval" ].call( window, data ); + } )( data ); + } + }, + + // Convert dashed to camelCase; used by the css and data modules + // Microsoft forgot to hump their vendor prefix (#9572) + camelCase: function( string ) { + return string.replace( rmsPrefix, "ms-" ).replace( rdashAlpha, fcamelCase ); + }, + + nodeName: function( elem, name ) { + return elem.nodeName && elem.nodeName.toLowerCase() === name.toLowerCase(); + }, + + // args is for internal usage only + each: function( obj, callback, args ) { + var value, + i = 0, + length = obj.length, + isArray = isArraylike( obj ); + + if ( args ) { + if ( isArray ) { + for ( ; i < length; i++ ) { + value = callback.apply( obj[ i ], args ); + + if ( value === false ) { + break; + } + } + } else { + for ( i in obj ) { + value = callback.apply( obj[ i ], args ); + + if ( value === false ) { + break; + } + } + } + + // A special, fast, case for the most common use of each + } else { + if ( isArray ) { + for ( ; i < length; i++ ) { + value = callback.call( obj[ i ], i, obj[ i ] ); + + if ( value === false ) { + break; + } + } + } else { + for ( i in obj ) { + value = callback.call( obj[ i ], i, obj[ i ] ); + + if ( value === false ) { + break; + } + } + } + } + + return obj; + }, + + // Support: Android<4.1, IE<9 + trim: function( text ) { + return text == null ? 
+ "" : + ( text + "" ).replace( rtrim, "" ); + }, + + // results is for internal usage only + makeArray: function( arr, results ) { + var ret = results || []; + + if ( arr != null ) { + if ( isArraylike( Object(arr) ) ) { + jQuery.merge( ret, + typeof arr === "string" ? + [ arr ] : arr + ); + } else { + push.call( ret, arr ); + } + } + + return ret; + }, + + inArray: function( elem, arr, i ) { + var len; + + if ( arr ) { + if ( indexOf ) { + return indexOf.call( arr, elem, i ); + } + + len = arr.length; + i = i ? i < 0 ? Math.max( 0, len + i ) : i : 0; + + for ( ; i < len; i++ ) { + // Skip accessing in sparse arrays + if ( i in arr && arr[ i ] === elem ) { + return i; + } + } + } + + return -1; + }, + + merge: function( first, second ) { + var len = +second.length, + j = 0, + i = first.length; + + while ( j < len ) { + first[ i++ ] = second[ j++ ]; + } + + // Support: IE<9 + // Workaround casting of .length to NaN on otherwise arraylike objects (e.g., NodeLists) + if ( len !== len ) { + while ( second[j] !== undefined ) { + first[ i++ ] = second[ j++ ]; + } + } + + first.length = i; + + return first; + }, + + grep: function( elems, callback, invert ) { + var callbackInverse, + matches = [], + i = 0, + length = elems.length, + callbackExpect = !invert; + + // Go through the array, only saving the items + // that pass the validator function + for ( ; i < length; i++ ) { + callbackInverse = !callback( elems[ i ], i ); + if ( callbackInverse !== callbackExpect ) { + matches.push( elems[ i ] ); + } + } + + return matches; + }, + + // arg is for internal usage only + map: function( elems, callback, arg ) { + var value, + i = 0, + length = elems.length, + isArray = isArraylike( elems ), + ret = []; + + // Go through the array, translating each of the items to their new values + if ( isArray ) { + for ( ; i < length; i++ ) { + value = callback( elems[ i ], i, arg ); + + if ( value != null ) { + ret.push( value ); + } + } + + // Go through every key on the object, + } else 
{ + for ( i in elems ) { + value = callback( elems[ i ], i, arg ); + + if ( value != null ) { + ret.push( value ); + } + } + } + + // Flatten any nested arrays + return concat.apply( [], ret ); + }, + + // A global GUID counter for objects + guid: 1, + + // Bind a function to a context, optionally partially applying any + // arguments. + proxy: function( fn, context ) { + var args, proxy, tmp; + + if ( typeof context === "string" ) { + tmp = fn[ context ]; + context = fn; + fn = tmp; + } + + // Quick check to determine if target is callable, in the spec + // this throws a TypeError, but we will just return undefined. + if ( !jQuery.isFunction( fn ) ) { + return undefined; + } + + // Simulated bind + args = slice.call( arguments, 2 ); + proxy = function() { + return fn.apply( context || this, args.concat( slice.call( arguments ) ) ); + }; + + // Set the guid of unique handler to the same of original handler, so it can be removed + proxy.guid = fn.guid = fn.guid || jQuery.guid++; + + return proxy; + }, + + now: function() { + return +( new Date() ); + }, + + // jQuery.support is not used in Core but other projects attach their + // properties to it so it needs to exist. + support: support +}); + +// Populate the class2type map +jQuery.each("Boolean Number String Function Array Date RegExp Object Error".split(" "), function(i, name) { + class2type[ "[object " + name + "]" ] = name.toLowerCase(); +}); + +function isArraylike( obj ) { + var length = obj.length, + type = jQuery.type( obj ); + + if ( type === "function" || jQuery.isWindow( obj ) ) { + return false; + } + + if ( obj.nodeType === 1 && length ) { + return true; + } + + return type === "array" || length === 0 || + typeof length === "number" && length > 0 && ( length - 1 ) in obj; +} +var Sizzle = +/*! + * Sizzle CSS Selector Engine v1.10.19 + * http://sizzlejs.com/ + * + * Copyright 2013 jQuery Foundation, Inc. 
and other contributors + * Released under the MIT license + * http://jquery.org/license + * + * Date: 2014-04-18 + */ +(function( window ) { + +var i, + support, + Expr, + getText, + isXML, + tokenize, + compile, + select, + outermostContext, + sortInput, + hasDuplicate, + + // Local document vars + setDocument, + document, + docElem, + documentIsHTML, + rbuggyQSA, + rbuggyMatches, + matches, + contains, + + // Instance-specific data + expando = "sizzle" + -(new Date()), + preferredDoc = window.document, + dirruns = 0, + done = 0, + classCache = createCache(), + tokenCache = createCache(), + compilerCache = createCache(), + sortOrder = function( a, b ) { + if ( a === b ) { + hasDuplicate = true; + } + return 0; + }, + + // General-purpose constants + strundefined = typeof undefined, + MAX_NEGATIVE = 1 << 31, + + // Instance methods + hasOwn = ({}).hasOwnProperty, + arr = [], + pop = arr.pop, + push_native = arr.push, + push = arr.push, + slice = arr.slice, + // Use a stripped-down indexOf if we can't use a native one + indexOf = arr.indexOf || function( elem ) { + var i = 0, + len = this.length; + for ( ; i < len; i++ ) { + if ( this[i] === elem ) { + return i; + } + } + return -1; + }, + + booleans = "checked|selected|async|autofocus|autoplay|controls|defer|disabled|hidden|ismap|loop|multiple|open|readonly|required|scoped", + + // Regular expressions + + // Whitespace characters http://www.w3.org/TR/css3-selectors/#whitespace + whitespace = "[\\x20\\t\\r\\n\\f]", + // http://www.w3.org/TR/css3-syntax/#characters + characterEncoding = "(?:\\\\.|[\\w-]|[^\\x00-\\xa0])+", + + // Loosely modeled on CSS identifier characters + // An unquoted value should be a CSS identifier http://www.w3.org/TR/css3-selectors/#attribute-selectors + // Proper syntax: http://www.w3.org/TR/CSS21/syndata.html#value-def-identifier + identifier = characterEncoding.replace( "w", "w#" ), + + // Attribute selectors: http://www.w3.org/TR/selectors/#attribute-selectors + attributes = "\\[" + 
whitespace + "*(" + characterEncoding + ")(?:" + whitespace + + // Operator (capture 2) + "*([*^$|!~]?=)" + whitespace + + // "Attribute values must be CSS identifiers [capture 5] or strings [capture 3 or capture 4]" + "*(?:'((?:\\\\.|[^\\\\'])*)'|\"((?:\\\\.|[^\\\\\"])*)\"|(" + identifier + "))|)" + whitespace + + "*\\]", + + pseudos = ":(" + characterEncoding + ")(?:\\((" + + // To reduce the number of selectors needing tokenize in the preFilter, prefer arguments: + // 1. quoted (capture 3; capture 4 or capture 5) + "('((?:\\\\.|[^\\\\'])*)'|\"((?:\\\\.|[^\\\\\"])*)\")|" + + // 2. simple (capture 6) + "((?:\\\\.|[^\\\\()[\\]]|" + attributes + ")*)|" + + // 3. anything else (capture 2) + ".*" + + ")\\)|)", + + // Leading and non-escaped trailing whitespace, capturing some non-whitespace characters preceding the latter + rtrim = new RegExp( "^" + whitespace + "+|((?:^|[^\\\\])(?:\\\\.)*)" + whitespace + "+$", "g" ), + + rcomma = new RegExp( "^" + whitespace + "*," + whitespace + "*" ), + rcombinators = new RegExp( "^" + whitespace + "*([>+~]|" + whitespace + ")" + whitespace + "*" ), + + rattributeQuotes = new RegExp( "=" + whitespace + "*([^\\]'\"]*?)" + whitespace + "*\\]", "g" ), + + rpseudo = new RegExp( pseudos ), + ridentifier = new RegExp( "^" + identifier + "$" ), + + matchExpr = { + "ID": new RegExp( "^#(" + characterEncoding + ")" ), + "CLASS": new RegExp( "^\\.(" + characterEncoding + ")" ), + "TAG": new RegExp( "^(" + characterEncoding.replace( "w", "w*" ) + ")" ), + "ATTR": new RegExp( "^" + attributes ), + "PSEUDO": new RegExp( "^" + pseudos ), + "CHILD": new RegExp( "^:(only|first|last|nth|nth-last)-(child|of-type)(?:\\(" + whitespace + + "*(even|odd|(([+-]|)(\\d*)n|)" + whitespace + "*(?:([+-]|)" + whitespace + + "*(\\d+)|))" + whitespace + "*\\)|)", "i" ), + "bool": new RegExp( "^(?:" + booleans + ")$", "i" ), + // For use in libraries implementing .is() + // We use this for POS matching in `select` + "needsContext": new RegExp( "^" + whitespace + 
"*[>+~]|:(even|odd|eq|gt|lt|nth|first|last)(?:\\(" + + whitespace + "*((?:-\\d)?\\d*)" + whitespace + "*\\)|)(?=[^-]|$)", "i" ) + }, + + rinputs = /^(?:input|select|textarea|button)$/i, + rheader = /^h\d$/i, + + rnative = /^[^{]+\{\s*\[native \w/, + + // Easily-parseable/retrievable ID or TAG or CLASS selectors + rquickExpr = /^(?:#([\w-]+)|(\w+)|\.([\w-]+))$/, + + rsibling = /[+~]/, + rescape = /'|\\/g, + + // CSS escapes http://www.w3.org/TR/CSS21/syndata.html#escaped-characters + runescape = new RegExp( "\\\\([\\da-f]{1,6}" + whitespace + "?|(" + whitespace + ")|.)", "ig" ), + funescape = function( _, escaped, escapedWhitespace ) { + var high = "0x" + escaped - 0x10000; + // NaN means non-codepoint + // Support: Firefox<24 + // Workaround erroneous numeric interpretation of +"0x" + return high !== high || escapedWhitespace ? + escaped : + high < 0 ? + // BMP codepoint + String.fromCharCode( high + 0x10000 ) : + // Supplemental Plane codepoint (surrogate pair) + String.fromCharCode( high >> 10 | 0xD800, high & 0x3FF | 0xDC00 ); + }; + +// Optimize for push.apply( _, NodeList ) +try { + push.apply( + (arr = slice.call( preferredDoc.childNodes )), + preferredDoc.childNodes + ); + // Support: Android<4.0 + // Detect silently failing push.apply + arr[ preferredDoc.childNodes.length ].nodeType; +} catch ( e ) { + push = { apply: arr.length ? + + // Leverage slice if possible + function( target, els ) { + push_native.apply( target, slice.call(els) ); + } : + + // Support: IE<9 + // Otherwise append directly + function( target, els ) { + var j = target.length, + i = 0; + // Can't trust NodeList.length + while ( (target[j++] = els[i++]) ) {} + target.length = j - 1; + } + }; +} + +function Sizzle( selector, context, results, seed ) { + var match, elem, m, nodeType, + // QSA vars + i, groups, old, nid, newContext, newSelector; + + if ( ( context ? 
context.ownerDocument || context : preferredDoc ) !== document ) { + setDocument( context ); + } + + context = context || document; + results = results || []; + + if ( !selector || typeof selector !== "string" ) { + return results; + } + + if ( (nodeType = context.nodeType) !== 1 && nodeType !== 9 ) { + return []; + } + + if ( documentIsHTML && !seed ) { + + // Shortcuts + if ( (match = rquickExpr.exec( selector )) ) { + // Speed-up: Sizzle("#ID") + if ( (m = match[1]) ) { + if ( nodeType === 9 ) { + elem = context.getElementById( m ); + // Check parentNode to catch when Blackberry 4.6 returns + // nodes that are no longer in the document (jQuery #6963) + if ( elem && elem.parentNode ) { + // Handle the case where IE, Opera, and Webkit return items + // by name instead of ID + if ( elem.id === m ) { + results.push( elem ); + return results; + } + } else { + return results; + } + } else { + // Context is not a document + if ( context.ownerDocument && (elem = context.ownerDocument.getElementById( m )) && + contains( context, elem ) && elem.id === m ) { + results.push( elem ); + return results; + } + } + + // Speed-up: Sizzle("TAG") + } else if ( match[2] ) { + push.apply( results, context.getElementsByTagName( selector ) ); + return results; + + // Speed-up: Sizzle(".CLASS") + } else if ( (m = match[3]) && support.getElementsByClassName && context.getElementsByClassName ) { + push.apply( results, context.getElementsByClassName( m ) ); + return results; + } + } + + // QSA path + if ( support.qsa && (!rbuggyQSA || !rbuggyQSA.test( selector )) ) { + nid = old = expando; + newContext = context; + newSelector = nodeType === 9 && selector; + + // qSA works strangely on Element-rooted queries + // We can work around this by specifying an extra ID on the root + // and working up from there (Thanks to Andrew Dupont for the technique) + // IE 8 doesn't work on object elements + if ( nodeType === 1 && context.nodeName.toLowerCase() !== "object" ) { + groups = tokenize( selector 
); + + if ( (old = context.getAttribute("id")) ) { + nid = old.replace( rescape, "\\$&" ); + } else { + context.setAttribute( "id", nid ); + } + nid = "[id='" + nid + "'] "; + + i = groups.length; + while ( i-- ) { + groups[i] = nid + toSelector( groups[i] ); + } + newContext = rsibling.test( selector ) && testContext( context.parentNode ) || context; + newSelector = groups.join(","); + } + + if ( newSelector ) { + try { + push.apply( results, + newContext.querySelectorAll( newSelector ) + ); + return results; + } catch(qsaError) { + } finally { + if ( !old ) { + context.removeAttribute("id"); + } + } + } + } + } + + // All others + return select( selector.replace( rtrim, "$1" ), context, results, seed ); +} + +/** + * Create key-value caches of limited size + * @returns {Function(string, Object)} Returns the Object data after storing it on itself with + * property name the (space-suffixed) string and (if the cache is larger than Expr.cacheLength) + * deleting the oldest entry + */ +function createCache() { + var keys = []; + + function cache( key, value ) { + // Use (key + " ") to avoid collision with native prototype properties (see Issue #157) + if ( keys.push( key + " " ) > Expr.cacheLength ) { + // Only keep the most recent entries + delete cache[ keys.shift() ]; + } + return (cache[ key + " " ] = value); + } + return cache; +} + +/** + * Mark a function for special use by Sizzle + * @param {Function} fn The function to mark + */ +function markFunction( fn ) { + fn[ expando ] = true; + return fn; +} + +/** + * Support testing using an element + * @param {Function} fn Passed the created div and expects a boolean result + */ +function assert( fn ) { + var div = document.createElement("div"); + + try { + return !!fn( div ); + } catch (e) { + return false; + } finally { + // Remove from its parent by default + if ( div.parentNode ) { + div.parentNode.removeChild( div ); + } + // release memory in IE + div = null; + } +} + +/** + * Adds the same handler for all of 
the specified attrs + * @param {String} attrs Pipe-separated list of attributes + * @param {Function} handler The method that will be applied + */ +function addHandle( attrs, handler ) { + var arr = attrs.split("|"), + i = attrs.length; + + while ( i-- ) { + Expr.attrHandle[ arr[i] ] = handler; + } +} + +/** + * Checks document order of two siblings + * @param {Element} a + * @param {Element} b + * @returns {Number} Returns less than 0 if a precedes b, greater than 0 if a follows b + */ +function siblingCheck( a, b ) { + var cur = b && a, + diff = cur && a.nodeType === 1 && b.nodeType === 1 && + ( ~b.sourceIndex || MAX_NEGATIVE ) - + ( ~a.sourceIndex || MAX_NEGATIVE ); + + // Use IE sourceIndex if available on both nodes + if ( diff ) { + return diff; + } + + // Check if b follows a + if ( cur ) { + while ( (cur = cur.nextSibling) ) { + if ( cur === b ) { + return -1; + } + } + } + + return a ? 1 : -1; +} + +/** + * Returns a function to use in pseudos for input types + * @param {String} type + */ +function createInputPseudo( type ) { + return function( elem ) { + var name = elem.nodeName.toLowerCase(); + return name === "input" && elem.type === type; + }; +} + +/** + * Returns a function to use in pseudos for buttons + * @param {String} type + */ +function createButtonPseudo( type ) { + return function( elem ) { + var name = elem.nodeName.toLowerCase(); + return (name === "input" || name === "button") && elem.type === type; + }; +} + +/** + * Returns a function to use in pseudos for positionals + * @param {Function} fn + */ +function createPositionalPseudo( fn ) { + return markFunction(function( argument ) { + argument = +argument; + return markFunction(function( seed, matches ) { + var j, + matchIndexes = fn( [], seed.length, argument ), + i = matchIndexes.length; + + // Match elements found at the specified indexes + while ( i-- ) { + if ( seed[ (j = matchIndexes[i]) ] ) { + seed[j] = !(matches[j] = seed[j]); + } + } + }); + }); +} + +/** + * Checks a node for 
validity as a Sizzle context + * @param {Element|Object=} context + * @returns {Element|Object|Boolean} The input node if acceptable, otherwise a falsy value + */ +function testContext( context ) { + return context && typeof context.getElementsByTagName !== strundefined && context; +} + +// Expose support vars for convenience +support = Sizzle.support = {}; + +/** + * Detects XML nodes + * @param {Element|Object} elem An element or a document + * @returns {Boolean} True iff elem is a non-HTML XML node + */ +isXML = Sizzle.isXML = function( elem ) { + // documentElement is verified for cases where it doesn't yet exist + // (such as loading iframes in IE - #4833) + var documentElement = elem && (elem.ownerDocument || elem).documentElement; + return documentElement ? documentElement.nodeName !== "HTML" : false; +}; + +/** + * Sets document-related variables once based on the current document + * @param {Element|Object} [doc] An element or document object to use to set the document + * @returns {Object} Returns the current document + */ +setDocument = Sizzle.setDocument = function( node ) { + var hasCompare, + doc = node ? 
node.ownerDocument || node : preferredDoc, + parent = doc.defaultView; + + // If no document and documentElement is available, return + if ( doc === document || doc.nodeType !== 9 || !doc.documentElement ) { + return document; + } + + // Set our document + document = doc; + docElem = doc.documentElement; + + // Support tests + documentIsHTML = !isXML( doc ); + + // Support: IE>8 + // If iframe document is assigned to "document" variable and if iframe has been reloaded, + // IE will throw "permission denied" error when accessing "document" variable, see jQuery #13936 + // IE6-8 do not support the defaultView property so parent will be undefined + if ( parent && parent !== parent.top ) { + // IE11 does not have attachEvent, so all must suffer + if ( parent.addEventListener ) { + parent.addEventListener( "unload", function() { + setDocument(); + }, false ); + } else if ( parent.attachEvent ) { + parent.attachEvent( "onunload", function() { + setDocument(); + }); + } + } + + /* Attributes + ---------------------------------------------------------------------- */ + + // Support: IE<8 + // Verify that getAttribute really returns attributes and not properties (excepting IE8 booleans) + support.attributes = assert(function( div ) { + div.className = "i"; + return !div.getAttribute("className"); + }); + + /* getElement(s)By* + ---------------------------------------------------------------------- */ + + // Check if getElementsByTagName("*") returns only elements + support.getElementsByTagName = assert(function( div ) { + div.appendChild( doc.createComment("") ); + return !div.getElementsByTagName("*").length; + }); + + // Check if getElementsByClassName can be trusted + support.getElementsByClassName = rnative.test( doc.getElementsByClassName ) && assert(function( div ) { + div.innerHTML = "
"; + + // Support: Safari<4 + // Catch class over-caching + div.firstChild.className = "i"; + // Support: Opera<10 + // Catch gEBCN failure to find non-leading classes + return div.getElementsByClassName("i").length === 2; + }); + + // Support: IE<10 + // Check if getElementById returns elements by name + // The broken getElementById methods don't pick up programatically-set names, + // so use a roundabout getElementsByName test + support.getById = assert(function( div ) { + docElem.appendChild( div ).id = expando; + return !doc.getElementsByName || !doc.getElementsByName( expando ).length; + }); + + // ID find and filter + if ( support.getById ) { + Expr.find["ID"] = function( id, context ) { + if ( typeof context.getElementById !== strundefined && documentIsHTML ) { + var m = context.getElementById( id ); + // Check parentNode to catch when Blackberry 4.6 returns + // nodes that are no longer in the document #6963 + return m && m.parentNode ? [ m ] : []; + } + }; + Expr.filter["ID"] = function( id ) { + var attrId = id.replace( runescape, funescape ); + return function( elem ) { + return elem.getAttribute("id") === attrId; + }; + }; + } else { + // Support: IE6/7 + // getElementById is not reliable as a find shortcut + delete Expr.find["ID"]; + + Expr.filter["ID"] = function( id ) { + var attrId = id.replace( runescape, funescape ); + return function( elem ) { + var node = typeof elem.getAttributeNode !== strundefined && elem.getAttributeNode("id"); + return node && node.value === attrId; + }; + }; + } + + // Tag + Expr.find["TAG"] = support.getElementsByTagName ? 
+ function( tag, context ) { + if ( typeof context.getElementsByTagName !== strundefined ) { + return context.getElementsByTagName( tag ); + } + } : + function( tag, context ) { + var elem, + tmp = [], + i = 0, + results = context.getElementsByTagName( tag ); + + // Filter out possible comments + if ( tag === "*" ) { + while ( (elem = results[i++]) ) { + if ( elem.nodeType === 1 ) { + tmp.push( elem ); + } + } + + return tmp; + } + return results; + }; + + // Class + Expr.find["CLASS"] = support.getElementsByClassName && function( className, context ) { + if ( typeof context.getElementsByClassName !== strundefined && documentIsHTML ) { + return context.getElementsByClassName( className ); + } + }; + + /* QSA/matchesSelector + ---------------------------------------------------------------------- */ + + // QSA and matchesSelector support + + // matchesSelector(:active) reports false when true (IE9/Opera 11.5) + rbuggyMatches = []; + + // qSa(:focus) reports false when true (Chrome 21) + // We allow this because of a bug in IE8/9 that throws an error + // whenever `document.activeElement` is accessed on an iframe + // So, we allow :focus to pass through QSA all the time to avoid the IE error + // See http://bugs.jquery.com/ticket/13378 + rbuggyQSA = []; + + if ( (support.qsa = rnative.test( doc.querySelectorAll )) ) { + // Build QSA regex + // Regex strategy adopted from Diego Perini + assert(function( div ) { + // Select is set to empty string on purpose + // This is to test IE's treatment of not explicitly + // setting a boolean content attribute, + // since its presence should be enough + // http://bugs.jquery.com/ticket/12359 + div.innerHTML = ""; + + // Support: IE8, Opera 11-12.16 + // Nothing should be selected when empty strings follow ^= or $= or *= + // The test attribute must be unknown in Opera but "safe" for WinRT + // http://msdn.microsoft.com/en-us/library/ie/hh465388.aspx#attribute_section + if ( div.querySelectorAll("[msallowclip^='']").length ) { + 
rbuggyQSA.push( "[*^$]=" + whitespace + "*(?:''|\"\")" ); + } + + // Support: IE8 + // Boolean attributes and "value" are not treated correctly + if ( !div.querySelectorAll("[selected]").length ) { + rbuggyQSA.push( "\\[" + whitespace + "*(?:value|" + booleans + ")" ); + } + + // Webkit/Opera - :checked should return selected option elements + // http://www.w3.org/TR/2011/REC-css3-selectors-20110929/#checked + // IE8 throws error here and will not see later tests + if ( !div.querySelectorAll(":checked").length ) { + rbuggyQSA.push(":checked"); + } + }); + + assert(function( div ) { + // Support: Windows 8 Native Apps + // The type and name attributes are restricted during .innerHTML assignment + var input = doc.createElement("input"); + input.setAttribute( "type", "hidden" ); + div.appendChild( input ).setAttribute( "name", "D" ); + + // Support: IE8 + // Enforce case-sensitivity of name attribute + if ( div.querySelectorAll("[name=d]").length ) { + rbuggyQSA.push( "name" + whitespace + "*[*^$|!~]?=" ); + } + + // FF 3.5 - :enabled/:disabled and hidden elements (hidden elements are still enabled) + // IE8 throws error here and will not see later tests + if ( !div.querySelectorAll(":enabled").length ) { + rbuggyQSA.push( ":enabled", ":disabled" ); + } + + // Opera 10-11 does not throw on post-comma invalid pseudos + div.querySelectorAll("*,:x"); + rbuggyQSA.push(",.*:"); + }); + } + + if ( (support.matchesSelector = rnative.test( (matches = docElem.matches || + docElem.webkitMatchesSelector || + docElem.mozMatchesSelector || + docElem.oMatchesSelector || + docElem.msMatchesSelector) )) ) { + + assert(function( div ) { + // Check to see if it's possible to do matchesSelector + // on a disconnected node (IE 9) + support.disconnectedMatch = matches.call( div, "div" ); + + // This should fail with an exception + // Gecko does not error, returns false instead + matches.call( div, "[s!='']:x" ); + rbuggyMatches.push( "!=", pseudos ); + }); + } + + rbuggyQSA = 
rbuggyQSA.length && new RegExp( rbuggyQSA.join("|") ); + rbuggyMatches = rbuggyMatches.length && new RegExp( rbuggyMatches.join("|") ); + + /* Contains + ---------------------------------------------------------------------- */ + hasCompare = rnative.test( docElem.compareDocumentPosition ); + + // Element contains another + // Purposefully does not implement inclusive descendent + // As in, an element does not contain itself + contains = hasCompare || rnative.test( docElem.contains ) ? + function( a, b ) { + var adown = a.nodeType === 9 ? a.documentElement : a, + bup = b && b.parentNode; + return a === bup || !!( bup && bup.nodeType === 1 && ( + adown.contains ? + adown.contains( bup ) : + a.compareDocumentPosition && a.compareDocumentPosition( bup ) & 16 + )); + } : + function( a, b ) { + if ( b ) { + while ( (b = b.parentNode) ) { + if ( b === a ) { + return true; + } + } + } + return false; + }; + + /* Sorting + ---------------------------------------------------------------------- */ + + // Document order sorting + sortOrder = hasCompare ? + function( a, b ) { + + // Flag for duplicate removal + if ( a === b ) { + hasDuplicate = true; + return 0; + } + + // Sort on method existence if only one input has compareDocumentPosition + var compare = !a.compareDocumentPosition - !b.compareDocumentPosition; + if ( compare ) { + return compare; + } + + // Calculate position if both inputs belong to the same document + compare = ( a.ownerDocument || a ) === ( b.ownerDocument || b ) ? 
+ a.compareDocumentPosition( b ) : + + // Otherwise we know they are disconnected + 1; + + // Disconnected nodes + if ( compare & 1 || + (!support.sortDetached && b.compareDocumentPosition( a ) === compare) ) { + + // Choose the first element that is related to our preferred document + if ( a === doc || a.ownerDocument === preferredDoc && contains(preferredDoc, a) ) { + return -1; + } + if ( b === doc || b.ownerDocument === preferredDoc && contains(preferredDoc, b) ) { + return 1; + } + + // Maintain original order + return sortInput ? + ( indexOf.call( sortInput, a ) - indexOf.call( sortInput, b ) ) : + 0; + } + + return compare & 4 ? -1 : 1; + } : + function( a, b ) { + // Exit early if the nodes are identical + if ( a === b ) { + hasDuplicate = true; + return 0; + } + + var cur, + i = 0, + aup = a.parentNode, + bup = b.parentNode, + ap = [ a ], + bp = [ b ]; + + // Parentless nodes are either documents or disconnected + if ( !aup || !bup ) { + return a === doc ? -1 : + b === doc ? 1 : + aup ? -1 : + bup ? 1 : + sortInput ? + ( indexOf.call( sortInput, a ) - indexOf.call( sortInput, b ) ) : + 0; + + // If the nodes are siblings, we can do a quick check + } else if ( aup === bup ) { + return siblingCheck( a, b ); + } + + // Otherwise we need full lists of their ancestors for comparison + cur = a; + while ( (cur = cur.parentNode) ) { + ap.unshift( cur ); + } + cur = b; + while ( (cur = cur.parentNode) ) { + bp.unshift( cur ); + } + + // Walk down the tree looking for a discrepancy + while ( ap[i] === bp[i] ) { + i++; + } + + return i ? + // Do a sibling check if the nodes have a common ancestor + siblingCheck( ap[i], bp[i] ) : + + // Otherwise nodes in our document sort first + ap[i] === preferredDoc ? -1 : + bp[i] === preferredDoc ? 
1 : + 0; + }; + + return doc; +}; + +Sizzle.matches = function( expr, elements ) { + return Sizzle( expr, null, null, elements ); +}; + +Sizzle.matchesSelector = function( elem, expr ) { + // Set document vars if needed + if ( ( elem.ownerDocument || elem ) !== document ) { + setDocument( elem ); + } + + // Make sure that attribute selectors are quoted + expr = expr.replace( rattributeQuotes, "='$1']" ); + + if ( support.matchesSelector && documentIsHTML && + ( !rbuggyMatches || !rbuggyMatches.test( expr ) ) && + ( !rbuggyQSA || !rbuggyQSA.test( expr ) ) ) { + + try { + var ret = matches.call( elem, expr ); + + // IE 9's matchesSelector returns false on disconnected nodes + if ( ret || support.disconnectedMatch || + // As well, disconnected nodes are said to be in a document + // fragment in IE 9 + elem.document && elem.document.nodeType !== 11 ) { + return ret; + } + } catch(e) {} + } + + return Sizzle( expr, document, null, [ elem ] ).length > 0; +}; + +Sizzle.contains = function( context, elem ) { + // Set document vars if needed + if ( ( context.ownerDocument || context ) !== document ) { + setDocument( context ); + } + return contains( context, elem ); +}; + +Sizzle.attr = function( elem, name ) { + // Set document vars if needed + if ( ( elem.ownerDocument || elem ) !== document ) { + setDocument( elem ); + } + + var fn = Expr.attrHandle[ name.toLowerCase() ], + // Don't get fooled by Object.prototype properties (jQuery #13807) + val = fn && hasOwn.call( Expr.attrHandle, name.toLowerCase() ) ? + fn( elem, name, !documentIsHTML ) : + undefined; + + return val !== undefined ? + val : + support.attributes || !documentIsHTML ? + elem.getAttribute( name ) : + (val = elem.getAttributeNode(name)) && val.specified ? 
+ val.value : + null; +}; + +Sizzle.error = function( msg ) { + throw new Error( "Syntax error, unrecognized expression: " + msg ); +}; + +/** + * Document sorting and removing duplicates + * @param {ArrayLike} results + */ +Sizzle.uniqueSort = function( results ) { + var elem, + duplicates = [], + j = 0, + i = 0; + + // Unless we *know* we can detect duplicates, assume their presence + hasDuplicate = !support.detectDuplicates; + sortInput = !support.sortStable && results.slice( 0 ); + results.sort( sortOrder ); + + if ( hasDuplicate ) { + while ( (elem = results[i++]) ) { + if ( elem === results[ i ] ) { + j = duplicates.push( i ); + } + } + while ( j-- ) { + results.splice( duplicates[ j ], 1 ); + } + } + + // Clear input after sorting to release objects + // See https://github.com/jquery/sizzle/pull/225 + sortInput = null; + + return results; +}; + +/** + * Utility function for retrieving the text value of an array of DOM nodes + * @param {Array|Element} elem + */ +getText = Sizzle.getText = function( elem ) { + var node, + ret = "", + i = 0, + nodeType = elem.nodeType; + + if ( !nodeType ) { + // If no nodeType, this is expected to be an array + while ( (node = elem[i++]) ) { + // Do not traverse comment nodes + ret += getText( node ); + } + } else if ( nodeType === 1 || nodeType === 9 || nodeType === 11 ) { + // Use textContent for elements + // innerText usage removed for consistency of new lines (jQuery #11153) + if ( typeof elem.textContent === "string" ) { + return elem.textContent; + } else { + // Traverse its children + for ( elem = elem.firstChild; elem; elem = elem.nextSibling ) { + ret += getText( elem ); + } + } + } else if ( nodeType === 3 || nodeType === 4 ) { + return elem.nodeValue; + } + // Do not include comment or processing instruction nodes + + return ret; +}; + +Expr = Sizzle.selectors = { + + // Can be adjusted by the user + cacheLength: 50, + + createPseudo: markFunction, + + match: matchExpr, + + attrHandle: {}, + + find: {}, + + 
relative: { + ">": { dir: "parentNode", first: true }, + " ": { dir: "parentNode" }, + "+": { dir: "previousSibling", first: true }, + "~": { dir: "previousSibling" } + }, + + preFilter: { + "ATTR": function( match ) { + match[1] = match[1].replace( runescape, funescape ); + + // Move the given value to match[3] whether quoted or unquoted + match[3] = ( match[3] || match[4] || match[5] || "" ).replace( runescape, funescape ); + + if ( match[2] === "~=" ) { + match[3] = " " + match[3] + " "; + } + + return match.slice( 0, 4 ); + }, + + "CHILD": function( match ) { + /* matches from matchExpr["CHILD"] + 1 type (only|nth|...) + 2 what (child|of-type) + 3 argument (even|odd|\d*|\d*n([+-]\d+)?|...) + 4 xn-component of xn+y argument ([+-]?\d*n|) + 5 sign of xn-component + 6 x of xn-component + 7 sign of y-component + 8 y of y-component + */ + match[1] = match[1].toLowerCase(); + + if ( match[1].slice( 0, 3 ) === "nth" ) { + // nth-* requires argument + if ( !match[3] ) { + Sizzle.error( match[0] ); + } + + // numeric x and y parameters for Expr.filter.CHILD + // remember that false/true cast respectively to 0/1 + match[4] = +( match[4] ? 
match[5] + (match[6] || 1) : 2 * ( match[3] === "even" || match[3] === "odd" ) ); + match[5] = +( ( match[7] + match[8] ) || match[3] === "odd" ); + + // other types prohibit arguments + } else if ( match[3] ) { + Sizzle.error( match[0] ); + } + + return match; + }, + + "PSEUDO": function( match ) { + var excess, + unquoted = !match[6] && match[2]; + + if ( matchExpr["CHILD"].test( match[0] ) ) { + return null; + } + + // Accept quoted arguments as-is + if ( match[3] ) { + match[2] = match[4] || match[5] || ""; + + // Strip excess characters from unquoted arguments + } else if ( unquoted && rpseudo.test( unquoted ) && + // Get excess from tokenize (recursively) + (excess = tokenize( unquoted, true )) && + // advance to the next closing parenthesis + (excess = unquoted.indexOf( ")", unquoted.length - excess ) - unquoted.length) ) { + + // excess is a negative index + match[0] = match[0].slice( 0, excess ); + match[2] = unquoted.slice( 0, excess ); + } + + // Return only captures needed by the pseudo filter method (type and argument) + return match.slice( 0, 3 ); + } + }, + + filter: { + + "TAG": function( nodeNameSelector ) { + var nodeName = nodeNameSelector.replace( runescape, funescape ).toLowerCase(); + return nodeNameSelector === "*" ? 
+ function() { return true; } : + function( elem ) { + return elem.nodeName && elem.nodeName.toLowerCase() === nodeName; + }; + }, + + "CLASS": function( className ) { + var pattern = classCache[ className + " " ]; + + return pattern || + (pattern = new RegExp( "(^|" + whitespace + ")" + className + "(" + whitespace + "|$)" )) && + classCache( className, function( elem ) { + return pattern.test( typeof elem.className === "string" && elem.className || typeof elem.getAttribute !== strundefined && elem.getAttribute("class") || "" ); + }); + }, + + "ATTR": function( name, operator, check ) { + return function( elem ) { + var result = Sizzle.attr( elem, name ); + + if ( result == null ) { + return operator === "!="; + } + if ( !operator ) { + return true; + } + + result += ""; + + return operator === "=" ? result === check : + operator === "!=" ? result !== check : + operator === "^=" ? check && result.indexOf( check ) === 0 : + operator === "*=" ? check && result.indexOf( check ) > -1 : + operator === "$=" ? check && result.slice( -check.length ) === check : + operator === "~=" ? ( " " + result + " " ).indexOf( check ) > -1 : + operator === "|=" ? result === check || result.slice( 0, check.length + 1 ) === check + "-" : + false; + }; + }, + + "CHILD": function( type, what, argument, first, last ) { + var simple = type.slice( 0, 3 ) !== "nth", + forward = type.slice( -4 ) !== "last", + ofType = what === "of-type"; + + return first === 1 && last === 0 ? + + // Shortcut for :nth-*(n) + function( elem ) { + return !!elem.parentNode; + } : + + function( elem, context, xml ) { + var cache, outerCache, node, diff, nodeIndex, start, + dir = simple !== forward ? "nextSibling" : "previousSibling", + parent = elem.parentNode, + name = ofType && elem.nodeName.toLowerCase(), + useCache = !xml && !ofType; + + if ( parent ) { + + // :(first|last|only)-(child|of-type) + if ( simple ) { + while ( dir ) { + node = elem; + while ( (node = node[ dir ]) ) { + if ( ofType ? 
node.nodeName.toLowerCase() === name : node.nodeType === 1 ) { + return false; + } + } + // Reverse direction for :only-* (if we haven't yet done so) + start = dir = type === "only" && !start && "nextSibling"; + } + return true; + } + + start = [ forward ? parent.firstChild : parent.lastChild ]; + + // non-xml :nth-child(...) stores cache data on `parent` + if ( forward && useCache ) { + // Seek `elem` from a previously-cached index + outerCache = parent[ expando ] || (parent[ expando ] = {}); + cache = outerCache[ type ] || []; + nodeIndex = cache[0] === dirruns && cache[1]; + diff = cache[0] === dirruns && cache[2]; + node = nodeIndex && parent.childNodes[ nodeIndex ]; + + while ( (node = ++nodeIndex && node && node[ dir ] || + + // Fallback to seeking `elem` from the start + (diff = nodeIndex = 0) || start.pop()) ) { + + // When found, cache indexes on `parent` and break + if ( node.nodeType === 1 && ++diff && node === elem ) { + outerCache[ type ] = [ dirruns, nodeIndex, diff ]; + break; + } + } + + // Use previously-cached element index if available + } else if ( useCache && (cache = (elem[ expando ] || (elem[ expando ] = {}))[ type ]) && cache[0] === dirruns ) { + diff = cache[1]; + + // xml :nth-child(...) or :nth-last-child(...) or :nth(-last)?-of-type(...) + } else { + // Use the same loop as above to seek `elem` from the start + while ( (node = ++nodeIndex && node && node[ dir ] || + (diff = nodeIndex = 0) || start.pop()) ) { + + if ( ( ofType ? 
node.nodeName.toLowerCase() === name : node.nodeType === 1 ) && ++diff ) { + // Cache the index of each encountered element + if ( useCache ) { + (node[ expando ] || (node[ expando ] = {}))[ type ] = [ dirruns, diff ]; + } + + if ( node === elem ) { + break; + } + } + } + } + + // Incorporate the offset, then check against cycle size + diff -= last; + return diff === first || ( diff % first === 0 && diff / first >= 0 ); + } + }; + }, + + "PSEUDO": function( pseudo, argument ) { + // pseudo-class names are case-insensitive + // http://www.w3.org/TR/selectors/#pseudo-classes + // Prioritize by case sensitivity in case custom pseudos are added with uppercase letters + // Remember that setFilters inherits from pseudos + var args, + fn = Expr.pseudos[ pseudo ] || Expr.setFilters[ pseudo.toLowerCase() ] || + Sizzle.error( "unsupported pseudo: " + pseudo ); + + // The user may use createPseudo to indicate that + // arguments are needed to create the filter function + // just as Sizzle does + if ( fn[ expando ] ) { + return fn( argument ); + } + + // But maintain support for old signatures + if ( fn.length > 1 ) { + args = [ pseudo, pseudo, "", argument ]; + return Expr.setFilters.hasOwnProperty( pseudo.toLowerCase() ) ? + markFunction(function( seed, matches ) { + var idx, + matched = fn( seed, argument ), + i = matched.length; + while ( i-- ) { + idx = indexOf.call( seed, matched[i] ); + seed[ idx ] = !( matches[ idx ] = matched[i] ); + } + }) : + function( elem ) { + return fn( elem, 0, args ); + }; + } + + return fn; + } + }, + + pseudos: { + // Potentially complex pseudos + "not": markFunction(function( selector ) { + // Trim the selector passed to compile + // to avoid treating leading and trailing + // spaces as combinators + var input = [], + results = [], + matcher = compile( selector.replace( rtrim, "$1" ) ); + + return matcher[ expando ] ? 
+ markFunction(function( seed, matches, context, xml ) { + var elem, + unmatched = matcher( seed, null, xml, [] ), + i = seed.length; + + // Match elements unmatched by `matcher` + while ( i-- ) { + if ( (elem = unmatched[i]) ) { + seed[i] = !(matches[i] = elem); + } + } + }) : + function( elem, context, xml ) { + input[0] = elem; + matcher( input, null, xml, results ); + return !results.pop(); + }; + }), + + "has": markFunction(function( selector ) { + return function( elem ) { + return Sizzle( selector, elem ).length > 0; + }; + }), + + "contains": markFunction(function( text ) { + return function( elem ) { + return ( elem.textContent || elem.innerText || getText( elem ) ).indexOf( text ) > -1; + }; + }), + + // "Whether an element is represented by a :lang() selector + // is based solely on the element's language value + // being equal to the identifier C, + // or beginning with the identifier C immediately followed by "-". + // The matching of C against the element's language value is performed case-insensitively. + // The identifier C does not have to be a valid language name." + // http://www.w3.org/TR/selectors/#lang-pseudo + "lang": markFunction( function( lang ) { + // lang value must be a valid identifier + if ( !ridentifier.test(lang || "") ) { + Sizzle.error( "unsupported lang: " + lang ); + } + lang = lang.replace( runescape, funescape ).toLowerCase(); + return function( elem ) { + var elemLang; + do { + if ( (elemLang = documentIsHTML ? 
+ elem.lang : + elem.getAttribute("xml:lang") || elem.getAttribute("lang")) ) { + + elemLang = elemLang.toLowerCase(); + return elemLang === lang || elemLang.indexOf( lang + "-" ) === 0; + } + } while ( (elem = elem.parentNode) && elem.nodeType === 1 ); + return false; + }; + }), + + // Miscellaneous + "target": function( elem ) { + var hash = window.location && window.location.hash; + return hash && hash.slice( 1 ) === elem.id; + }, + + "root": function( elem ) { + return elem === docElem; + }, + + "focus": function( elem ) { + return elem === document.activeElement && (!document.hasFocus || document.hasFocus()) && !!(elem.type || elem.href || ~elem.tabIndex); + }, + + // Boolean properties + "enabled": function( elem ) { + return elem.disabled === false; + }, + + "disabled": function( elem ) { + return elem.disabled === true; + }, + + "checked": function( elem ) { + // In CSS3, :checked should return both checked and selected elements + // http://www.w3.org/TR/2011/REC-css3-selectors-20110929/#checked + var nodeName = elem.nodeName.toLowerCase(); + return (nodeName === "input" && !!elem.checked) || (nodeName === "option" && !!elem.selected); + }, + + "selected": function( elem ) { + // Accessing this property makes selected-by-default + // options in Safari work properly + if ( elem.parentNode ) { + elem.parentNode.selectedIndex; + } + + return elem.selected === true; + }, + + // Contents + "empty": function( elem ) { + // http://www.w3.org/TR/selectors/#empty-pseudo + // :empty is negated by element (1) or content nodes (text: 3; cdata: 4; entity ref: 5), + // but not by others (comment: 8; processing instruction: 7; etc.) 
+ // nodeType < 6 works because attributes (2) do not appear as children + for ( elem = elem.firstChild; elem; elem = elem.nextSibling ) { + if ( elem.nodeType < 6 ) { + return false; + } + } + return true; + }, + + "parent": function( elem ) { + return !Expr.pseudos["empty"]( elem ); + }, + + // Element/input types + "header": function( elem ) { + return rheader.test( elem.nodeName ); + }, + + "input": function( elem ) { + return rinputs.test( elem.nodeName ); + }, + + "button": function( elem ) { + var name = elem.nodeName.toLowerCase(); + return name === "input" && elem.type === "button" || name === "button"; + }, + + "text": function( elem ) { + var attr; + return elem.nodeName.toLowerCase() === "input" && + elem.type === "text" && + + // Support: IE<8 + // New HTML5 attribute values (e.g., "search") appear with elem.type === "text" + ( (attr = elem.getAttribute("type")) == null || attr.toLowerCase() === "text" ); + }, + + // Position-in-collection + "first": createPositionalPseudo(function() { + return [ 0 ]; + }), + + "last": createPositionalPseudo(function( matchIndexes, length ) { + return [ length - 1 ]; + }), + + "eq": createPositionalPseudo(function( matchIndexes, length, argument ) { + return [ argument < 0 ? argument + length : argument ]; + }), + + "even": createPositionalPseudo(function( matchIndexes, length ) { + var i = 0; + for ( ; i < length; i += 2 ) { + matchIndexes.push( i ); + } + return matchIndexes; + }), + + "odd": createPositionalPseudo(function( matchIndexes, length ) { + var i = 1; + for ( ; i < length; i += 2 ) { + matchIndexes.push( i ); + } + return matchIndexes; + }), + + "lt": createPositionalPseudo(function( matchIndexes, length, argument ) { + var i = argument < 0 ? argument + length : argument; + for ( ; --i >= 0; ) { + matchIndexes.push( i ); + } + return matchIndexes; + }), + + "gt": createPositionalPseudo(function( matchIndexes, length, argument ) { + var i = argument < 0 ? 
argument + length : argument; + for ( ; ++i < length; ) { + matchIndexes.push( i ); + } + return matchIndexes; + }) + } +}; + +Expr.pseudos["nth"] = Expr.pseudos["eq"]; + +// Add button/input type pseudos +for ( i in { radio: true, checkbox: true, file: true, password: true, image: true } ) { + Expr.pseudos[ i ] = createInputPseudo( i ); +} +for ( i in { submit: true, reset: true } ) { + Expr.pseudos[ i ] = createButtonPseudo( i ); +} + +// Easy API for creating new setFilters +function setFilters() {} +setFilters.prototype = Expr.filters = Expr.pseudos; +Expr.setFilters = new setFilters(); + +tokenize = Sizzle.tokenize = function( selector, parseOnly ) { + var matched, match, tokens, type, + soFar, groups, preFilters, + cached = tokenCache[ selector + " " ]; + + if ( cached ) { + return parseOnly ? 0 : cached.slice( 0 ); + } + + soFar = selector; + groups = []; + preFilters = Expr.preFilter; + + while ( soFar ) { + + // Comma and first run + if ( !matched || (match = rcomma.exec( soFar )) ) { + if ( match ) { + // Don't consume trailing commas as valid + soFar = soFar.slice( match[0].length ) || soFar; + } + groups.push( (tokens = []) ); + } + + matched = false; + + // Combinators + if ( (match = rcombinators.exec( soFar )) ) { + matched = match.shift(); + tokens.push({ + value: matched, + // Cast descendant combinators to space + type: match[0].replace( rtrim, " " ) + }); + soFar = soFar.slice( matched.length ); + } + + // Filters + for ( type in Expr.filter ) { + if ( (match = matchExpr[ type ].exec( soFar )) && (!preFilters[ type ] || + (match = preFilters[ type ]( match ))) ) { + matched = match.shift(); + tokens.push({ + value: matched, + type: type, + matches: match + }); + soFar = soFar.slice( matched.length ); + } + } + + if ( !matched ) { + break; + } + } + + // Return the length of the invalid excess + // if we're just parsing + // Otherwise, throw an error or return tokens + return parseOnly ? + soFar.length : + soFar ? 
+ Sizzle.error( selector ) : + // Cache the tokens + tokenCache( selector, groups ).slice( 0 ); +}; + +function toSelector( tokens ) { + var i = 0, + len = tokens.length, + selector = ""; + for ( ; i < len; i++ ) { + selector += tokens[i].value; + } + return selector; +} + +function addCombinator( matcher, combinator, base ) { + var dir = combinator.dir, + checkNonElements = base && dir === "parentNode", + doneName = done++; + + return combinator.first ? + // Check against closest ancestor/preceding element + function( elem, context, xml ) { + while ( (elem = elem[ dir ]) ) { + if ( elem.nodeType === 1 || checkNonElements ) { + return matcher( elem, context, xml ); + } + } + } : + + // Check against all ancestor/preceding elements + function( elem, context, xml ) { + var oldCache, outerCache, + newCache = [ dirruns, doneName ]; + + // We can't set arbitrary data on XML nodes, so they don't benefit from dir caching + if ( xml ) { + while ( (elem = elem[ dir ]) ) { + if ( elem.nodeType === 1 || checkNonElements ) { + if ( matcher( elem, context, xml ) ) { + return true; + } + } + } + } else { + while ( (elem = elem[ dir ]) ) { + if ( elem.nodeType === 1 || checkNonElements ) { + outerCache = elem[ expando ] || (elem[ expando ] = {}); + if ( (oldCache = outerCache[ dir ]) && + oldCache[ 0 ] === dirruns && oldCache[ 1 ] === doneName ) { + + // Assign to newCache so results back-propagate to previous elements + return (newCache[ 2 ] = oldCache[ 2 ]); + } else { + // Reuse newcache so results back-propagate to previous elements + outerCache[ dir ] = newCache; + + // A match means we're done; a fail means we have to keep checking + if ( (newCache[ 2 ] = matcher( elem, context, xml )) ) { + return true; + } + } + } + } + } + }; +} + +function elementMatcher( matchers ) { + return matchers.length > 1 ? 
+ function( elem, context, xml ) { + var i = matchers.length; + while ( i-- ) { + if ( !matchers[i]( elem, context, xml ) ) { + return false; + } + } + return true; + } : + matchers[0]; +} + +function multipleContexts( selector, contexts, results ) { + var i = 0, + len = contexts.length; + for ( ; i < len; i++ ) { + Sizzle( selector, contexts[i], results ); + } + return results; +} + +function condense( unmatched, map, filter, context, xml ) { + var elem, + newUnmatched = [], + i = 0, + len = unmatched.length, + mapped = map != null; + + for ( ; i < len; i++ ) { + if ( (elem = unmatched[i]) ) { + if ( !filter || filter( elem, context, xml ) ) { + newUnmatched.push( elem ); + if ( mapped ) { + map.push( i ); + } + } + } + } + + return newUnmatched; +} + +function setMatcher( preFilter, selector, matcher, postFilter, postFinder, postSelector ) { + if ( postFilter && !postFilter[ expando ] ) { + postFilter = setMatcher( postFilter ); + } + if ( postFinder && !postFinder[ expando ] ) { + postFinder = setMatcher( postFinder, postSelector ); + } + return markFunction(function( seed, results, context, xml ) { + var temp, i, elem, + preMap = [], + postMap = [], + preexisting = results.length, + + // Get initial elements from seed or context + elems = seed || multipleContexts( selector || "*", context.nodeType ? [ context ] : context, [] ), + + // Prefilter to get matcher input, preserving a map for seed-results synchronization + matcherIn = preFilter && ( seed || !selector ) ? + condense( elems, preMap, preFilter, context, xml ) : + elems, + + matcherOut = matcher ? + // If we have a postFinder, or filtered seed, or non-seed postFilter or preexisting results, + postFinder || ( seed ? preFilter : preexisting || postFilter ) ? 
+ + // ...intermediate processing is necessary + [] : + + // ...otherwise use results directly + results : + matcherIn; + + // Find primary matches + if ( matcher ) { + matcher( matcherIn, matcherOut, context, xml ); + } + + // Apply postFilter + if ( postFilter ) { + temp = condense( matcherOut, postMap ); + postFilter( temp, [], context, xml ); + + // Un-match failing elements by moving them back to matcherIn + i = temp.length; + while ( i-- ) { + if ( (elem = temp[i]) ) { + matcherOut[ postMap[i] ] = !(matcherIn[ postMap[i] ] = elem); + } + } + } + + if ( seed ) { + if ( postFinder || preFilter ) { + if ( postFinder ) { + // Get the final matcherOut by condensing this intermediate into postFinder contexts + temp = []; + i = matcherOut.length; + while ( i-- ) { + if ( (elem = matcherOut[i]) ) { + // Restore matcherIn since elem is not yet a final match + temp.push( (matcherIn[i] = elem) ); + } + } + postFinder( null, (matcherOut = []), temp, xml ); + } + + // Move matched elements from seed to results to keep them synchronized + i = matcherOut.length; + while ( i-- ) { + if ( (elem = matcherOut[i]) && + (temp = postFinder ? indexOf.call( seed, elem ) : preMap[i]) > -1 ) { + + seed[temp] = !(results[temp] = elem); + } + } + } + + // Add elements to results, through postFinder if defined + } else { + matcherOut = condense( + matcherOut === results ? + matcherOut.splice( preexisting, matcherOut.length ) : + matcherOut + ); + if ( postFinder ) { + postFinder( null, results, matcherOut, xml ); + } else { + push.apply( results, matcherOut ); + } + } + }); +} + +function matcherFromTokens( tokens ) { + var checkContext, matcher, j, + len = tokens.length, + leadingRelative = Expr.relative[ tokens[0].type ], + implicitRelative = leadingRelative || Expr.relative[" "], + i = leadingRelative ? 
1 : 0, + + // The foundational matcher ensures that elements are reachable from top-level context(s) + matchContext = addCombinator( function( elem ) { + return elem === checkContext; + }, implicitRelative, true ), + matchAnyContext = addCombinator( function( elem ) { + return indexOf.call( checkContext, elem ) > -1; + }, implicitRelative, true ), + matchers = [ function( elem, context, xml ) { + return ( !leadingRelative && ( xml || context !== outermostContext ) ) || ( + (checkContext = context).nodeType ? + matchContext( elem, context, xml ) : + matchAnyContext( elem, context, xml ) ); + } ]; + + for ( ; i < len; i++ ) { + if ( (matcher = Expr.relative[ tokens[i].type ]) ) { + matchers = [ addCombinator(elementMatcher( matchers ), matcher) ]; + } else { + matcher = Expr.filter[ tokens[i].type ].apply( null, tokens[i].matches ); + + // Return special upon seeing a positional matcher + if ( matcher[ expando ] ) { + // Find the next relative operator (if any) for proper handling + j = ++i; + for ( ; j < len; j++ ) { + if ( Expr.relative[ tokens[j].type ] ) { + break; + } + } + return setMatcher( + i > 1 && elementMatcher( matchers ), + i > 1 && toSelector( + // If the preceding token was a descendant combinator, insert an implicit any-element `*` + tokens.slice( 0, i - 1 ).concat({ value: tokens[ i - 2 ].type === " " ? 
"*" : "" }) + ).replace( rtrim, "$1" ), + matcher, + i < j && matcherFromTokens( tokens.slice( i, j ) ), + j < len && matcherFromTokens( (tokens = tokens.slice( j )) ), + j < len && toSelector( tokens ) + ); + } + matchers.push( matcher ); + } + } + + return elementMatcher( matchers ); +} + +function matcherFromGroupMatchers( elementMatchers, setMatchers ) { + var bySet = setMatchers.length > 0, + byElement = elementMatchers.length > 0, + superMatcher = function( seed, context, xml, results, outermost ) { + var elem, j, matcher, + matchedCount = 0, + i = "0", + unmatched = seed && [], + setMatched = [], + contextBackup = outermostContext, + // We must always have either seed elements or outermost context + elems = seed || byElement && Expr.find["TAG"]( "*", outermost ), + // Use integer dirruns iff this is the outermost matcher + dirrunsUnique = (dirruns += contextBackup == null ? 1 : Math.random() || 0.1), + len = elems.length; + + if ( outermost ) { + outermostContext = context !== document && context; + } + + // Add elements passing elementMatchers directly to results + // Keep `i` a string if there are no elements so `matchedCount` will be "00" below + // Support: IE<9, Safari + // Tolerate NodeList properties (IE: "length"; Safari: ) matching elements by id + for ( ; i !== len && (elem = elems[i]) != null; i++ ) { + if ( byElement && elem ) { + j = 0; + while ( (matcher = elementMatchers[j++]) ) { + if ( matcher( elem, context, xml ) ) { + results.push( elem ); + break; + } + } + if ( outermost ) { + dirruns = dirrunsUnique; + } + } + + // Track unmatched elements for set filters + if ( bySet ) { + // They will have gone through all possible matchers + if ( (elem = !matcher && elem) ) { + matchedCount--; + } + + // Lengthen the array for every element, matched or not + if ( seed ) { + unmatched.push( elem ); + } + } + } + + // Apply set filters to unmatched elements + matchedCount += i; + if ( bySet && i !== matchedCount ) { + j = 0; + while ( (matcher = 
setMatchers[j++]) ) { + matcher( unmatched, setMatched, context, xml ); + } + + if ( seed ) { + // Reintegrate element matches to eliminate the need for sorting + if ( matchedCount > 0 ) { + while ( i-- ) { + if ( !(unmatched[i] || setMatched[i]) ) { + setMatched[i] = pop.call( results ); + } + } + } + + // Discard index placeholder values to get only actual matches + setMatched = condense( setMatched ); + } + + // Add matches to results + push.apply( results, setMatched ); + + // Seedless set matches succeeding multiple successful matchers stipulate sorting + if ( outermost && !seed && setMatched.length > 0 && + ( matchedCount + setMatchers.length ) > 1 ) { + + Sizzle.uniqueSort( results ); + } + } + + // Override manipulation of globals by nested matchers + if ( outermost ) { + dirruns = dirrunsUnique; + outermostContext = contextBackup; + } + + return unmatched; + }; + + return bySet ? + markFunction( superMatcher ) : + superMatcher; +} + +compile = Sizzle.compile = function( selector, match /* Internal Use Only */ ) { + var i, + setMatchers = [], + elementMatchers = [], + cached = compilerCache[ selector + " " ]; + + if ( !cached ) { + // Generate a function of recursive functions that can be used to check each element + if ( !match ) { + match = tokenize( selector ); + } + i = match.length; + while ( i-- ) { + cached = matcherFromTokens( match[i] ); + if ( cached[ expando ] ) { + setMatchers.push( cached ); + } else { + elementMatchers.push( cached ); + } + } + + // Cache the compiled function + cached = compilerCache( selector, matcherFromGroupMatchers( elementMatchers, setMatchers ) ); + + // Save selector and tokenization + cached.selector = selector; + } + return cached; +}; + +/** + * A low-level selection function that works with Sizzle's compiled + * selector functions + * @param {String|Function} selector A selector or a pre-compiled + * selector function built with Sizzle.compile + * @param {Element} context + * @param {Array} [results] + * @param 
{Array} [seed] A set of elements to match against + */ +select = Sizzle.select = function( selector, context, results, seed ) { + var i, tokens, token, type, find, + compiled = typeof selector === "function" && selector, + match = !seed && tokenize( (selector = compiled.selector || selector) ); + + results = results || []; + + // Try to minimize operations if there is no seed and only one group + if ( match.length === 1 ) { + + // Take a shortcut and set the context if the root selector is an ID + tokens = match[0] = match[0].slice( 0 ); + if ( tokens.length > 2 && (token = tokens[0]).type === "ID" && + support.getById && context.nodeType === 9 && documentIsHTML && + Expr.relative[ tokens[1].type ] ) { + + context = ( Expr.find["ID"]( token.matches[0].replace(runescape, funescape), context ) || [] )[0]; + if ( !context ) { + return results; + + // Precompiled matchers will still verify ancestry, so step up a level + } else if ( compiled ) { + context = context.parentNode; + } + + selector = selector.slice( tokens.shift().value.length ); + } + + // Fetch a seed set for right-to-left matching + i = matchExpr["needsContext"].test( selector ) ? 
0 : tokens.length; + while ( i-- ) { + token = tokens[i]; + + // Abort if we hit a combinator + if ( Expr.relative[ (type = token.type) ] ) { + break; + } + if ( (find = Expr.find[ type ]) ) { + // Search, expanding context for leading sibling combinators + if ( (seed = find( + token.matches[0].replace( runescape, funescape ), + rsibling.test( tokens[0].type ) && testContext( context.parentNode ) || context + )) ) { + + // If seed is empty or no tokens remain, we can return early + tokens.splice( i, 1 ); + selector = seed.length && toSelector( tokens ); + if ( !selector ) { + push.apply( results, seed ); + return results; + } + + break; + } + } + } + } + + // Compile and execute a filtering function if one is not provided + // Provide `match` to avoid retokenization if we modified the selector above + ( compiled || compile( selector, match ) )( + seed, + context, + !documentIsHTML, + results, + rsibling.test( selector ) && testContext( context.parentNode ) || context + ); + return results; +}; + +// One-time assignments + +// Sort stability +support.sortStable = expando.split("").sort( sortOrder ).join("") === expando; + +// Support: Chrome<14 +// Always assume duplicates if they aren't passed to the comparison function +support.detectDuplicates = !!hasDuplicate; + +// Initialize against the default document +setDocument(); + +// Support: Webkit<537.32 - Safari 6.0.3/Chrome 25 (fixed in Chrome 27) +// Detached nodes confoundingly follow *each other* +support.sortDetached = assert(function( div1 ) { + // Should return 1, but returns 4 (following) + return div1.compareDocumentPosition( document.createElement("div") ) & 1; +}); + +// Support: IE<8 +// Prevent attribute/property "interpolation" +// http://msdn.microsoft.com/en-us/library/ms536429%28VS.85%29.aspx +if ( !assert(function( div ) { + div.innerHTML = ""; + return div.firstChild.getAttribute("href") === "#" ; +}) ) { + addHandle( "type|href|height|width", function( elem, name, isXML ) { + if ( !isXML ) { + 
return elem.getAttribute( name, name.toLowerCase() === "type" ? 1 : 2 ); + } + }); +} + +// Support: IE<9 +// Use defaultValue in place of getAttribute("value") +if ( !support.attributes || !assert(function( div ) { + div.innerHTML = ""; + div.firstChild.setAttribute( "value", "" ); + return div.firstChild.getAttribute( "value" ) === ""; +}) ) { + addHandle( "value", function( elem, name, isXML ) { + if ( !isXML && elem.nodeName.toLowerCase() === "input" ) { + return elem.defaultValue; + } + }); +} + +// Support: IE<9 +// Use getAttributeNode to fetch booleans when getAttribute lies +if ( !assert(function( div ) { + return div.getAttribute("disabled") == null; +}) ) { + addHandle( booleans, function( elem, name, isXML ) { + var val; + if ( !isXML ) { + return elem[ name ] === true ? name.toLowerCase() : + (val = elem.getAttributeNode( name )) && val.specified ? + val.value : + null; + } + }); +} + +return Sizzle; + +})( window ); + + + +jQuery.find = Sizzle; +jQuery.expr = Sizzle.selectors; +jQuery.expr[":"] = jQuery.expr.pseudos; +jQuery.unique = Sizzle.uniqueSort; +jQuery.text = Sizzle.getText; +jQuery.isXMLDoc = Sizzle.isXML; +jQuery.contains = Sizzle.contains; + + + +var rneedsContext = jQuery.expr.match.needsContext; + +var rsingleTag = (/^<(\w+)\s*\/?>(?:<\/\1>|)$/); + + + +var risSimple = /^.[^:#\[\.,]*$/; + +// Implement the identical functionality for filter and not +function winnow( elements, qualifier, not ) { + if ( jQuery.isFunction( qualifier ) ) { + return jQuery.grep( elements, function( elem, i ) { + /* jshint -W018 */ + return !!qualifier.call( elem, i, elem ) !== not; + }); + + } + + if ( qualifier.nodeType ) { + return jQuery.grep( elements, function( elem ) { + return ( elem === qualifier ) !== not; + }); + + } + + if ( typeof qualifier === "string" ) { + if ( risSimple.test( qualifier ) ) { + return jQuery.filter( qualifier, elements, not ); + } + + qualifier = jQuery.filter( qualifier, elements ); + } + + return jQuery.grep( elements, 
function( elem ) { + return ( jQuery.inArray( elem, qualifier ) >= 0 ) !== not; + }); +} + +jQuery.filter = function( expr, elems, not ) { + var elem = elems[ 0 ]; + + if ( not ) { + expr = ":not(" + expr + ")"; + } + + return elems.length === 1 && elem.nodeType === 1 ? + jQuery.find.matchesSelector( elem, expr ) ? [ elem ] : [] : + jQuery.find.matches( expr, jQuery.grep( elems, function( elem ) { + return elem.nodeType === 1; + })); +}; + +jQuery.fn.extend({ + find: function( selector ) { + var i, + ret = [], + self = this, + len = self.length; + + if ( typeof selector !== "string" ) { + return this.pushStack( jQuery( selector ).filter(function() { + for ( i = 0; i < len; i++ ) { + if ( jQuery.contains( self[ i ], this ) ) { + return true; + } + } + }) ); + } + + for ( i = 0; i < len; i++ ) { + jQuery.find( selector, self[ i ], ret ); + } + + // Needed because $( selector, context ) becomes $( context ).find( selector ) + ret = this.pushStack( len > 1 ? jQuery.unique( ret ) : ret ); + ret.selector = this.selector ? this.selector + " " + selector : selector; + return ret; + }, + filter: function( selector ) { + return this.pushStack( winnow(this, selector || [], false) ); + }, + not: function( selector ) { + return this.pushStack( winnow(this, selector || [], true) ); + }, + is: function( selector ) { + return !!winnow( + this, + + // If this is a positional/relative selector, check membership in the returned set + // so $("p:first").is("p:last") won't return true for a doc with two "p". + typeof selector === "string" && rneedsContext.test( selector ) ? 
+ jQuery( selector ) : + selector || [], + false + ).length; + } +}); + + +// Initialize a jQuery object + + +// A central reference to the root jQuery(document) +var rootjQuery, + + // Use the correct document accordingly with window argument (sandbox) + document = window.document, + + // A simple way to check for HTML strings + // Prioritize #id over to avoid XSS via location.hash (#9521) + // Strict HTML recognition (#11290: must start with <) + rquickExpr = /^(?:\s*(<[\w\W]+>)[^>]*|#([\w-]*))$/, + + init = jQuery.fn.init = function( selector, context ) { + var match, elem; + + // HANDLE: $(""), $(null), $(undefined), $(false) + if ( !selector ) { + return this; + } + + // Handle HTML strings + if ( typeof selector === "string" ) { + if ( selector.charAt(0) === "<" && selector.charAt( selector.length - 1 ) === ">" && selector.length >= 3 ) { + // Assume that strings that start and end with <> are HTML and skip the regex check + match = [ null, selector, null ]; + + } else { + match = rquickExpr.exec( selector ); + } + + // Match html or make sure no context is specified for #id + if ( match && (match[1] || !context) ) { + + // HANDLE: $(html) -> $(array) + if ( match[1] ) { + context = context instanceof jQuery ? context[0] : context; + + // scripts is true for back-compat + // Intentionally let the error be thrown if parseHTML is not present + jQuery.merge( this, jQuery.parseHTML( + match[1], + context && context.nodeType ? 
context.ownerDocument || context : document, + true + ) ); + + // HANDLE: $(html, props) + if ( rsingleTag.test( match[1] ) && jQuery.isPlainObject( context ) ) { + for ( match in context ) { + // Properties of context are called as methods if possible + if ( jQuery.isFunction( this[ match ] ) ) { + this[ match ]( context[ match ] ); + + // ...and otherwise set as attributes + } else { + this.attr( match, context[ match ] ); + } + } + } + + return this; + + // HANDLE: $(#id) + } else { + elem = document.getElementById( match[2] ); + + // Check parentNode to catch when Blackberry 4.6 returns + // nodes that are no longer in the document #6963 + if ( elem && elem.parentNode ) { + // Handle the case where IE and Opera return items + // by name instead of ID + if ( elem.id !== match[2] ) { + return rootjQuery.find( selector ); + } + + // Otherwise, we inject the element directly into the jQuery object + this.length = 1; + this[0] = elem; + } + + this.context = document; + this.selector = selector; + return this; + } + + // HANDLE: $(expr, $(...)) + } else if ( !context || context.jquery ) { + return ( context || rootjQuery ).find( selector ); + + // HANDLE: $(expr, context) + // (which is just equivalent to: $(context).find(expr) + } else { + return this.constructor( context ).find( selector ); + } + + // HANDLE: $(DOMElement) + } else if ( selector.nodeType ) { + this.context = this[0] = selector; + this.length = 1; + return this; + + // HANDLE: $(function) + // Shortcut for document ready + } else if ( jQuery.isFunction( selector ) ) { + return typeof rootjQuery.ready !== "undefined" ? 
+ rootjQuery.ready( selector ) : + // Execute immediately if ready is not present + selector( jQuery ); + } + + if ( selector.selector !== undefined ) { + this.selector = selector.selector; + this.context = selector.context; + } + + return jQuery.makeArray( selector, this ); + }; + +// Give the init function the jQuery prototype for later instantiation +init.prototype = jQuery.fn; + +// Initialize central reference +rootjQuery = jQuery( document ); + + +var rparentsprev = /^(?:parents|prev(?:Until|All))/, + // methods guaranteed to produce a unique set when starting from a unique set + guaranteedUnique = { + children: true, + contents: true, + next: true, + prev: true + }; + +jQuery.extend({ + dir: function( elem, dir, until ) { + var matched = [], + cur = elem[ dir ]; + + while ( cur && cur.nodeType !== 9 && (until === undefined || cur.nodeType !== 1 || !jQuery( cur ).is( until )) ) { + if ( cur.nodeType === 1 ) { + matched.push( cur ); + } + cur = cur[dir]; + } + return matched; + }, + + sibling: function( n, elem ) { + var r = []; + + for ( ; n; n = n.nextSibling ) { + if ( n.nodeType === 1 && n !== elem ) { + r.push( n ); + } + } + + return r; + } +}); + +jQuery.fn.extend({ + has: function( target ) { + var i, + targets = jQuery( target, this ), + len = targets.length; + + return this.filter(function() { + for ( i = 0; i < len; i++ ) { + if ( jQuery.contains( this, targets[i] ) ) { + return true; + } + } + }); + }, + + closest: function( selectors, context ) { + var cur, + i = 0, + l = this.length, + matched = [], + pos = rneedsContext.test( selectors ) || typeof selectors !== "string" ? + jQuery( selectors, context || this.context ) : + 0; + + for ( ; i < l; i++ ) { + for ( cur = this[i]; cur && cur !== context; cur = cur.parentNode ) { + // Always skip document fragments + if ( cur.nodeType < 11 && (pos ? 
+ pos.index(cur) > -1 : + + // Don't pass non-elements to Sizzle + cur.nodeType === 1 && + jQuery.find.matchesSelector(cur, selectors)) ) { + + matched.push( cur ); + break; + } + } + } + + return this.pushStack( matched.length > 1 ? jQuery.unique( matched ) : matched ); + }, + + // Determine the position of an element within + // the matched set of elements + index: function( elem ) { + + // No argument, return index in parent + if ( !elem ) { + return ( this[0] && this[0].parentNode ) ? this.first().prevAll().length : -1; + } + + // index in selector + if ( typeof elem === "string" ) { + return jQuery.inArray( this[0], jQuery( elem ) ); + } + + // Locate the position of the desired element + return jQuery.inArray( + // If it receives a jQuery object, the first element is used + elem.jquery ? elem[0] : elem, this ); + }, + + add: function( selector, context ) { + return this.pushStack( + jQuery.unique( + jQuery.merge( this.get(), jQuery( selector, context ) ) + ) + ); + }, + + addBack: function( selector ) { + return this.add( selector == null ? + this.prevObject : this.prevObject.filter(selector) + ); + } +}); + +function sibling( cur, dir ) { + do { + cur = cur[ dir ]; + } while ( cur && cur.nodeType !== 1 ); + + return cur; +} + +jQuery.each({ + parent: function( elem ) { + var parent = elem.parentNode; + return parent && parent.nodeType !== 11 ? 
parent : null; + }, + parents: function( elem ) { + return jQuery.dir( elem, "parentNode" ); + }, + parentsUntil: function( elem, i, until ) { + return jQuery.dir( elem, "parentNode", until ); + }, + next: function( elem ) { + return sibling( elem, "nextSibling" ); + }, + prev: function( elem ) { + return sibling( elem, "previousSibling" ); + }, + nextAll: function( elem ) { + return jQuery.dir( elem, "nextSibling" ); + }, + prevAll: function( elem ) { + return jQuery.dir( elem, "previousSibling" ); + }, + nextUntil: function( elem, i, until ) { + return jQuery.dir( elem, "nextSibling", until ); + }, + prevUntil: function( elem, i, until ) { + return jQuery.dir( elem, "previousSibling", until ); + }, + siblings: function( elem ) { + return jQuery.sibling( ( elem.parentNode || {} ).firstChild, elem ); + }, + children: function( elem ) { + return jQuery.sibling( elem.firstChild ); + }, + contents: function( elem ) { + return jQuery.nodeName( elem, "iframe" ) ? + elem.contentDocument || elem.contentWindow.document : + jQuery.merge( [], elem.childNodes ); + } +}, function( name, fn ) { + jQuery.fn[ name ] = function( until, selector ) { + var ret = jQuery.map( this, fn, until ); + + if ( name.slice( -5 ) !== "Until" ) { + selector = until; + } + + if ( selector && typeof selector === "string" ) { + ret = jQuery.filter( selector, ret ); + } + + if ( this.length > 1 ) { + // Remove duplicates + if ( !guaranteedUnique[ name ] ) { + ret = jQuery.unique( ret ); + } + + // Reverse order for parents* and prev-derivatives + if ( rparentsprev.test( name ) ) { + ret = ret.reverse(); + } + } + + return this.pushStack( ret ); + }; +}); +var rnotwhite = (/\S+/g); + + + +// String to Object options format cache +var optionsCache = {}; + +// Convert String-formatted options into Object-formatted ones and store in cache +function createOptions( options ) { + var object = optionsCache[ options ] = {}; + jQuery.each( options.match( rnotwhite ) || [], function( _, flag ) { + object[ flag 
] = true; + }); + return object; +} + +/* + * Create a callback list using the following parameters: + * + * options: an optional list of space-separated options that will change how + * the callback list behaves or a more traditional option object + * + * By default a callback list will act like an event callback list and can be + * "fired" multiple times. + * + * Possible options: + * + * once: will ensure the callback list can only be fired once (like a Deferred) + * + * memory: will keep track of previous values and will call any callback added + * after the list has been fired right away with the latest "memorized" + * values (like a Deferred) + * + * unique: will ensure a callback can only be added once (no duplicate in the list) + * + * stopOnFalse: interrupt callings when a callback returns false + * + */ +jQuery.Callbacks = function( options ) { + + // Convert options from String-formatted to Object-formatted if needed + // (we check in cache first) + options = typeof options === "string" ? 
+ ( optionsCache[ options ] || createOptions( options ) ) : + jQuery.extend( {}, options ); + + var // Flag to know if list is currently firing + firing, + // Last fire value (for non-forgettable lists) + memory, + // Flag to know if list was already fired + fired, + // End of the loop when firing + firingLength, + // Index of currently firing callback (modified by remove if needed) + firingIndex, + // First callback to fire (used internally by add and fireWith) + firingStart, + // Actual callback list + list = [], + // Stack of fire calls for repeatable lists + stack = !options.once && [], + // Fire callbacks + fire = function( data ) { + memory = options.memory && data; + fired = true; + firingIndex = firingStart || 0; + firingStart = 0; + firingLength = list.length; + firing = true; + for ( ; list && firingIndex < firingLength; firingIndex++ ) { + if ( list[ firingIndex ].apply( data[ 0 ], data[ 1 ] ) === false && options.stopOnFalse ) { + memory = false; // To prevent further calls using add + break; + } + } + firing = false; + if ( list ) { + if ( stack ) { + if ( stack.length ) { + fire( stack.shift() ); + } + } else if ( memory ) { + list = []; + } else { + self.disable(); + } + } + }, + // Actual Callbacks object + self = { + // Add a callback or a collection of callbacks to the list + add: function() { + if ( list ) { + // First, we save the current length + var start = list.length; + (function add( args ) { + jQuery.each( args, function( _, arg ) { + var type = jQuery.type( arg ); + if ( type === "function" ) { + if ( !options.unique || !self.has( arg ) ) { + list.push( arg ); + } + } else if ( arg && arg.length && type !== "string" ) { + // Inspect recursively + add( arg ); + } + }); + })( arguments ); + // Do we need to add the callbacks to the + // current firing batch? 
+ if ( firing ) { + firingLength = list.length; + // With memory, if we're not firing then + // we should call right away + } else if ( memory ) { + firingStart = start; + fire( memory ); + } + } + return this; + }, + // Remove a callback from the list + remove: function() { + if ( list ) { + jQuery.each( arguments, function( _, arg ) { + var index; + while ( ( index = jQuery.inArray( arg, list, index ) ) > -1 ) { + list.splice( index, 1 ); + // Handle firing indexes + if ( firing ) { + if ( index <= firingLength ) { + firingLength--; + } + if ( index <= firingIndex ) { + firingIndex--; + } + } + } + }); + } + return this; + }, + // Check if a given callback is in the list. + // If no argument is given, return whether or not list has callbacks attached. + has: function( fn ) { + return fn ? jQuery.inArray( fn, list ) > -1 : !!( list && list.length ); + }, + // Remove all callbacks from the list + empty: function() { + list = []; + firingLength = 0; + return this; + }, + // Have the list do nothing anymore + disable: function() { + list = stack = memory = undefined; + return this; + }, + // Is it disabled? + disabled: function() { + return !list; + }, + // Lock the list in its current state + lock: function() { + stack = undefined; + if ( !memory ) { + self.disable(); + } + return this; + }, + // Is it locked? + locked: function() { + return !stack; + }, + // Call all callbacks with the given context and arguments + fireWith: function( context, args ) { + if ( list && ( !fired || stack ) ) { + args = args || []; + args = [ context, args.slice ? 
args.slice() : args ]; + if ( firing ) { + stack.push( args ); + } else { + fire( args ); + } + } + return this; + }, + // Call all the callbacks with the given arguments + fire: function() { + self.fireWith( this, arguments ); + return this; + }, + // To know if the callbacks have already been called at least once + fired: function() { + return !!fired; + } + }; + + return self; +}; + + +jQuery.extend({ + + Deferred: function( func ) { + var tuples = [ + // action, add listener, listener list, final state + [ "resolve", "done", jQuery.Callbacks("once memory"), "resolved" ], + [ "reject", "fail", jQuery.Callbacks("once memory"), "rejected" ], + [ "notify", "progress", jQuery.Callbacks("memory") ] + ], + state = "pending", + promise = { + state: function() { + return state; + }, + always: function() { + deferred.done( arguments ).fail( arguments ); + return this; + }, + then: function( /* fnDone, fnFail, fnProgress */ ) { + var fns = arguments; + return jQuery.Deferred(function( newDefer ) { + jQuery.each( tuples, function( i, tuple ) { + var fn = jQuery.isFunction( fns[ i ] ) && fns[ i ]; + // deferred[ done | fail | progress ] for forwarding actions to newDefer + deferred[ tuple[1] ](function() { + var returned = fn && fn.apply( this, arguments ); + if ( returned && jQuery.isFunction( returned.promise ) ) { + returned.promise() + .done( newDefer.resolve ) + .fail( newDefer.reject ) + .progress( newDefer.notify ); + } else { + newDefer[ tuple[ 0 ] + "With" ]( this === promise ? newDefer.promise() : this, fn ? [ returned ] : arguments ); + } + }); + }); + fns = null; + }).promise(); + }, + // Get a promise for this deferred + // If obj is provided, the promise aspect is added to the object + promise: function( obj ) { + return obj != null ? 
jQuery.extend( obj, promise ) : promise; + } + }, + deferred = {}; + + // Keep pipe for back-compat + promise.pipe = promise.then; + + // Add list-specific methods + jQuery.each( tuples, function( i, tuple ) { + var list = tuple[ 2 ], + stateString = tuple[ 3 ]; + + // promise[ done | fail | progress ] = list.add + promise[ tuple[1] ] = list.add; + + // Handle state + if ( stateString ) { + list.add(function() { + // state = [ resolved | rejected ] + state = stateString; + + // [ reject_list | resolve_list ].disable; progress_list.lock + }, tuples[ i ^ 1 ][ 2 ].disable, tuples[ 2 ][ 2 ].lock ); + } + + // deferred[ resolve | reject | notify ] + deferred[ tuple[0] ] = function() { + deferred[ tuple[0] + "With" ]( this === deferred ? promise : this, arguments ); + return this; + }; + deferred[ tuple[0] + "With" ] = list.fireWith; + }); + + // Make the deferred a promise + promise.promise( deferred ); + + // Call given func if any + if ( func ) { + func.call( deferred, deferred ); + } + + // All done! + return deferred; + }, + + // Deferred helper + when: function( subordinate /* , ..., subordinateN */ ) { + var i = 0, + resolveValues = slice.call( arguments ), + length = resolveValues.length, + + // the count of uncompleted subordinates + remaining = length !== 1 || ( subordinate && jQuery.isFunction( subordinate.promise ) ) ? length : 0, + + // the master Deferred. If resolveValues consist of only a single Deferred, just use that. + deferred = remaining === 1 ? subordinate : jQuery.Deferred(), + + // Update function for both resolve and progress values + updateFunc = function( i, contexts, values ) { + return function( value ) { + contexts[ i ] = this; + values[ i ] = arguments.length > 1 ? 
slice.call( arguments ) : value; + if ( values === progressValues ) { + deferred.notifyWith( contexts, values ); + + } else if ( !(--remaining) ) { + deferred.resolveWith( contexts, values ); + } + }; + }, + + progressValues, progressContexts, resolveContexts; + + // add listeners to Deferred subordinates; treat others as resolved + if ( length > 1 ) { + progressValues = new Array( length ); + progressContexts = new Array( length ); + resolveContexts = new Array( length ); + for ( ; i < length; i++ ) { + if ( resolveValues[ i ] && jQuery.isFunction( resolveValues[ i ].promise ) ) { + resolveValues[ i ].promise() + .done( updateFunc( i, resolveContexts, resolveValues ) ) + .fail( deferred.reject ) + .progress( updateFunc( i, progressContexts, progressValues ) ); + } else { + --remaining; + } + } + } + + // if we're not waiting on anything, resolve the master + if ( !remaining ) { + deferred.resolveWith( resolveContexts, resolveValues ); + } + + return deferred.promise(); + } +}); + + +// The deferred used on DOM ready +var readyList; + +jQuery.fn.ready = function( fn ) { + // Add the callback + jQuery.ready.promise().done( fn ); + + return this; +}; + +jQuery.extend({ + // Is the DOM ready to be used? Set to true once it occurs. + isReady: false, + + // A counter to track how many items to wait for before + // the ready event fires. See #6781 + readyWait: 1, + + // Hold (or release) the ready event + holdReady: function( hold ) { + if ( hold ) { + jQuery.readyWait++; + } else { + jQuery.ready( true ); + } + }, + + // Handle when the DOM is ready + ready: function( wait ) { + + // Abort if there are pending holds or we're already ready + if ( wait === true ? --jQuery.readyWait : jQuery.isReady ) { + return; + } + + // Make sure body exists, at least, in case IE gets a little overzealous (ticket #5443). 
+ if ( !document.body ) { + return setTimeout( jQuery.ready ); + } + + // Remember that the DOM is ready + jQuery.isReady = true; + + // If a normal DOM Ready event fired, decrement, and wait if need be + if ( wait !== true && --jQuery.readyWait > 0 ) { + return; + } + + // If there are functions bound, to execute + readyList.resolveWith( document, [ jQuery ] ); + + // Trigger any bound ready events + if ( jQuery.fn.triggerHandler ) { + jQuery( document ).triggerHandler( "ready" ); + jQuery( document ).off( "ready" ); + } + } +}); + +/** + * Clean-up method for dom ready events + */ +function detach() { + if ( document.addEventListener ) { + document.removeEventListener( "DOMContentLoaded", completed, false ); + window.removeEventListener( "load", completed, false ); + + } else { + document.detachEvent( "onreadystatechange", completed ); + window.detachEvent( "onload", completed ); + } +} + +/** + * The ready event handler and self cleanup method + */ +function completed() { + // readyState === "complete" is good enough for us to call the dom ready in oldIE + if ( document.addEventListener || event.type === "load" || document.readyState === "complete" ) { + detach(); + jQuery.ready(); + } +} + +jQuery.ready.promise = function( obj ) { + if ( !readyList ) { + + readyList = jQuery.Deferred(); + + // Catch cases where $(document).ready() is called after the browser event has already occurred. 
+ // we once tried to use readyState "interactive" here, but it caused issues like the one + // discovered by ChrisS here: http://bugs.jquery.com/ticket/12282#comment:15 + if ( document.readyState === "complete" ) { + // Handle it asynchronously to allow scripts the opportunity to delay ready + setTimeout( jQuery.ready ); + + // Standards-based browsers support DOMContentLoaded + } else if ( document.addEventListener ) { + // Use the handy event callback + document.addEventListener( "DOMContentLoaded", completed, false ); + + // A fallback to window.onload, that will always work + window.addEventListener( "load", completed, false ); + + // If IE event model is used + } else { + // Ensure firing before onload, maybe late but safe also for iframes + document.attachEvent( "onreadystatechange", completed ); + + // A fallback to window.onload, that will always work + window.attachEvent( "onload", completed ); + + // If IE and not a frame + // continually check to see if the document is ready + var top = false; + + try { + top = window.frameElement == null && document.documentElement; + } catch(e) {} + + if ( top && top.doScroll ) { + (function doScrollCheck() { + if ( !jQuery.isReady ) { + + try { + // Use the trick by Diego Perini + // http://javascript.nwbox.com/IEContentLoaded/ + top.doScroll("left"); + } catch(e) { + return setTimeout( doScrollCheck, 50 ); + } + + // detach all dom ready events + detach(); + + // and execute any waiting functions + jQuery.ready(); + } + })(); + } + } + } + return readyList.promise( obj ); +}; + + +var strundefined = typeof undefined; + + + +// Support: IE<9 +// Iteration over object's inherited properties before its own +var i; +for ( i in jQuery( support ) ) { + break; +} +support.ownLast = i !== "0"; + +// Note: most support tests are defined in their respective modules. 
+// false until the test is run +support.inlineBlockNeedsLayout = false; + +// Execute ASAP in case we need to set body.style.zoom +jQuery(function() { + // Minified: var a,b,c,d + var val, div, body, container; + + body = document.getElementsByTagName( "body" )[ 0 ]; + if ( !body || !body.style ) { + // Return for frameset docs that don't have a body + return; + } + + // Setup + div = document.createElement( "div" ); + container = document.createElement( "div" ); + container.style.cssText = "position:absolute;border:0;width:0;height:0;top:0;left:-9999px"; + body.appendChild( container ).appendChild( div ); + + if ( typeof div.style.zoom !== strundefined ) { + // Support: IE<8 + // Check if natively block-level elements act like inline-block + // elements when setting their display to 'inline' and giving + // them layout + div.style.cssText = "display:inline;margin:0;border:0;padding:1px;width:1px;zoom:1"; + + support.inlineBlockNeedsLayout = val = div.offsetWidth === 3; + if ( val ) { + // Prevent IE 6 from affecting layout for positioned elements #11048 + // Prevent IE from shrinking the body in IE 7 mode #12869 + // Support: IE<8 + body.style.zoom = 1; + } + } + + body.removeChild( container ); +}); + + + + +(function() { + var div = document.createElement( "div" ); + + // Execute the test only if not already executed in another module. + if (support.deleteExpando == null) { + // Support: IE<9 + support.deleteExpando = true; + try { + delete div.test; + } catch( e ) { + support.deleteExpando = false; + } + } + + // Null elements to avoid leaks in IE. + div = null; +})(); + + +/** + * Determines whether an object can have data + */ +jQuery.acceptData = function( elem ) { + var noData = jQuery.noData[ (elem.nodeName + " ").toLowerCase() ], + nodeType = +elem.nodeType || 1; + + // Do not set data on non-element DOM nodes because it will not be cleared (#8335). + return nodeType !== 1 && nodeType !== 9 ? 
+ false : + + // Nodes accept data unless otherwise specified; rejection can be conditional + !noData || noData !== true && elem.getAttribute("classid") === noData; +}; + + +var rbrace = /^(?:\{[\w\W]*\}|\[[\w\W]*\])$/, + rmultiDash = /([A-Z])/g; + +function dataAttr( elem, key, data ) { + // If nothing was found internally, try to fetch any + // data from the HTML5 data-* attribute + if ( data === undefined && elem.nodeType === 1 ) { + + var name = "data-" + key.replace( rmultiDash, "-$1" ).toLowerCase(); + + data = elem.getAttribute( name ); + + if ( typeof data === "string" ) { + try { + data = data === "true" ? true : + data === "false" ? false : + data === "null" ? null : + // Only convert to a number if it doesn't change the string + +data + "" === data ? +data : + rbrace.test( data ) ? jQuery.parseJSON( data ) : + data; + } catch( e ) {} + + // Make sure we set the data so it isn't changed later + jQuery.data( elem, key, data ); + + } else { + data = undefined; + } + } + + return data; +} + +// checks a cache object for emptiness +function isEmptyDataObject( obj ) { + var name; + for ( name in obj ) { + + // if the public data object is empty, the private is still empty + if ( name === "data" && jQuery.isEmptyObject( obj[name] ) ) { + continue; + } + if ( name !== "toJSON" ) { + return false; + } + } + + return true; +} + +function internalData( elem, name, data, pvt /* Internal Use Only */ ) { + if ( !jQuery.acceptData( elem ) ) { + return; + } + + var ret, thisCache, + internalKey = jQuery.expando, + + // We have to handle DOM nodes and JS objects differently because IE6-7 + // can't GC object references properly across the DOM-JS boundary + isNode = elem.nodeType, + + // Only DOM nodes need the global jQuery cache; JS object data is + // attached directly to the object so GC can occur automatically + cache = isNode ? 
jQuery.cache : elem, + + // Only defining an ID for JS objects if its cache already exists allows + // the code to shortcut on the same path as a DOM node with no cache + id = isNode ? elem[ internalKey ] : elem[ internalKey ] && internalKey; + + // Avoid doing any more work than we need to when trying to get data on an + // object that has no data at all + if ( (!id || !cache[id] || (!pvt && !cache[id].data)) && data === undefined && typeof name === "string" ) { + return; + } + + if ( !id ) { + // Only DOM nodes need a new unique ID for each element since their data + // ends up in the global cache + if ( isNode ) { + id = elem[ internalKey ] = deletedIds.pop() || jQuery.guid++; + } else { + id = internalKey; + } + } + + if ( !cache[ id ] ) { + // Avoid exposing jQuery metadata on plain JS objects when the object + // is serialized using JSON.stringify + cache[ id ] = isNode ? {} : { toJSON: jQuery.noop }; + } + + // An object can be passed to jQuery.data instead of a key/value pair; this gets + // shallow copied over onto the existing cache + if ( typeof name === "object" || typeof name === "function" ) { + if ( pvt ) { + cache[ id ] = jQuery.extend( cache[ id ], name ); + } else { + cache[ id ].data = jQuery.extend( cache[ id ].data, name ); + } + } + + thisCache = cache[ id ]; + + // jQuery data() is stored in a separate object inside the object's internal data + // cache in order to avoid key collisions between internal data and user-defined + // data. 
+ if ( !pvt ) { + if ( !thisCache.data ) { + thisCache.data = {}; + } + + thisCache = thisCache.data; + } + + if ( data !== undefined ) { + thisCache[ jQuery.camelCase( name ) ] = data; + } + + // Check for both converted-to-camel and non-converted data property names + // If a data property was specified + if ( typeof name === "string" ) { + + // First Try to find as-is property data + ret = thisCache[ name ]; + + // Test for null|undefined property data + if ( ret == null ) { + + // Try to find the camelCased property + ret = thisCache[ jQuery.camelCase( name ) ]; + } + } else { + ret = thisCache; + } + + return ret; +} + +function internalRemoveData( elem, name, pvt ) { + if ( !jQuery.acceptData( elem ) ) { + return; + } + + var thisCache, i, + isNode = elem.nodeType, + + // See jQuery.data for more information + cache = isNode ? jQuery.cache : elem, + id = isNode ? elem[ jQuery.expando ] : jQuery.expando; + + // If there is already no cache entry for this object, there is no + // purpose in continuing + if ( !cache[ id ] ) { + return; + } + + if ( name ) { + + thisCache = pvt ? cache[ id ] : cache[ id ].data; + + if ( thisCache ) { + + // Support array or space separated string names for data keys + if ( !jQuery.isArray( name ) ) { + + // try the string as a key before any manipulation + if ( name in thisCache ) { + name = [ name ]; + } else { + + // split the camel cased version by spaces unless a key with the spaces exists + name = jQuery.camelCase( name ); + if ( name in thisCache ) { + name = [ name ]; + } else { + name = name.split(" "); + } + } + } else { + // If "name" is an array of keys... + // When data is initially created, via ("key", "val") signature, + // keys will be converted to camelCase. + // Since there is no way to tell _how_ a key was added, remove + // both plain key and camelCase key. #12786 + // This will only penalize the array argument path. 
+ name = name.concat( jQuery.map( name, jQuery.camelCase ) ); + } + + i = name.length; + while ( i-- ) { + delete thisCache[ name[i] ]; + } + + // If there is no data left in the cache, we want to continue + // and let the cache object itself get destroyed + if ( pvt ? !isEmptyDataObject(thisCache) : !jQuery.isEmptyObject(thisCache) ) { + return; + } + } + } + + // See jQuery.data for more information + if ( !pvt ) { + delete cache[ id ].data; + + // Don't destroy the parent cache unless the internal data object + // had been the only thing left in it + if ( !isEmptyDataObject( cache[ id ] ) ) { + return; + } + } + + // Destroy the cache + if ( isNode ) { + jQuery.cleanData( [ elem ], true ); + + // Use delete when supported for expandos or `cache` is not a window per isWindow (#10080) + /* jshint eqeqeq: false */ + } else if ( support.deleteExpando || cache != cache.window ) { + /* jshint eqeqeq: true */ + delete cache[ id ]; + + // When all else fails, null + } else { + cache[ id ] = null; + } +} + +jQuery.extend({ + cache: {}, + + // The following elements (space-suffixed to avoid Object.prototype collisions) + // throw uncatchable exceptions if you attempt to set expando properties + noData: { + "applet ": true, + "embed ": true, + // ...but Flash objects (which have this classid) *can* handle expandos + "object ": "clsid:D27CDB6E-AE6D-11cf-96B8-444553540000" + }, + + hasData: function( elem ) { + elem = elem.nodeType ? jQuery.cache[ elem[jQuery.expando] ] : elem[ jQuery.expando ]; + return !!elem && !isEmptyDataObject( elem ); + }, + + data: function( elem, name, data ) { + return internalData( elem, name, data ); + }, + + removeData: function( elem, name ) { + return internalRemoveData( elem, name ); + }, + + // For internal use only. 
+ _data: function( elem, name, data ) { + return internalData( elem, name, data, true ); + }, + + _removeData: function( elem, name ) { + return internalRemoveData( elem, name, true ); + } +}); + +jQuery.fn.extend({ + data: function( key, value ) { + var i, name, data, + elem = this[0], + attrs = elem && elem.attributes; + + // Special expections of .data basically thwart jQuery.access, + // so implement the relevant behavior ourselves + + // Gets all values + if ( key === undefined ) { + if ( this.length ) { + data = jQuery.data( elem ); + + if ( elem.nodeType === 1 && !jQuery._data( elem, "parsedAttrs" ) ) { + i = attrs.length; + while ( i-- ) { + + // Support: IE11+ + // The attrs elements can be null (#14894) + if ( attrs[ i ] ) { + name = attrs[ i ].name; + if ( name.indexOf( "data-" ) === 0 ) { + name = jQuery.camelCase( name.slice(5) ); + dataAttr( elem, name, data[ name ] ); + } + } + } + jQuery._data( elem, "parsedAttrs", true ); + } + } + + return data; + } + + // Sets multiple values + if ( typeof key === "object" ) { + return this.each(function() { + jQuery.data( this, key ); + }); + } + + return arguments.length > 1 ? + + // Sets one value + this.each(function() { + jQuery.data( this, key, value ); + }) : + + // Gets one value + // Try to fetch any internally stored data first + elem ? 
dataAttr( elem, key, jQuery.data( elem, key ) ) : undefined; + }, + + removeData: function( key ) { + return this.each(function() { + jQuery.removeData( this, key ); + }); + } +}); + + +jQuery.extend({ + queue: function( elem, type, data ) { + var queue; + + if ( elem ) { + type = ( type || "fx" ) + "queue"; + queue = jQuery._data( elem, type ); + + // Speed up dequeue by getting out quickly if this is just a lookup + if ( data ) { + if ( !queue || jQuery.isArray(data) ) { + queue = jQuery._data( elem, type, jQuery.makeArray(data) ); + } else { + queue.push( data ); + } + } + return queue || []; + } + }, + + dequeue: function( elem, type ) { + type = type || "fx"; + + var queue = jQuery.queue( elem, type ), + startLength = queue.length, + fn = queue.shift(), + hooks = jQuery._queueHooks( elem, type ), + next = function() { + jQuery.dequeue( elem, type ); + }; + + // If the fx queue is dequeued, always remove the progress sentinel + if ( fn === "inprogress" ) { + fn = queue.shift(); + startLength--; + } + + if ( fn ) { + + // Add a progress sentinel to prevent the fx queue from being + // automatically dequeued + if ( type === "fx" ) { + queue.unshift( "inprogress" ); + } + + // clear up the last queue stop function + delete hooks.stop; + fn.call( elem, next, hooks ); + } + + if ( !startLength && hooks ) { + hooks.empty.fire(); + } + }, + + // not intended for public consumption - generates a queueHooks object, or returns the current one + _queueHooks: function( elem, type ) { + var key = type + "queueHooks"; + return jQuery._data( elem, key ) || jQuery._data( elem, key, { + empty: jQuery.Callbacks("once memory").add(function() { + jQuery._removeData( elem, type + "queue" ); + jQuery._removeData( elem, key ); + }) + }); + } +}); + +jQuery.fn.extend({ + queue: function( type, data ) { + var setter = 2; + + if ( typeof type !== "string" ) { + data = type; + type = "fx"; + setter--; + } + + if ( arguments.length < setter ) { + return jQuery.queue( this[0], type ); + } 
+ + return data === undefined ? + this : + this.each(function() { + var queue = jQuery.queue( this, type, data ); + + // ensure a hooks for this queue + jQuery._queueHooks( this, type ); + + if ( type === "fx" && queue[0] !== "inprogress" ) { + jQuery.dequeue( this, type ); + } + }); + }, + dequeue: function( type ) { + return this.each(function() { + jQuery.dequeue( this, type ); + }); + }, + clearQueue: function( type ) { + return this.queue( type || "fx", [] ); + }, + // Get a promise resolved when queues of a certain type + // are emptied (fx is the type by default) + promise: function( type, obj ) { + var tmp, + count = 1, + defer = jQuery.Deferred(), + elements = this, + i = this.length, + resolve = function() { + if ( !( --count ) ) { + defer.resolveWith( elements, [ elements ] ); + } + }; + + if ( typeof type !== "string" ) { + obj = type; + type = undefined; + } + type = type || "fx"; + + while ( i-- ) { + tmp = jQuery._data( elements[ i ], type + "queueHooks" ); + if ( tmp && tmp.empty ) { + count++; + tmp.empty.add( resolve ); + } + } + resolve(); + return defer.promise( obj ); + } +}); +var pnum = (/[+-]?(?:\d*\.|)\d+(?:[eE][+-]?\d+|)/).source; + +var cssExpand = [ "Top", "Right", "Bottom", "Left" ]; + +var isHidden = function( elem, el ) { + // isHidden might be called from jQuery#filter function; + // in that case, element will be second argument + elem = el || elem; + return jQuery.css( elem, "display" ) === "none" || !jQuery.contains( elem.ownerDocument, elem ); + }; + + + +// Multifunctional method to get and set values of a collection +// The value/s can optionally be executed if it's a function +var access = jQuery.access = function( elems, fn, key, value, chainable, emptyGet, raw ) { + var i = 0, + length = elems.length, + bulk = key == null; + + // Sets many values + if ( jQuery.type( key ) === "object" ) { + chainable = true; + for ( i in key ) { + jQuery.access( elems, fn, i, key[i], true, emptyGet, raw ); + } + + // Sets one value + } else 
if ( value !== undefined ) { + chainable = true; + + if ( !jQuery.isFunction( value ) ) { + raw = true; + } + + if ( bulk ) { + // Bulk operations run against the entire set + if ( raw ) { + fn.call( elems, value ); + fn = null; + + // ...except when executing function values + } else { + bulk = fn; + fn = function( elem, key, value ) { + return bulk.call( jQuery( elem ), value ); + }; + } + } + + if ( fn ) { + for ( ; i < length; i++ ) { + fn( elems[i], key, raw ? value : value.call( elems[i], i, fn( elems[i], key ) ) ); + } + } + } + + return chainable ? + elems : + + // Gets + bulk ? + fn.call( elems ) : + length ? fn( elems[0], key ) : emptyGet; +}; +var rcheckableType = (/^(?:checkbox|radio)$/i); + + + +(function() { + // Minified: var a,b,c + var input = document.createElement( "input" ), + div = document.createElement( "div" ), + fragment = document.createDocumentFragment(); + + // Setup + div.innerHTML = "
a"; + + // IE strips leading whitespace when .innerHTML is used + support.leadingWhitespace = div.firstChild.nodeType === 3; + + // Make sure that tbody elements aren't automatically inserted + // IE will insert them into empty tables + support.tbody = !div.getElementsByTagName( "tbody" ).length; + + // Make sure that link elements get serialized correctly by innerHTML + // This requires a wrapper element in IE + support.htmlSerialize = !!div.getElementsByTagName( "link" ).length; + + // Makes sure cloning an html5 element does not cause problems + // Where outerHTML is undefined, this still works + support.html5Clone = + document.createElement( "nav" ).cloneNode( true ).outerHTML !== "<:nav>"; + + // Check if a disconnected checkbox will retain its checked + // value of true after appended to the DOM (IE6/7) + input.type = "checkbox"; + input.checked = true; + fragment.appendChild( input ); + support.appendChecked = input.checked; + + // Make sure textarea (and checkbox) defaultValue is properly cloned + // Support: IE6-IE11+ + div.innerHTML = ""; + support.noCloneChecked = !!div.cloneNode( true ).lastChild.defaultValue; + + // #11217 - WebKit loses check when the name is after the checked attribute + fragment.appendChild( div ); + div.innerHTML = ""; + + // Support: Safari 5.1, iOS 5.1, Android 4.x, Android 2.3 + // old WebKit doesn't clone checked state correctly in fragments + support.checkClone = div.cloneNode( true ).cloneNode( true ).lastChild.checked; + + // Support: IE<9 + // Opera does not clone events (and typeof div.attachEvent === undefined). + // IE9-10 clones events bound via attachEvent, but they don't trigger with .click() + support.noCloneEvent = true; + if ( div.attachEvent ) { + div.attachEvent( "onclick", function() { + support.noCloneEvent = false; + }); + + div.cloneNode( true ).click(); + } + + // Execute the test only if not already executed in another module. 
+ if (support.deleteExpando == null) { + // Support: IE<9 + support.deleteExpando = true; + try { + delete div.test; + } catch( e ) { + support.deleteExpando = false; + } + } +})(); + + +(function() { + var i, eventName, + div = document.createElement( "div" ); + + // Support: IE<9 (lack submit/change bubble), Firefox 23+ (lack focusin event) + for ( i in { submit: true, change: true, focusin: true }) { + eventName = "on" + i; + + if ( !(support[ i + "Bubbles" ] = eventName in window) ) { + // Beware of CSP restrictions (https://developer.mozilla.org/en/Security/CSP) + div.setAttribute( eventName, "t" ); + support[ i + "Bubbles" ] = div.attributes[ eventName ].expando === false; + } + } + + // Null elements to avoid leaks in IE. + div = null; +})(); + + +var rformElems = /^(?:input|select|textarea)$/i, + rkeyEvent = /^key/, + rmouseEvent = /^(?:mouse|pointer|contextmenu)|click/, + rfocusMorph = /^(?:focusinfocus|focusoutblur)$/, + rtypenamespace = /^([^.]*)(?:\.(.+)|)$/; + +function returnTrue() { + return true; +} + +function returnFalse() { + return false; +} + +function safeActiveElement() { + try { + return document.activeElement; + } catch ( err ) { } +} + +/* + * Helper functions for managing events -- not part of the public interface. + * Props to Dean Edwards' addEvent library for many of the ideas. 
+ */ +jQuery.event = { + + global: {}, + + add: function( elem, types, handler, data, selector ) { + var tmp, events, t, handleObjIn, + special, eventHandle, handleObj, + handlers, type, namespaces, origType, + elemData = jQuery._data( elem ); + + // Don't attach events to noData or text/comment nodes (but allow plain objects) + if ( !elemData ) { + return; + } + + // Caller can pass in an object of custom data in lieu of the handler + if ( handler.handler ) { + handleObjIn = handler; + handler = handleObjIn.handler; + selector = handleObjIn.selector; + } + + // Make sure that the handler has a unique ID, used to find/remove it later + if ( !handler.guid ) { + handler.guid = jQuery.guid++; + } + + // Init the element's event structure and main handler, if this is the first + if ( !(events = elemData.events) ) { + events = elemData.events = {}; + } + if ( !(eventHandle = elemData.handle) ) { + eventHandle = elemData.handle = function( e ) { + // Discard the second event of a jQuery.event.trigger() and + // when an event is called after a page has unloaded + return typeof jQuery !== strundefined && (!e || jQuery.event.triggered !== e.type) ? + jQuery.event.dispatch.apply( eventHandle.elem, arguments ) : + undefined; + }; + // Add elem as a property of the handle fn to prevent a memory leak with IE non-native events + eventHandle.elem = elem; + } + + // Handle multiple events separated by a space + types = ( types || "" ).match( rnotwhite ) || [ "" ]; + t = types.length; + while ( t-- ) { + tmp = rtypenamespace.exec( types[t] ) || []; + type = origType = tmp[1]; + namespaces = ( tmp[2] || "" ).split( "." ).sort(); + + // There *must* be a type, no attaching namespace-only handlers + if ( !type ) { + continue; + } + + // If event changes its type, use the special event handlers for the changed type + special = jQuery.event.special[ type ] || {}; + + // If selector defined, determine special event api type, otherwise given type + type = ( selector ? 
special.delegateType : special.bindType ) || type; + + // Update special based on newly reset type + special = jQuery.event.special[ type ] || {}; + + // handleObj is passed to all event handlers + handleObj = jQuery.extend({ + type: type, + origType: origType, + data: data, + handler: handler, + guid: handler.guid, + selector: selector, + needsContext: selector && jQuery.expr.match.needsContext.test( selector ), + namespace: namespaces.join(".") + }, handleObjIn ); + + // Init the event handler queue if we're the first + if ( !(handlers = events[ type ]) ) { + handlers = events[ type ] = []; + handlers.delegateCount = 0; + + // Only use addEventListener/attachEvent if the special events handler returns false + if ( !special.setup || special.setup.call( elem, data, namespaces, eventHandle ) === false ) { + // Bind the global event handler to the element + if ( elem.addEventListener ) { + elem.addEventListener( type, eventHandle, false ); + + } else if ( elem.attachEvent ) { + elem.attachEvent( "on" + type, eventHandle ); + } + } + } + + if ( special.add ) { + special.add.call( elem, handleObj ); + + if ( !handleObj.handler.guid ) { + handleObj.handler.guid = handler.guid; + } + } + + // Add to the element's handler list, delegates in front + if ( selector ) { + handlers.splice( handlers.delegateCount++, 0, handleObj ); + } else { + handlers.push( handleObj ); + } + + // Keep track of which events have ever been used, for event optimization + jQuery.event.global[ type ] = true; + } + + // Nullify elem to prevent memory leaks in IE + elem = null; + }, + + // Detach an event or set of events from an element + remove: function( elem, types, handler, selector, mappedTypes ) { + var j, handleObj, tmp, + origCount, t, events, + special, handlers, type, + namespaces, origType, + elemData = jQuery.hasData( elem ) && jQuery._data( elem ); + + if ( !elemData || !(events = elemData.events) ) { + return; + } + + // Once for each type.namespace in types; type may be omitted + 
types = ( types || "" ).match( rnotwhite ) || [ "" ]; + t = types.length; + while ( t-- ) { + tmp = rtypenamespace.exec( types[t] ) || []; + type = origType = tmp[1]; + namespaces = ( tmp[2] || "" ).split( "." ).sort(); + + // Unbind all events (on this namespace, if provided) for the element + if ( !type ) { + for ( type in events ) { + jQuery.event.remove( elem, type + types[ t ], handler, selector, true ); + } + continue; + } + + special = jQuery.event.special[ type ] || {}; + type = ( selector ? special.delegateType : special.bindType ) || type; + handlers = events[ type ] || []; + tmp = tmp[2] && new RegExp( "(^|\\.)" + namespaces.join("\\.(?:.*\\.|)") + "(\\.|$)" ); + + // Remove matching events + origCount = j = handlers.length; + while ( j-- ) { + handleObj = handlers[ j ]; + + if ( ( mappedTypes || origType === handleObj.origType ) && + ( !handler || handler.guid === handleObj.guid ) && + ( !tmp || tmp.test( handleObj.namespace ) ) && + ( !selector || selector === handleObj.selector || selector === "**" && handleObj.selector ) ) { + handlers.splice( j, 1 ); + + if ( handleObj.selector ) { + handlers.delegateCount--; + } + if ( special.remove ) { + special.remove.call( elem, handleObj ); + } + } + } + + // Remove generic event handler if we removed something and no more handlers exist + // (avoids potential for endless recursion during removal of special event handlers) + if ( origCount && !handlers.length ) { + if ( !special.teardown || special.teardown.call( elem, namespaces, elemData.handle ) === false ) { + jQuery.removeEvent( elem, type, elemData.handle ); + } + + delete events[ type ]; + } + } + + // Remove the expando if it's no longer used + if ( jQuery.isEmptyObject( events ) ) { + delete elemData.handle; + + // removeData also checks for emptiness and clears the expando if empty + // so use it instead of delete + jQuery._removeData( elem, "events" ); + } + }, + + trigger: function( event, data, elem, onlyHandlers ) { + var handle, ontype, cur, + 
bubbleType, special, tmp, i, + eventPath = [ elem || document ], + type = hasOwn.call( event, "type" ) ? event.type : event, + namespaces = hasOwn.call( event, "namespace" ) ? event.namespace.split(".") : []; + + cur = tmp = elem = elem || document; + + // Don't do events on text and comment nodes + if ( elem.nodeType === 3 || elem.nodeType === 8 ) { + return; + } + + // focus/blur morphs to focusin/out; ensure we're not firing them right now + if ( rfocusMorph.test( type + jQuery.event.triggered ) ) { + return; + } + + if ( type.indexOf(".") >= 0 ) { + // Namespaced trigger; create a regexp to match event type in handle() + namespaces = type.split("."); + type = namespaces.shift(); + namespaces.sort(); + } + ontype = type.indexOf(":") < 0 && "on" + type; + + // Caller can pass in a jQuery.Event object, Object, or just an event type string + event = event[ jQuery.expando ] ? + event : + new jQuery.Event( type, typeof event === "object" && event ); + + // Trigger bitmask: & 1 for native handlers; & 2 for jQuery (always true) + event.isTrigger = onlyHandlers ? 2 : 3; + event.namespace = namespaces.join("."); + event.namespace_re = event.namespace ? + new RegExp( "(^|\\.)" + namespaces.join("\\.(?:.*\\.|)") + "(\\.|$)" ) : + null; + + // Clean up the event in case it is being reused + event.result = undefined; + if ( !event.target ) { + event.target = elem; + } + + // Clone any incoming data and prepend the event, creating the handler arg list + data = data == null ? 
+ [ event ] : + jQuery.makeArray( data, [ event ] ); + + // Allow special events to draw outside the lines + special = jQuery.event.special[ type ] || {}; + if ( !onlyHandlers && special.trigger && special.trigger.apply( elem, data ) === false ) { + return; + } + + // Determine event propagation path in advance, per W3C events spec (#9951) + // Bubble up to document, then to window; watch for a global ownerDocument var (#9724) + if ( !onlyHandlers && !special.noBubble && !jQuery.isWindow( elem ) ) { + + bubbleType = special.delegateType || type; + if ( !rfocusMorph.test( bubbleType + type ) ) { + cur = cur.parentNode; + } + for ( ; cur; cur = cur.parentNode ) { + eventPath.push( cur ); + tmp = cur; + } + + // Only add window if we got to document (e.g., not plain obj or detached DOM) + if ( tmp === (elem.ownerDocument || document) ) { + eventPath.push( tmp.defaultView || tmp.parentWindow || window ); + } + } + + // Fire handlers on the event path + i = 0; + while ( (cur = eventPath[i++]) && !event.isPropagationStopped() ) { + + event.type = i > 1 ? + bubbleType : + special.bindType || type; + + // jQuery handler + handle = ( jQuery._data( cur, "events" ) || {} )[ event.type ] && jQuery._data( cur, "handle" ); + if ( handle ) { + handle.apply( cur, data ); + } + + // Native handler + handle = ontype && cur[ ontype ]; + if ( handle && handle.apply && jQuery.acceptData( cur ) ) { + event.result = handle.apply( cur, data ); + if ( event.result === false ) { + event.preventDefault(); + } + } + } + event.type = type; + + // If nobody prevented the default action, do it now + if ( !onlyHandlers && !event.isDefaultPrevented() ) { + + if ( (!special._default || special._default.apply( eventPath.pop(), data ) === false) && + jQuery.acceptData( elem ) ) { + + // Call a native DOM method on the target with the same name name as the event. + // Can't use an .isFunction() check here because IE6/7 fails that test. 
+ // Don't do default actions on window, that's where global variables be (#6170) + if ( ontype && elem[ type ] && !jQuery.isWindow( elem ) ) { + + // Don't re-trigger an onFOO event when we call its FOO() method + tmp = elem[ ontype ]; + + if ( tmp ) { + elem[ ontype ] = null; + } + + // Prevent re-triggering of the same event, since we already bubbled it above + jQuery.event.triggered = type; + try { + elem[ type ](); + } catch ( e ) { + // IE<9 dies on focus/blur to hidden element (#1486,#12518) + // only reproducible on winXP IE8 native, not IE9 in IE8 mode + } + jQuery.event.triggered = undefined; + + if ( tmp ) { + elem[ ontype ] = tmp; + } + } + } + } + + return event.result; + }, + + dispatch: function( event ) { + + // Make a writable jQuery.Event from the native event object + event = jQuery.event.fix( event ); + + var i, ret, handleObj, matched, j, + handlerQueue = [], + args = slice.call( arguments ), + handlers = ( jQuery._data( this, "events" ) || {} )[ event.type ] || [], + special = jQuery.event.special[ event.type ] || {}; + + // Use the fix-ed jQuery.Event rather than the (read-only) native event + args[0] = event; + event.delegateTarget = this; + + // Call the preDispatch hook for the mapped type, and let it bail if desired + if ( special.preDispatch && special.preDispatch.call( this, event ) === false ) { + return; + } + + // Determine handlers + handlerQueue = jQuery.event.handlers.call( this, event, handlers ); + + // Run delegates first; they may want to stop propagation beneath us + i = 0; + while ( (matched = handlerQueue[ i++ ]) && !event.isPropagationStopped() ) { + event.currentTarget = matched.elem; + + j = 0; + while ( (handleObj = matched.handlers[ j++ ]) && !event.isImmediatePropagationStopped() ) { + + // Triggered event must either 1) have no namespace, or + // 2) have namespace(s) a subset or equal to those in the bound event (both can have no namespace). 
+ if ( !event.namespace_re || event.namespace_re.test( handleObj.namespace ) ) { + + event.handleObj = handleObj; + event.data = handleObj.data; + + ret = ( (jQuery.event.special[ handleObj.origType ] || {}).handle || handleObj.handler ) + .apply( matched.elem, args ); + + if ( ret !== undefined ) { + if ( (event.result = ret) === false ) { + event.preventDefault(); + event.stopPropagation(); + } + } + } + } + } + + // Call the postDispatch hook for the mapped type + if ( special.postDispatch ) { + special.postDispatch.call( this, event ); + } + + return event.result; + }, + + handlers: function( event, handlers ) { + var sel, handleObj, matches, i, + handlerQueue = [], + delegateCount = handlers.delegateCount, + cur = event.target; + + // Find delegate handlers + // Black-hole SVG instance trees (#13180) + // Avoid non-left-click bubbling in Firefox (#3861) + if ( delegateCount && cur.nodeType && (!event.button || event.type !== "click") ) { + + /* jshint eqeqeq: false */ + for ( ; cur != this; cur = cur.parentNode || this ) { + /* jshint eqeqeq: true */ + + // Don't check non-elements (#13208) + // Don't process clicks on disabled elements (#6911, #8165, #11382, #11764) + if ( cur.nodeType === 1 && (cur.disabled !== true || event.type !== "click") ) { + matches = []; + for ( i = 0; i < delegateCount; i++ ) { + handleObj = handlers[ i ]; + + // Don't conflict with Object.prototype properties (#13203) + sel = handleObj.selector + " "; + + if ( matches[ sel ] === undefined ) { + matches[ sel ] = handleObj.needsContext ? 
+ jQuery( sel, this ).index( cur ) >= 0 : + jQuery.find( sel, this, null, [ cur ] ).length; + } + if ( matches[ sel ] ) { + matches.push( handleObj ); + } + } + if ( matches.length ) { + handlerQueue.push({ elem: cur, handlers: matches }); + } + } + } + } + + // Add the remaining (directly-bound) handlers + if ( delegateCount < handlers.length ) { + handlerQueue.push({ elem: this, handlers: handlers.slice( delegateCount ) }); + } + + return handlerQueue; + }, + + fix: function( event ) { + if ( event[ jQuery.expando ] ) { + return event; + } + + // Create a writable copy of the event object and normalize some properties + var i, prop, copy, + type = event.type, + originalEvent = event, + fixHook = this.fixHooks[ type ]; + + if ( !fixHook ) { + this.fixHooks[ type ] = fixHook = + rmouseEvent.test( type ) ? this.mouseHooks : + rkeyEvent.test( type ) ? this.keyHooks : + {}; + } + copy = fixHook.props ? this.props.concat( fixHook.props ) : this.props; + + event = new jQuery.Event( originalEvent ); + + i = copy.length; + while ( i-- ) { + prop = copy[ i ]; + event[ prop ] = originalEvent[ prop ]; + } + + // Support: IE<9 + // Fix target property (#1925) + if ( !event.target ) { + event.target = originalEvent.srcElement || document; + } + + // Support: Chrome 23+, Safari? + // Target should not be a text node (#504, #13143) + if ( event.target.nodeType === 3 ) { + event.target = event.target.parentNode; + } + + // Support: IE<9 + // For mouse/key events, metaKey==false if it's undefined (#3368, #11328) + event.metaKey = !!event.metaKey; + + return fixHook.filter ? 
fixHook.filter( event, originalEvent ) : event; + }, + + // Includes some event props shared by KeyEvent and MouseEvent + props: "altKey bubbles cancelable ctrlKey currentTarget eventPhase metaKey relatedTarget shiftKey target timeStamp view which".split(" "), + + fixHooks: {}, + + keyHooks: { + props: "char charCode key keyCode".split(" "), + filter: function( event, original ) { + + // Add which for key events + if ( event.which == null ) { + event.which = original.charCode != null ? original.charCode : original.keyCode; + } + + return event; + } + }, + + mouseHooks: { + props: "button buttons clientX clientY fromElement offsetX offsetY pageX pageY screenX screenY toElement".split(" "), + filter: function( event, original ) { + var body, eventDoc, doc, + button = original.button, + fromElement = original.fromElement; + + // Calculate pageX/Y if missing and clientX/Y available + if ( event.pageX == null && original.clientX != null ) { + eventDoc = event.target.ownerDocument || document; + doc = eventDoc.documentElement; + body = eventDoc.body; + + event.pageX = original.clientX + ( doc && doc.scrollLeft || body && body.scrollLeft || 0 ) - ( doc && doc.clientLeft || body && body.clientLeft || 0 ); + event.pageY = original.clientY + ( doc && doc.scrollTop || body && body.scrollTop || 0 ) - ( doc && doc.clientTop || body && body.clientTop || 0 ); + } + + // Add relatedTarget, if necessary + if ( !event.relatedTarget && fromElement ) { + event.relatedTarget = fromElement === event.target ? original.toElement : fromElement; + } + + // Add which for click: 1 === left; 2 === middle; 3 === right + // Note: button is not normalized, so don't use it + if ( !event.which && button !== undefined ) { + event.which = ( button & 1 ? 1 : ( button & 2 ? 3 : ( button & 4 ? 
2 : 0 ) ) ); + } + + return event; + } + }, + + special: { + load: { + // Prevent triggered image.load events from bubbling to window.load + noBubble: true + }, + focus: { + // Fire native event if possible so blur/focus sequence is correct + trigger: function() { + if ( this !== safeActiveElement() && this.focus ) { + try { + this.focus(); + return false; + } catch ( e ) { + // Support: IE<9 + // If we error on focus to hidden element (#1486, #12518), + // let .trigger() run the handlers + } + } + }, + delegateType: "focusin" + }, + blur: { + trigger: function() { + if ( this === safeActiveElement() && this.blur ) { + this.blur(); + return false; + } + }, + delegateType: "focusout" + }, + click: { + // For checkbox, fire native event so checked state will be right + trigger: function() { + if ( jQuery.nodeName( this, "input" ) && this.type === "checkbox" && this.click ) { + this.click(); + return false; + } + }, + + // For cross-browser consistency, don't fire native .click() on links + _default: function( event ) { + return jQuery.nodeName( event.target, "a" ); + } + }, + + beforeunload: { + postDispatch: function( event ) { + + // Support: Firefox 20+ + // Firefox doesn't alert if the returnValue field is not set. + if ( event.result !== undefined && event.originalEvent ) { + event.originalEvent.returnValue = event.result; + } + } + } + }, + + simulate: function( type, elem, event, bubble ) { + // Piggyback on a donor event to simulate a different one. + // Fake originalEvent to avoid donor's stopPropagation, but if the + // simulated event prevents default then we do the same on the donor. + var e = jQuery.extend( + new jQuery.Event(), + event, + { + type: type, + isSimulated: true, + originalEvent: {} + } + ); + if ( bubble ) { + jQuery.event.trigger( e, null, elem ); + } else { + jQuery.event.dispatch.call( elem, e ); + } + if ( e.isDefaultPrevented() ) { + event.preventDefault(); + } + } +}; + +jQuery.removeEvent = document.removeEventListener ? 
+ function( elem, type, handle ) { + if ( elem.removeEventListener ) { + elem.removeEventListener( type, handle, false ); + } + } : + function( elem, type, handle ) { + var name = "on" + type; + + if ( elem.detachEvent ) { + + // #8545, #7054, preventing memory leaks for custom events in IE6-8 + // detachEvent needed property on element, by name of that event, to properly expose it to GC + if ( typeof elem[ name ] === strundefined ) { + elem[ name ] = null; + } + + elem.detachEvent( name, handle ); + } + }; + +jQuery.Event = function( src, props ) { + // Allow instantiation without the 'new' keyword + if ( !(this instanceof jQuery.Event) ) { + return new jQuery.Event( src, props ); + } + + // Event object + if ( src && src.type ) { + this.originalEvent = src; + this.type = src.type; + + // Events bubbling up the document may have been marked as prevented + // by a handler lower down the tree; reflect the correct value. + this.isDefaultPrevented = src.defaultPrevented || + src.defaultPrevented === undefined && + // Support: IE < 9, Android < 4.0 + src.returnValue === false ? 
+ returnTrue : + returnFalse; + + // Event type + } else { + this.type = src; + } + + // Put explicitly provided properties onto the event object + if ( props ) { + jQuery.extend( this, props ); + } + + // Create a timestamp if incoming event doesn't have one + this.timeStamp = src && src.timeStamp || jQuery.now(); + + // Mark it as fixed + this[ jQuery.expando ] = true; +}; + +// jQuery.Event is based on DOM3 Events as specified by the ECMAScript Language Binding +// http://www.w3.org/TR/2003/WD-DOM-Level-3-Events-20030331/ecma-script-binding.html +jQuery.Event.prototype = { + isDefaultPrevented: returnFalse, + isPropagationStopped: returnFalse, + isImmediatePropagationStopped: returnFalse, + + preventDefault: function() { + var e = this.originalEvent; + + this.isDefaultPrevented = returnTrue; + if ( !e ) { + return; + } + + // If preventDefault exists, run it on the original event + if ( e.preventDefault ) { + e.preventDefault(); + + // Support: IE + // Otherwise set the returnValue property of the original event to false + } else { + e.returnValue = false; + } + }, + stopPropagation: function() { + var e = this.originalEvent; + + this.isPropagationStopped = returnTrue; + if ( !e ) { + return; + } + // If stopPropagation exists, run it on the original event + if ( e.stopPropagation ) { + e.stopPropagation(); + } + + // Support: IE + // Set the cancelBubble property of the original event to true + e.cancelBubble = true; + }, + stopImmediatePropagation: function() { + var e = this.originalEvent; + + this.isImmediatePropagationStopped = returnTrue; + + if ( e && e.stopImmediatePropagation ) { + e.stopImmediatePropagation(); + } + + this.stopPropagation(); + } +}; + +// Create mouseenter/leave events using mouseover/out and event-time checks +jQuery.each({ + mouseenter: "mouseover", + mouseleave: "mouseout", + pointerenter: "pointerover", + pointerleave: "pointerout" +}, function( orig, fix ) { + jQuery.event.special[ orig ] = { + delegateType: fix, + bindType: fix, 
+ + handle: function( event ) { + var ret, + target = this, + related = event.relatedTarget, + handleObj = event.handleObj; + + // For mousenter/leave call the handler if related is outside the target. + // NB: No relatedTarget if the mouse left/entered the browser window + if ( !related || (related !== target && !jQuery.contains( target, related )) ) { + event.type = handleObj.origType; + ret = handleObj.handler.apply( this, arguments ); + event.type = fix; + } + return ret; + } + }; +}); + +// IE submit delegation +if ( !support.submitBubbles ) { + + jQuery.event.special.submit = { + setup: function() { + // Only need this for delegated form submit events + if ( jQuery.nodeName( this, "form" ) ) { + return false; + } + + // Lazy-add a submit handler when a descendant form may potentially be submitted + jQuery.event.add( this, "click._submit keypress._submit", function( e ) { + // Node name check avoids a VML-related crash in IE (#9807) + var elem = e.target, + form = jQuery.nodeName( elem, "input" ) || jQuery.nodeName( elem, "button" ) ? 
elem.form : undefined; + if ( form && !jQuery._data( form, "submitBubbles" ) ) { + jQuery.event.add( form, "submit._submit", function( event ) { + event._submit_bubble = true; + }); + jQuery._data( form, "submitBubbles", true ); + } + }); + // return undefined since we don't need an event listener + }, + + postDispatch: function( event ) { + // If form was submitted by the user, bubble the event up the tree + if ( event._submit_bubble ) { + delete event._submit_bubble; + if ( this.parentNode && !event.isTrigger ) { + jQuery.event.simulate( "submit", this.parentNode, event, true ); + } + } + }, + + teardown: function() { + // Only need this for delegated form submit events + if ( jQuery.nodeName( this, "form" ) ) { + return false; + } + + // Remove delegated handlers; cleanData eventually reaps submit handlers attached above + jQuery.event.remove( this, "._submit" ); + } + }; +} + +// IE change delegation and checkbox/radio fix +if ( !support.changeBubbles ) { + + jQuery.event.special.change = { + + setup: function() { + + if ( rformElems.test( this.nodeName ) ) { + // IE doesn't fire change on a check/radio until blur; trigger it on click + // after a propertychange. Eat the blur-change in special.change.handle. + // This still fires onchange a second time for check/radio after blur. 
+ if ( this.type === "checkbox" || this.type === "radio" ) { + jQuery.event.add( this, "propertychange._change", function( event ) { + if ( event.originalEvent.propertyName === "checked" ) { + this._just_changed = true; + } + }); + jQuery.event.add( this, "click._change", function( event ) { + if ( this._just_changed && !event.isTrigger ) { + this._just_changed = false; + } + // Allow triggered, simulated change events (#11500) + jQuery.event.simulate( "change", this, event, true ); + }); + } + return false; + } + // Delegated event; lazy-add a change handler on descendant inputs + jQuery.event.add( this, "beforeactivate._change", function( e ) { + var elem = e.target; + + if ( rformElems.test( elem.nodeName ) && !jQuery._data( elem, "changeBubbles" ) ) { + jQuery.event.add( elem, "change._change", function( event ) { + if ( this.parentNode && !event.isSimulated && !event.isTrigger ) { + jQuery.event.simulate( "change", this.parentNode, event, true ); + } + }); + jQuery._data( elem, "changeBubbles", true ); + } + }); + }, + + handle: function( event ) { + var elem = event.target; + + // Swallow native change events from checkbox/radio, we already triggered them above + if ( this !== elem || event.isSimulated || event.isTrigger || (elem.type !== "radio" && elem.type !== "checkbox") ) { + return event.handleObj.handler.apply( this, arguments ); + } + }, + + teardown: function() { + jQuery.event.remove( this, "._change" ); + + return !rformElems.test( this.nodeName ); + } + }; +} + +// Create "bubbling" focus and blur events +if ( !support.focusinBubbles ) { + jQuery.each({ focus: "focusin", blur: "focusout" }, function( orig, fix ) { + + // Attach a single capturing handler on the document while someone wants focusin/focusout + var handler = function( event ) { + jQuery.event.simulate( fix, event.target, jQuery.event.fix( event ), true ); + }; + + jQuery.event.special[ fix ] = { + setup: function() { + var doc = this.ownerDocument || this, + attaches = jQuery._data( 
doc, fix ); + + if ( !attaches ) { + doc.addEventListener( orig, handler, true ); + } + jQuery._data( doc, fix, ( attaches || 0 ) + 1 ); + }, + teardown: function() { + var doc = this.ownerDocument || this, + attaches = jQuery._data( doc, fix ) - 1; + + if ( !attaches ) { + doc.removeEventListener( orig, handler, true ); + jQuery._removeData( doc, fix ); + } else { + jQuery._data( doc, fix, attaches ); + } + } + }; + }); +} + +jQuery.fn.extend({ + + on: function( types, selector, data, fn, /*INTERNAL*/ one ) { + var type, origFn; + + // Types can be a map of types/handlers + if ( typeof types === "object" ) { + // ( types-Object, selector, data ) + if ( typeof selector !== "string" ) { + // ( types-Object, data ) + data = data || selector; + selector = undefined; + } + for ( type in types ) { + this.on( type, selector, data, types[ type ], one ); + } + return this; + } + + if ( data == null && fn == null ) { + // ( types, fn ) + fn = selector; + data = selector = undefined; + } else if ( fn == null ) { + if ( typeof selector === "string" ) { + // ( types, selector, fn ) + fn = data; + data = undefined; + } else { + // ( types, data, fn ) + fn = data; + data = selector; + selector = undefined; + } + } + if ( fn === false ) { + fn = returnFalse; + } else if ( !fn ) { + return this; + } + + if ( one === 1 ) { + origFn = fn; + fn = function( event ) { + // Can use an empty set, since event contains the info + jQuery().off( event ); + return origFn.apply( this, arguments ); + }; + // Use same guid so caller can remove using origFn + fn.guid = origFn.guid || ( origFn.guid = jQuery.guid++ ); + } + return this.each( function() { + jQuery.event.add( this, types, fn, data, selector ); + }); + }, + one: function( types, selector, data, fn ) { + return this.on( types, selector, data, fn, 1 ); + }, + off: function( types, selector, fn ) { + var handleObj, type; + if ( types && types.preventDefault && types.handleObj ) { + // ( event ) dispatched jQuery.Event + handleObj = 
types.handleObj; + jQuery( types.delegateTarget ).off( + handleObj.namespace ? handleObj.origType + "." + handleObj.namespace : handleObj.origType, + handleObj.selector, + handleObj.handler + ); + return this; + } + if ( typeof types === "object" ) { + // ( types-object [, selector] ) + for ( type in types ) { + this.off( type, selector, types[ type ] ); + } + return this; + } + if ( selector === false || typeof selector === "function" ) { + // ( types [, fn] ) + fn = selector; + selector = undefined; + } + if ( fn === false ) { + fn = returnFalse; + } + return this.each(function() { + jQuery.event.remove( this, types, fn, selector ); + }); + }, + + trigger: function( type, data ) { + return this.each(function() { + jQuery.event.trigger( type, data, this ); + }); + }, + triggerHandler: function( type, data ) { + var elem = this[0]; + if ( elem ) { + return jQuery.event.trigger( type, data, elem, true ); + } + } +}); + + +function createSafeFragment( document ) { + var list = nodeNames.split( "|" ), + safeFrag = document.createDocumentFragment(); + + if ( safeFrag.createElement ) { + while ( list.length ) { + safeFrag.createElement( + list.pop() + ); + } + } + return safeFrag; +} + +var nodeNames = "abbr|article|aside|audio|bdi|canvas|data|datalist|details|figcaption|figure|footer|" + + "header|hgroup|mark|meter|nav|output|progress|section|summary|time|video", + rinlinejQuery = / jQuery\d+="(?:null|\d+)"/g, + rnoshimcache = new RegExp("<(?:" + nodeNames + ")[\\s/>]", "i"), + rleadingWhitespace = /^\s+/, + rxhtmlTag = /<(?!area|br|col|embed|hr|img|input|link|meta|param)(([\w:]+)[^>]*)\/>/gi, + rtagName = /<([\w:]+)/, + rtbody = /\s*$/g, + + // We have to close these tags to support XHTML (#13200) + wrapMap = { + option: [ 1, "" ], + legend: [ 1, "
", "
" ], + area: [ 1, "", "" ], + param: [ 1, "", "" ], + thead: [ 1, "", "
" ], + tr: [ 2, "", "
" ], + col: [ 2, "", "
" ], + td: [ 3, "", "
" ], + + // IE6-8 can't serialize link, script, style, or any html5 (NoScope) tags, + // unless wrapped in a div with non-breaking characters in front of it. + _default: support.htmlSerialize ? [ 0, "", "" ] : [ 1, "X
", "
" ] + }, + safeFragment = createSafeFragment( document ), + fragmentDiv = safeFragment.appendChild( document.createElement("div") ); + +wrapMap.optgroup = wrapMap.option; +wrapMap.tbody = wrapMap.tfoot = wrapMap.colgroup = wrapMap.caption = wrapMap.thead; +wrapMap.th = wrapMap.td; + +function getAll( context, tag ) { + var elems, elem, + i = 0, + found = typeof context.getElementsByTagName !== strundefined ? context.getElementsByTagName( tag || "*" ) : + typeof context.querySelectorAll !== strundefined ? context.querySelectorAll( tag || "*" ) : + undefined; + + if ( !found ) { + for ( found = [], elems = context.childNodes || context; (elem = elems[i]) != null; i++ ) { + if ( !tag || jQuery.nodeName( elem, tag ) ) { + found.push( elem ); + } else { + jQuery.merge( found, getAll( elem, tag ) ); + } + } + } + + return tag === undefined || tag && jQuery.nodeName( context, tag ) ? + jQuery.merge( [ context ], found ) : + found; +} + +// Used in buildFragment, fixes the defaultChecked property +function fixDefaultChecked( elem ) { + if ( rcheckableType.test( elem.type ) ) { + elem.defaultChecked = elem.checked; + } +} + +// Support: IE<8 +// Manipulating tables requires a tbody +function manipulationTarget( elem, content ) { + return jQuery.nodeName( elem, "table" ) && + jQuery.nodeName( content.nodeType !== 11 ? content : content.firstChild, "tr" ) ? 
+ + elem.getElementsByTagName("tbody")[0] || + elem.appendChild( elem.ownerDocument.createElement("tbody") ) : + elem; +} + +// Replace/restore the type attribute of script elements for safe DOM manipulation +function disableScript( elem ) { + elem.type = (jQuery.find.attr( elem, "type" ) !== null) + "/" + elem.type; + return elem; +} +function restoreScript( elem ) { + var match = rscriptTypeMasked.exec( elem.type ); + if ( match ) { + elem.type = match[1]; + } else { + elem.removeAttribute("type"); + } + return elem; +} + +// Mark scripts as having already been evaluated +function setGlobalEval( elems, refElements ) { + var elem, + i = 0; + for ( ; (elem = elems[i]) != null; i++ ) { + jQuery._data( elem, "globalEval", !refElements || jQuery._data( refElements[i], "globalEval" ) ); + } +} + +function cloneCopyEvent( src, dest ) { + + if ( dest.nodeType !== 1 || !jQuery.hasData( src ) ) { + return; + } + + var type, i, l, + oldData = jQuery._data( src ), + curData = jQuery._data( dest, oldData ), + events = oldData.events; + + if ( events ) { + delete curData.handle; + curData.events = {}; + + for ( type in events ) { + for ( i = 0, l = events[ type ].length; i < l; i++ ) { + jQuery.event.add( dest, type, events[ type ][ i ] ); + } + } + } + + // make the cloned public data object a copy from the original + if ( curData.data ) { + curData.data = jQuery.extend( {}, curData.data ); + } +} + +function fixCloneNodeIssues( src, dest ) { + var nodeName, e, data; + + // We do not need to do anything for non-Elements + if ( dest.nodeType !== 1 ) { + return; + } + + nodeName = dest.nodeName.toLowerCase(); + + // IE6-8 copies events bound via attachEvent when using cloneNode. 
+ if ( !support.noCloneEvent && dest[ jQuery.expando ] ) { + data = jQuery._data( dest ); + + for ( e in data.events ) { + jQuery.removeEvent( dest, e, data.handle ); + } + + // Event data gets referenced instead of copied if the expando gets copied too + dest.removeAttribute( jQuery.expando ); + } + + // IE blanks contents when cloning scripts, and tries to evaluate newly-set text + if ( nodeName === "script" && dest.text !== src.text ) { + disableScript( dest ).text = src.text; + restoreScript( dest ); + + // IE6-10 improperly clones children of object elements using classid. + // IE10 throws NoModificationAllowedError if parent is null, #12132. + } else if ( nodeName === "object" ) { + if ( dest.parentNode ) { + dest.outerHTML = src.outerHTML; + } + + // This path appears unavoidable for IE9. When cloning an object + // element in IE9, the outerHTML strategy above is not sufficient. + // If the src has innerHTML and the destination does not, + // copy the src.innerHTML into the dest.innerHTML. #10324 + if ( support.html5Clone && ( src.innerHTML && !jQuery.trim(dest.innerHTML) ) ) { + dest.innerHTML = src.innerHTML; + } + + } else if ( nodeName === "input" && rcheckableType.test( src.type ) ) { + // IE6-8 fails to persist the checked state of a cloned checkbox + // or radio button. 
Worse, IE6-7 fail to give the cloned element + // a checked appearance if the defaultChecked value isn't also set + + dest.defaultChecked = dest.checked = src.checked; + + // IE6-7 get confused and end up setting the value of a cloned + // checkbox/radio button to an empty string instead of "on" + if ( dest.value !== src.value ) { + dest.value = src.value; + } + + // IE6-8 fails to return the selected option to the default selected + // state when cloning options + } else if ( nodeName === "option" ) { + dest.defaultSelected = dest.selected = src.defaultSelected; + + // IE6-8 fails to set the defaultValue to the correct value when + // cloning other types of input fields + } else if ( nodeName === "input" || nodeName === "textarea" ) { + dest.defaultValue = src.defaultValue; + } +} + +jQuery.extend({ + clone: function( elem, dataAndEvents, deepDataAndEvents ) { + var destElements, node, clone, i, srcElements, + inPage = jQuery.contains( elem.ownerDocument, elem ); + + if ( support.html5Clone || jQuery.isXMLDoc(elem) || !rnoshimcache.test( "<" + elem.nodeName + ">" ) ) { + clone = elem.cloneNode( true ); + + // IE<=8 does not properly clone detached, unknown element nodes + } else { + fragmentDiv.innerHTML = elem.outerHTML; + fragmentDiv.removeChild( clone = fragmentDiv.firstChild ); + } + + if ( (!support.noCloneEvent || !support.noCloneChecked) && + (elem.nodeType === 1 || elem.nodeType === 11) && !jQuery.isXMLDoc(elem) ) { + + // We eschew Sizzle here for performance reasons: http://jsperf.com/getall-vs-sizzle/2 + destElements = getAll( clone ); + srcElements = getAll( elem ); + + // Fix all IE cloning issues + for ( i = 0; (node = srcElements[i]) != null; ++i ) { + // Ensure that the destination node is not null; Fixes #9587 + if ( destElements[i] ) { + fixCloneNodeIssues( node, destElements[i] ); + } + } + } + + // Copy the events from the original to the clone + if ( dataAndEvents ) { + if ( deepDataAndEvents ) { + srcElements = srcElements || getAll( elem ); 
+ destElements = destElements || getAll( clone ); + + for ( i = 0; (node = srcElements[i]) != null; i++ ) { + cloneCopyEvent( node, destElements[i] ); + } + } else { + cloneCopyEvent( elem, clone ); + } + } + + // Preserve script evaluation history + destElements = getAll( clone, "script" ); + if ( destElements.length > 0 ) { + setGlobalEval( destElements, !inPage && getAll( elem, "script" ) ); + } + + destElements = srcElements = node = null; + + // Return the cloned set + return clone; + }, + + buildFragment: function( elems, context, scripts, selection ) { + var j, elem, contains, + tmp, tag, tbody, wrap, + l = elems.length, + + // Ensure a safe fragment + safe = createSafeFragment( context ), + + nodes = [], + i = 0; + + for ( ; i < l; i++ ) { + elem = elems[ i ]; + + if ( elem || elem === 0 ) { + + // Add nodes directly + if ( jQuery.type( elem ) === "object" ) { + jQuery.merge( nodes, elem.nodeType ? [ elem ] : elem ); + + // Convert non-html into a text node + } else if ( !rhtml.test( elem ) ) { + nodes.push( context.createTextNode( elem ) ); + + // Convert html into DOM nodes + } else { + tmp = tmp || safe.appendChild( context.createElement("div") ); + + // Deserialize a standard representation + tag = (rtagName.exec( elem ) || [ "", "" ])[ 1 ].toLowerCase(); + wrap = wrapMap[ tag ] || wrapMap._default; + + tmp.innerHTML = wrap[1] + elem.replace( rxhtmlTag, "<$1>" ) + wrap[2]; + + // Descend through wrappers to the right content + j = wrap[0]; + while ( j-- ) { + tmp = tmp.lastChild; + } + + // Manually add leading whitespace removed by IE + if ( !support.leadingWhitespace && rleadingWhitespace.test( elem ) ) { + nodes.push( context.createTextNode( rleadingWhitespace.exec( elem )[0] ) ); + } + + // Remove IE's autoinserted from table fragments + if ( !support.tbody ) { + + // String was a , *may* have spurious + elem = tag === "table" && !rtbody.test( elem ) ? + tmp.firstChild : + + // String was a bare or + wrap[1] === "
" && !rtbody.test( elem ) ? + tmp : + 0; + + j = elem && elem.childNodes.length; + while ( j-- ) { + if ( jQuery.nodeName( (tbody = elem.childNodes[j]), "tbody" ) && !tbody.childNodes.length ) { + elem.removeChild( tbody ); + } + } + } + + jQuery.merge( nodes, tmp.childNodes ); + + // Fix #12392 for WebKit and IE > 9 + tmp.textContent = ""; + + // Fix #12392 for oldIE + while ( tmp.firstChild ) { + tmp.removeChild( tmp.firstChild ); + } + + // Remember the top-level container for proper cleanup + tmp = safe.lastChild; + } + } + } + + // Fix #11356: Clear elements from fragment + if ( tmp ) { + safe.removeChild( tmp ); + } + + // Reset defaultChecked for any radios and checkboxes + // about to be appended to the DOM in IE 6/7 (#8060) + if ( !support.appendChecked ) { + jQuery.grep( getAll( nodes, "input" ), fixDefaultChecked ); + } + + i = 0; + while ( (elem = nodes[ i++ ]) ) { + + // #4087 - If origin and destination elements are the same, and this is + // that element, do not do anything + if ( selection && jQuery.inArray( elem, selection ) !== -1 ) { + continue; + } + + contains = jQuery.contains( elem.ownerDocument, elem ); + + // Append to fragment + tmp = getAll( safe.appendChild( elem ), "script" ); + + // Preserve script evaluation history + if ( contains ) { + setGlobalEval( tmp ); + } + + // Capture executables + if ( scripts ) { + j = 0; + while ( (elem = tmp[ j++ ]) ) { + if ( rscriptType.test( elem.type || "" ) ) { + scripts.push( elem ); + } + } + } + } + + tmp = null; + + return safe; + }, + + cleanData: function( elems, /* internal */ acceptData ) { + var elem, type, id, data, + i = 0, + internalKey = jQuery.expando, + cache = jQuery.cache, + deleteExpando = support.deleteExpando, + special = jQuery.event.special; + + for ( ; (elem = elems[i]) != null; i++ ) { + if ( acceptData || jQuery.acceptData( elem ) ) { + + id = elem[ internalKey ]; + data = id && cache[ id ]; + + if ( data ) { + if ( data.events ) { + for ( type in data.events ) { + if ( 
special[ type ] ) { + jQuery.event.remove( elem, type ); + + // This is a shortcut to avoid jQuery.event.remove's overhead + } else { + jQuery.removeEvent( elem, type, data.handle ); + } + } + } + + // Remove cache only if it was not already removed by jQuery.event.remove + if ( cache[ id ] ) { + + delete cache[ id ]; + + // IE does not allow us to delete expando properties from nodes, + // nor does it have a removeAttribute function on Document nodes; + // we must handle all of these cases + if ( deleteExpando ) { + delete elem[ internalKey ]; + + } else if ( typeof elem.removeAttribute !== strundefined ) { + elem.removeAttribute( internalKey ); + + } else { + elem[ internalKey ] = null; + } + + deletedIds.push( id ); + } + } + } + } + } +}); + +jQuery.fn.extend({ + text: function( value ) { + return access( this, function( value ) { + return value === undefined ? + jQuery.text( this ) : + this.empty().append( ( this[0] && this[0].ownerDocument || document ).createTextNode( value ) ); + }, null, value, arguments.length ); + }, + + append: function() { + return this.domManip( arguments, function( elem ) { + if ( this.nodeType === 1 || this.nodeType === 11 || this.nodeType === 9 ) { + var target = manipulationTarget( this, elem ); + target.appendChild( elem ); + } + }); + }, + + prepend: function() { + return this.domManip( arguments, function( elem ) { + if ( this.nodeType === 1 || this.nodeType === 11 || this.nodeType === 9 ) { + var target = manipulationTarget( this, elem ); + target.insertBefore( elem, target.firstChild ); + } + }); + }, + + before: function() { + return this.domManip( arguments, function( elem ) { + if ( this.parentNode ) { + this.parentNode.insertBefore( elem, this ); + } + }); + }, + + after: function() { + return this.domManip( arguments, function( elem ) { + if ( this.parentNode ) { + this.parentNode.insertBefore( elem, this.nextSibling ); + } + }); + }, + + remove: function( selector, keepData /* Internal Use Only */ ) { + var elem, + 
elems = selector ? jQuery.filter( selector, this ) : this, + i = 0; + + for ( ; (elem = elems[i]) != null; i++ ) { + + if ( !keepData && elem.nodeType === 1 ) { + jQuery.cleanData( getAll( elem ) ); + } + + if ( elem.parentNode ) { + if ( keepData && jQuery.contains( elem.ownerDocument, elem ) ) { + setGlobalEval( getAll( elem, "script" ) ); + } + elem.parentNode.removeChild( elem ); + } + } + + return this; + }, + + empty: function() { + var elem, + i = 0; + + for ( ; (elem = this[i]) != null; i++ ) { + // Remove element nodes and prevent memory leaks + if ( elem.nodeType === 1 ) { + jQuery.cleanData( getAll( elem, false ) ); + } + + // Remove any remaining nodes + while ( elem.firstChild ) { + elem.removeChild( elem.firstChild ); + } + + // If this is a select, ensure that it displays empty (#12336) + // Support: IE<9 + if ( elem.options && jQuery.nodeName( elem, "select" ) ) { + elem.options.length = 0; + } + } + + return this; + }, + + clone: function( dataAndEvents, deepDataAndEvents ) { + dataAndEvents = dataAndEvents == null ? false : dataAndEvents; + deepDataAndEvents = deepDataAndEvents == null ? dataAndEvents : deepDataAndEvents; + + return this.map(function() { + return jQuery.clone( this, dataAndEvents, deepDataAndEvents ); + }); + }, + + html: function( value ) { + return access( this, function( value ) { + var elem = this[ 0 ] || {}, + i = 0, + l = this.length; + + if ( value === undefined ) { + return elem.nodeType === 1 ? 
+ elem.innerHTML.replace( rinlinejQuery, "" ) : + undefined; + } + + // See if we can take a shortcut and just use innerHTML + if ( typeof value === "string" && !rnoInnerhtml.test( value ) && + ( support.htmlSerialize || !rnoshimcache.test( value ) ) && + ( support.leadingWhitespace || !rleadingWhitespace.test( value ) ) && + !wrapMap[ (rtagName.exec( value ) || [ "", "" ])[ 1 ].toLowerCase() ] ) { + + value = value.replace( rxhtmlTag, "<$1>" ); + + try { + for (; i < l; i++ ) { + // Remove element nodes and prevent memory leaks + elem = this[i] || {}; + if ( elem.nodeType === 1 ) { + jQuery.cleanData( getAll( elem, false ) ); + elem.innerHTML = value; + } + } + + elem = 0; + + // If using innerHTML throws an exception, use the fallback method + } catch(e) {} + } + + if ( elem ) { + this.empty().append( value ); + } + }, null, value, arguments.length ); + }, + + replaceWith: function() { + var arg = arguments[ 0 ]; + + // Make the changes, replacing each context element with the new content + this.domManip( arguments, function( elem ) { + arg = this.parentNode; + + jQuery.cleanData( getAll( this ) ); + + if ( arg ) { + arg.replaceChild( elem, this ); + } + }); + + // Force removal if there was no new content (e.g., from empty arguments) + return arg && (arg.length || arg.nodeType) ? 
this : this.remove(); + }, + + detach: function( selector ) { + return this.remove( selector, true ); + }, + + domManip: function( args, callback ) { + + // Flatten any nested arrays + args = concat.apply( [], args ); + + var first, node, hasScripts, + scripts, doc, fragment, + i = 0, + l = this.length, + set = this, + iNoClone = l - 1, + value = args[0], + isFunction = jQuery.isFunction( value ); + + // We can't cloneNode fragments that contain checked, in WebKit + if ( isFunction || + ( l > 1 && typeof value === "string" && + !support.checkClone && rchecked.test( value ) ) ) { + return this.each(function( index ) { + var self = set.eq( index ); + if ( isFunction ) { + args[0] = value.call( this, index, self.html() ); + } + self.domManip( args, callback ); + }); + } + + if ( l ) { + fragment = jQuery.buildFragment( args, this[ 0 ].ownerDocument, false, this ); + first = fragment.firstChild; + + if ( fragment.childNodes.length === 1 ) { + fragment = first; + } + + if ( first ) { + scripts = jQuery.map( getAll( fragment, "script" ), disableScript ); + hasScripts = scripts.length; + + // Use the original fragment for the last item instead of the first because it can end up + // being emptied incorrectly in certain situations (#8070). 
+ for ( ; i < l; i++ ) { + node = fragment; + + if ( i !== iNoClone ) { + node = jQuery.clone( node, true, true ); + + // Keep references to cloned scripts for later restoration + if ( hasScripts ) { + jQuery.merge( scripts, getAll( node, "script" ) ); + } + } + + callback.call( this[i], node, i ); + } + + if ( hasScripts ) { + doc = scripts[ scripts.length - 1 ].ownerDocument; + + // Reenable scripts + jQuery.map( scripts, restoreScript ); + + // Evaluate executable scripts on first document insertion + for ( i = 0; i < hasScripts; i++ ) { + node = scripts[ i ]; + if ( rscriptType.test( node.type || "" ) && + !jQuery._data( node, "globalEval" ) && jQuery.contains( doc, node ) ) { + + if ( node.src ) { + // Optional AJAX dependency, but won't run scripts if not present + if ( jQuery._evalUrl ) { + jQuery._evalUrl( node.src ); + } + } else { + jQuery.globalEval( ( node.text || node.textContent || node.innerHTML || "" ).replace( rcleanScript, "" ) ); + } + } + } + } + + // Fix #11809: Avoid leaking memory + fragment = first = null; + } + } + + return this; + } +}); + +jQuery.each({ + appendTo: "append", + prependTo: "prepend", + insertBefore: "before", + insertAfter: "after", + replaceAll: "replaceWith" +}, function( name, original ) { + jQuery.fn[ name ] = function( selector ) { + var elems, + i = 0, + ret = [], + insert = jQuery( selector ), + last = insert.length - 1; + + for ( ; i <= last; i++ ) { + elems = i === last ? 
this : this.clone(true); + jQuery( insert[i] )[ original ]( elems ); + + // Modern browsers can apply jQuery collections as arrays, but oldIE needs a .get() + push.apply( ret, elems.get() ); + } + + return this.pushStack( ret ); + }; +}); + + +var iframe, + elemdisplay = {}; + +/** + * Retrieve the actual display of a element + * @param {String} name nodeName of the element + * @param {Object} doc Document object + */ +// Called only from within defaultDisplay +function actualDisplay( name, doc ) { + var style, + elem = jQuery( doc.createElement( name ) ).appendTo( doc.body ), + + // getDefaultComputedStyle might be reliably used only on attached element + display = window.getDefaultComputedStyle && ( style = window.getDefaultComputedStyle( elem[ 0 ] ) ) ? + + // Use of this method is a temporary fix (more like optmization) until something better comes along, + // since it was removed from specification and supported only in FF + style.display : jQuery.css( elem[ 0 ], "display" ); + + // We don't have any data stored on the element, + // so use "detach" method as fast way to get rid of the element + elem.detach(); + + return display; +} + +/** + * Try to determine the default display value of an element + * @param {String} nodeName + */ +function defaultDisplay( nodeName ) { + var doc = document, + display = elemdisplay[ nodeName ]; + + if ( !display ) { + display = actualDisplay( nodeName, doc ); + + // If the simple way fails, read from inside an iframe + if ( display === "none" || !display ) { + + // Use the already-created iframe if possible + iframe = (iframe || jQuery( "