diff --git a/dev/doc/.buildinfo b/dev/doc/.buildinfo deleted file mode 100644 index fde5b8bfbc436bbc4a03de16618279f8cdd8f28b..0000000000000000000000000000000000000000 --- a/dev/doc/.buildinfo +++ /dev/null @@ -1,4 +0,0 @@ -# Sphinx build info version 1 -# This file hashes the configuration used when building these files. When it is not found, a full rebuild will be done. -config: 4d7a146cda87e1e0222ce8a24b0ea6b4 -tags: 645f666f9bcd5a90fca523b33c5a78b7 diff --git a/dev/doc/_images/FullyConnected.jpg b/dev/doc/_images/FullyConnected.jpg deleted file mode 100644 index b2241f401434e527f95ee4e0e541a3f2ff78fd1e..0000000000000000000000000000000000000000 Binary files a/dev/doc/_images/FullyConnected.jpg and /dev/null differ diff --git a/dev/doc/_images/NetContinuous_en.png b/dev/doc/_images/NetContinuous_en.png deleted file mode 100644 index 7bdef1aa366711806585d35c8653c987fd63d59e..0000000000000000000000000000000000000000 Binary files a/dev/doc/_images/NetContinuous_en.png and /dev/null differ diff --git a/dev/doc/_images/NetConv_en.png b/dev/doc/_images/NetConv_en.png deleted file mode 100644 index ad618d1d6f8f4839f566f5f5cb5db37a4b7d9093..0000000000000000000000000000000000000000 Binary files a/dev/doc/_images/NetConv_en.png and /dev/null differ diff --git a/dev/doc/_images/NetLR_en.png b/dev/doc/_images/NetLR_en.png deleted file mode 100644 index 9d514bf1b18a0c330f98c28785e5d008f409fc1d..0000000000000000000000000000000000000000 Binary files a/dev/doc/_images/NetLR_en.png and /dev/null differ diff --git a/dev/doc/_images/NetRNN_en.png b/dev/doc/_images/NetRNN_en.png deleted file mode 100644 index 180f273d32ea59dc8ececa69c08e249f79f9d4f7..0000000000000000000000000000000000000000 Binary files a/dev/doc/_images/NetRNN_en.png and /dev/null differ diff --git a/dev/doc/_images/PipelineNetwork_en.jpg b/dev/doc/_images/PipelineNetwork_en.jpg deleted file mode 100644 index e779aed06d5cdb2b442754e7915e79b72946418e..0000000000000000000000000000000000000000 Binary files a/dev/doc/_images/PipelineNetwork_en.jpg and /dev/null differ diff --git a/dev/doc/_images/PipelineTest_en.png b/dev/doc/_images/PipelineTest_en.png deleted file mode 100644 index 7e7ef520b5effa2f43fd2964048f05c42f2ea890..0000000000000000000000000000000000000000 Binary files a/dev/doc/_images/PipelineTest_en.png and /dev/null differ diff --git a/dev/doc/_images/PipelineTrain_en.png b/dev/doc/_images/PipelineTrain_en.png deleted file mode 100644 index 132d29bfd5d678d2518161d0b5ed2e16a233a048..0000000000000000000000000000000000000000 Binary files a/dev/doc/_images/PipelineTrain_en.png and /dev/null differ diff --git a/dev/doc/_images/Pipeline_en.jpg b/dev/doc/_images/Pipeline_en.jpg deleted file mode 100644 index 21a7a7bb6a1af746120e6f4f51f797b6aaafb9d8..0000000000000000000000000000000000000000 Binary files a/dev/doc/_images/Pipeline_en.jpg and /dev/null differ diff --git a/dev/doc/_images/add_security_group.png b/dev/doc/_images/add_security_group.png deleted file mode 100644 index bd34f46c9b0ada7027fd53e553e7d033255d25fc..0000000000000000000000000000000000000000 Binary files a/dev/doc/_images/add_security_group.png and /dev/null differ diff --git a/dev/doc/_images/bi_lstm.jpg b/dev/doc/_images/bi_lstm.jpg deleted file mode 100644 index adec1606d64d6e35ffe7e62abfa9a09309b05c84..0000000000000000000000000000000000000000 Binary files a/dev/doc/_images/bi_lstm.jpg and /dev/null differ diff --git a/dev/doc/_images/bi_lstm1.jpg b/dev/doc/_images/bi_lstm1.jpg deleted file mode 100644 index 
adec1606d64d6e35ffe7e62abfa9a09309b05c84..0000000000000000000000000000000000000000 Binary files a/dev/doc/_images/bi_lstm1.jpg and /dev/null differ diff --git a/dev/doc/_images/cifar.png b/dev/doc/_images/cifar.png deleted file mode 100644 index f54a0c58837cb3385b32dc57d02cec92666ef0f1..0000000000000000000000000000000000000000 Binary files a/dev/doc/_images/cifar.png and /dev/null differ diff --git a/dev/doc/_images/create_efs.png b/dev/doc/_images/create_efs.png deleted file mode 100644 index e5f1526033d1daf401700989af1d25919bcb7675..0000000000000000000000000000000000000000 Binary files a/dev/doc/_images/create_efs.png and /dev/null differ diff --git a/dev/doc/_images/curve.jpg b/dev/doc/_images/curve.jpg deleted file mode 100644 index baa35ae7f0a0b6c246f3a0d331735477ab8bcd70..0000000000000000000000000000000000000000 Binary files a/dev/doc/_images/curve.jpg and /dev/null differ diff --git a/dev/doc/_images/encoder-decoder-attention-model.png b/dev/doc/_images/encoder-decoder-attention-model.png deleted file mode 100644 index 79f911d4ba12ac0c0d1a936c9df639c302786914..0000000000000000000000000000000000000000 Binary files a/dev/doc/_images/encoder-decoder-attention-model.png and /dev/null differ diff --git a/dev/doc/_images/encoder-decoder-attention-model1.png b/dev/doc/_images/encoder-decoder-attention-model1.png deleted file mode 100644 index 79f911d4ba12ac0c0d1a936c9df639c302786914..0000000000000000000000000000000000000000 Binary files a/dev/doc/_images/encoder-decoder-attention-model1.png and /dev/null differ diff --git a/dev/doc/_images/feature.jpg b/dev/doc/_images/feature.jpg deleted file mode 100644 index 0e3310e4ace5613917e7779d3198ccbb3cdc5ada..0000000000000000000000000000000000000000 Binary files a/dev/doc/_images/feature.jpg and /dev/null differ diff --git a/dev/doc/_images/gan.png b/dev/doc/_images/gan.png deleted file mode 100644 index 0eafd7cb49b545f412f8e775804bcd0b22c42454..0000000000000000000000000000000000000000 Binary files a/dev/doc/_images/gan.png and /dev/null differ diff --git a/dev/doc/_images/image_classification.png b/dev/doc/_images/image_classification.png deleted file mode 100644 index 14f255805081c1b4fab27eaf336fd389fa93ca19..0000000000000000000000000000000000000000 Binary files a/dev/doc/_images/image_classification.png and /dev/null differ diff --git a/dev/doc/_images/lenet.png b/dev/doc/_images/lenet.png deleted file mode 100644 index 1e6f2b32bad797f3fccb929c72a121fc935b0cbb..0000000000000000000000000000000000000000 Binary files a/dev/doc/_images/lenet.png and /dev/null differ diff --git a/dev/doc/_images/lstm.png b/dev/doc/_images/lstm.png deleted file mode 100644 index aaf1fc690da2ffb8418cde5ed81848ddb5263030..0000000000000000000000000000000000000000 Binary files a/dev/doc/_images/lstm.png and /dev/null differ diff --git a/dev/doc/_images/mnist_sample.png b/dev/doc/_images/mnist_sample.png deleted file mode 100644 index f9c7bf7ddd7f148eac4fe347e9c38afaa8876760..0000000000000000000000000000000000000000 Binary files a/dev/doc/_images/mnist_sample.png and /dev/null differ diff --git a/dev/doc/_images/network_arch.png b/dev/doc/_images/network_arch.png deleted file mode 100644 index 4ae7864212f2a0a38102ee7ff600527ea99fec82..0000000000000000000000000000000000000000 Binary files a/dev/doc/_images/network_arch.png and /dev/null differ diff --git a/dev/doc/_images/neural-n-gram-model.png b/dev/doc/_images/neural-n-gram-model.png deleted file mode 100644 index f70b765b3fd69816345a79fc59adfea46008dbfd..0000000000000000000000000000000000000000 Binary files 
a/dev/doc/_images/neural-n-gram-model.png and /dev/null differ diff --git a/dev/doc/_images/nvvp1.png b/dev/doc/_images/nvvp1.png deleted file mode 100644 index 1af23ac3c52929b2b0645d2f9fa4d4c6db1f6e77..0000000000000000000000000000000000000000 Binary files a/dev/doc/_images/nvvp1.png and /dev/null differ diff --git a/dev/doc/_images/nvvp2.png b/dev/doc/_images/nvvp2.png deleted file mode 100644 index 177c9db708da6863d1075f3e615f5962dbe18b29..0000000000000000000000000000000000000000 Binary files a/dev/doc/_images/nvvp2.png and /dev/null differ diff --git a/dev/doc/_images/nvvp3.png b/dev/doc/_images/nvvp3.png deleted file mode 100644 index d8f393667d6569b6f1e61ffccac43fae5888b6db..0000000000000000000000000000000000000000 Binary files a/dev/doc/_images/nvvp3.png and /dev/null differ diff --git a/dev/doc/_images/nvvp4.png b/dev/doc/_images/nvvp4.png deleted file mode 100644 index 51f2f3e183295de6cf8ddaf2b3b8a0862aa35f01..0000000000000000000000000000000000000000 Binary files a/dev/doc/_images/nvvp4.png and /dev/null differ diff --git a/dev/doc/_images/parameters.png b/dev/doc/_images/parameters.png deleted file mode 100644 index 2ec67480951e21f0400bce1c34b3108dcd65c18c..0000000000000000000000000000000000000000 Binary files a/dev/doc/_images/parameters.png and /dev/null differ diff --git a/dev/doc/_images/plot.png b/dev/doc/_images/plot.png deleted file mode 100644 index a31f99791c670e18bb8c62b7604ec8cb0284ffb4..0000000000000000000000000000000000000000 Binary files a/dev/doc/_images/plot.png and /dev/null differ diff --git a/dev/doc/_images/pserver_and_trainer.png b/dev/doc/_images/pserver_and_trainer.png deleted file mode 100644 index f41fe48920590333ad332bb51eb18e03dc251541..0000000000000000000000000000000000000000 Binary files a/dev/doc/_images/pserver_and_trainer.png and /dev/null differ diff --git a/dev/doc/_images/rec_regression_network.png b/dev/doc/_images/rec_regression_network.png deleted file mode 100644 index 7d2b54d4fcf560cd5b667628f0012c3822efd9b2..0000000000000000000000000000000000000000 Binary files a/dev/doc/_images/rec_regression_network.png and /dev/null differ diff --git a/dev/doc/_images/resnet_block.jpg b/dev/doc/_images/resnet_block.jpg deleted file mode 100644 index e16bd3c624030c4c09b358a015b491141b42d8f1..0000000000000000000000000000000000000000 Binary files a/dev/doc/_images/resnet_block.jpg and /dev/null differ diff --git a/dev/doc/_images/route53_create_recordset.png b/dev/doc/_images/route53_create_recordset.png deleted file mode 100644 index 34e476c7beac30fcdde13fccc4cc8d08b4be3d35..0000000000000000000000000000000000000000 Binary files a/dev/doc/_images/route53_create_recordset.png and /dev/null differ diff --git a/dev/doc/_images/route53_create_zone.png b/dev/doc/_images/route53_create_zone.png deleted file mode 100644 index 25b7ddb831c5cba97f4b2edddd27da3234d621af..0000000000000000000000000000000000000000 Binary files a/dev/doc/_images/route53_create_zone.png and /dev/null differ diff --git a/dev/doc/_images/stacked_lstm.jpg b/dev/doc/_images/stacked_lstm.jpg deleted file mode 100644 index 4239055050966e0095e188a8c81d860711bce29d..0000000000000000000000000000000000000000 Binary files a/dev/doc/_images/stacked_lstm.jpg and /dev/null differ diff --git a/dev/doc/_images/uniform_sample.png b/dev/doc/_images/uniform_sample.png deleted file mode 100644 index e716c48e782019a757bed0cb443f2ed97386cbe2..0000000000000000000000000000000000000000 Binary files a/dev/doc/_images/uniform_sample.png and /dev/null differ diff --git a/dev/doc/_images/worker_security_group.png 
b/dev/doc/_images/worker_security_group.png deleted file mode 100644 index 57eb0265a34ad4223b69600d2a3dd355482e0bf5..0000000000000000000000000000000000000000 Binary files a/dev/doc/_images/worker_security_group.png and /dev/null differ diff --git a/dev/doc/_sources/about/index_en.rst.txt b/dev/doc/_sources/about/index_en.rst.txt deleted file mode 100644 index 065c430cdea802ed3c9f487cd00255b85a5598a5..0000000000000000000000000000000000000000 --- a/dev/doc/_sources/about/index_en.rst.txt +++ /dev/null @@ -1,14 +0,0 @@ -ABOUT -======= - -PaddlePaddle is an easy-to-use, efficient, flexible and scalable deep learning platform, -originally developed by Baidu scientists and engineers for the purpose of applying deep learning to many products at Baidu. - -PaddlePaddle is now open source but far from complete; it is intended to be built upon, improved, scaled, and extended. -We hope to build an active open source community in which users both provide feedback and actively contribute to the source code. - - -Credits -------- - -We owe many thanks to `all contributors and developers `_ of PaddlePaddle! diff --git a/dev/doc/_sources/api/index_en.rst.txt b/dev/doc/_sources/api/index_en.rst.txt deleted file mode 100644 index b7f470e1f8a9a1c720e7d70832ec069339ddc60f..0000000000000000000000000000000000000000 --- a/dev/doc/_sources/api/index_en.rst.txt +++ /dev/null @@ -1,10 +0,0 @@ -API -=== - -Model Config API ----------------- - -.. toctree:: - :maxdepth: 1 - - v2/model_configs.rst \ No newline at end of file diff --git a/dev/doc/_sources/api/v1/data_provider/dataprovider_en.rst.txt b/dev/doc/_sources/api/v1/data_provider/dataprovider_en.rst.txt deleted file mode 100644 index 96efbb1da959daec561009fdcc95d353b191dec8..0000000000000000000000000000000000000000 --- a/dev/doc/_sources/api/v1/data_provider/dataprovider_en.rst.txt +++ /dev/null @@ -1,34 +0,0 @@ -Introduction -============== -DataProvider is a module that loads training or testing data into CPU or GPU -memory for the subsequent training or testing process. - -For simple use cases, users can use the Python :code:`PyDataProvider` to dynamically read -the original data in any format or form, and then transform it into the -data format PaddlePaddle requires. The process is extremely flexible and highly -customizable, sacrificing only a little efficiency. This is extremely -useful when you have to dynamically generate certain kinds of data according to, -for example, the training performance. - -Besides, users can also customize a C++ :code:`DataProvider` for more -complex usage or higher efficiency. - -The following must be defined in the PaddlePaddle network -configuration file (trainer_config.py): which DataProvider to use, -and the specific parameters for that DataProvider, including the training file list -(train.list) and the testing file list (test.list). - -Train.list and test.list are simply two plain text files that list the paths -of the training or testing data. It is recommended to place them directly in -the training directory and reference them by a relative path ( -relative to the PaddlePaddle program). - -Testing or evaluation will not be performed during training if test.list is -not set or is set to None. Otherwise, PaddlePaddle will evaluate the trained model -on the specified testing data while training, once every testing period (a user -defined command line parameter in PaddlePaddle), to prevent over-fitting.
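(Editor's illustration, not part of the deleted files: since the description above says train.list and test.list are plain text files with one data-file path per line, a minimal sketch of generating them could look like the following; the directory names and the helper function are hypothetical.)

```python
import os

def write_file_list(data_dir, out_path, suffix=".txt"):
    # Collect data files under data_dir and write one relative path per line,
    # which is all a train.list / test.list is expected to contain.
    paths = [os.path.join(data_dir, f)
             for f in sorted(os.listdir(data_dir)) if f.endswith(suffix)]
    with open(out_path, "w") as f:
        f.write("\n".join(paths) + "\n")

write_file_list("data/train", "train.list")
write_file_list("data/test", "test.list")
```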
- -Each line of train.list and test.list is an absolute or relative path (relative -to the PaddlePaddle program's runtime directory) of a data file. Moreover, each line -can also be an HDFS file path or a SQL connection string, as long as the user -ensures that the DataProvider knows how to access each file. diff --git a/dev/doc/_sources/api/v1/data_provider/pydataprovider2_en.rst.txt b/dev/doc/_sources/api/v1/data_provider/pydataprovider2_en.rst.txt deleted file mode 100644 index 30357be32538db4423ad0eaf899138256c84edc7..0000000000000000000000000000000000000000 --- a/dev/doc/_sources/api/v1/data_provider/pydataprovider2_en.rst.txt +++ /dev/null @@ -1,249 +0,0 @@ -.. _api_pydataprovider2: - -PyDataProvider2 -=============== - -We highly recommend that users use PyDataProvider2 to provide training or testing -data to PaddlePaddle. With PyDataProvider2, the user only needs to focus on how to read a single -sample from the original data file, leaving all of the -trivial work, including transferring data into CPU/GPU memory, shuffling, and binary -serialization, to PyDataProvider2. PyDataProvider2 uses multithreading and a -fascinating but simple cache strategy to optimize the efficiency of the data -providing process. - -DataProvider for the non-sequential model ----------------------------------------- - -Here we use the MNIST handwriting recognition data as an example to illustrate -how to write a simple PyDataProvider. - -MNIST is a handwriting classification data set. It contains 70,000 grayscale -images of digits. Labels of the training samples range from 0 to 9. All the -images have been size-normalized and centered into images with the same size -of 28 x 28 pixels. - -A small part of the original data is shown below as an example: - -.. literalinclude:: src/mnist_train.txt - -Each line of the data contains two parts, separated by :code:`;`. The first part is -the label of an image. The second part contains the 28x28 pixel float values. - -Just write the path of the above data into train.list. It looks like this: - -.. literalinclude:: src/train.list - -The corresponding dataprovider is shown below: - -.. literalinclude:: src/mnist_provider.dict.py - -The first line imports the PyDataProvider2 package. -The main function is the process function, which has two parameters. -The first parameter is the settings, which is not used in this example. -The second parameter is the filename, which is exactly one line of train.list. -This parameter is passed to the process function by PaddlePaddle. - -:code:`@provider` is a Python -`Decorator `_ . -It sets some properties of the DataProvider, and constructs a real PaddlePaddle -DataProvider from a very simple user-implemented Python function. It does not -matter if you are not familiar with `Decorator`_. You can keep it simple by -just taking :code:`@provider` as a fixed marker above the provider function you -implemented. - -`input_types`_ defines the data format that a DataProvider returns. -In this example, it is set to a 28x28-dimensional dense vector and an integer -scalar, whose value ranges from 0 to 9. -`input_types`_ can be set to several kinds of input formats; please refer to the -document of `input_types`_ for more details. - - -The process method is the core part of constructing a real DataProvider in -PaddlePaddle. It implements how to open the text file, how to read one sample -from the original text file, convert it into `input_types`_, and give it -back to the PaddlePaddle process at line 23.
-Note that data yielded by the process function must follow the same order in which -`input_types`_ are defined. - - -With the help of PyDataProvider2, the user can focus on how to generate ONE training -sample by using the keyword :code:`yield`. -:code:`yield` is a Python keyword, and a closely related concept is the -:code:`generator`. - -Only a few lines of code need to be added to the training configuration file; -you can take this as an example. - -.. literalinclude:: src/mnist_config.py - -Here we specify training data by :code:`train.list`, and no testing data is specified. -The method which actually provides data is :code:`process`. - -Users can also use another style to provide data, which defines the -:code:`data_layer`'s name explicitly when yielding. For example, -the :code:`dataprovider` is shown below. - -.. literalinclude:: src/mnist_provider.dict.py - :linenos: - -If the user does not give the :code:`data_layer`'s name, PaddlePaddle will roughly use -the order of the :code:`data_layer` definitions to determine which feature maps to -which :code:`data_layer`. This order may not be correct, so DEFINING THE -:code:`data_layer`'s NAMES EXPLICITLY IS THE RECOMMENDED WAY TO PROVIDE DATA. - -Now, this simple example of using PyDataProvider is finished. -The only thing that the user should know is how to generate **one sample** from -**one data file**. -And PaddlePaddle will do all of the rest\: - -* Form a training batch -* Shuffle the training data -* Read data with multithreading -* Cache the training data (Optional) -* CPU -> GPU double buffering. - -Is this cool? - -.. _api_pydataprovider2_sequential_model: - -DataProvider for the sequential model ------------------------------------- -A sequence model takes sequences as its input. A sequence is made up of several -timesteps. The so-called timestep does not necessarily have anything to do -with time. It can also be understood to mean that the order of the data is taken into -consideration in model design and training. -For example, a sentence can be interpreted as a kind of sequence data in NLP -tasks. - -Here is an example of a data provider for English sentiment classification data. -The original input data are simple English texts, labeled as positive or -negative sentiment (marked by 0 and 1 respectively). - -A small part of the original data can be found at the path below as an example: - -.. literalinclude:: src/sentimental_train.txt - -The corresponding data provider can be found at the path below: - -.. literalinclude:: src/sentimental_provider.py - -This data provider for the sequential model is a little more complex than that -for the MNIST dataset. -A new initialization method is introduced here. -The method :code:`on_init` is attached to the DataProvider by :code:`@provider`'s -:code:`init_hook` parameter, and it will be invoked once the DataProvider is -initialized. The :code:`on_init` function has the following parameters: - -* The first parameter is the settings object. -* The remaining parameters are passed as keyword arguments. Some of them are passed - by PaddlePaddle; see the reference for `init_hook`_. - The :code:`dictionary` object is a Python dict object passed from the trainer - configuration file, and it maps word strings to word ids. - -To pass these parameters into the DataProvider, the following lines should be added -to the trainer configuration file. - -.. 
literalinclude:: src/sentimental_config.py - -The definition is basically the same as the MNIST example, except: -* Load the dictionary in this configuration -* Pass it as a parameter to the DataProvider - -The `input_types` is configured in the method :code:`on_init`. This has the same -effect as configuring it through :code:`@provider`'s :code:`input_types` parameter. -However, the :code:`input_types` is set at runtime, so we can set it to -different types according to the input data. The input of the neural network is a -sequence of word ids, so set :code:`seq_type` to :code:`integer_value_sequence`. - -During :code:`on_init`, we save the :code:`dictionary` variable to -:code:`settings`, and it will be used in :code:`process`. Note that the settings -parameter for the process function and for the on_init function is the same -object. - -The basic processing logic is the same as MNIST's :code:`process` method. Each -sample in the data file is given back to the PaddlePaddle process. - -This concludes the basic usage of PyDataProvider. -Please refer to the following reference section for details. - -Reference --------- - -@provider -+++++++++ - -.. autofunction:: paddle.trainer.PyDataProvider2.provider - -input_types -+++++++++++ - -PaddlePaddle has four data types, and three sequence types. -The four data types are: - -* :code:`dense_vector`: dense float vector. -* :code:`sparse_binary_vector`: sparse binary vector, most of the values are 0, and - the non-zero elements are fixed to 1. -* :code:`sparse_float_vector`: sparse float vector, most of the values are 0, and the - non-zero elements can be any float value. They are given by the user. -* :code:`integer`: an integer scalar, which is especially used for a label or word index. - -The three sequence types are: - -* :code:`SequenceType.NO_SEQUENCE` means the sample is not a sequence. -* :code:`SequenceType.SEQUENCE` means the sample is a sequence. -* :code:`SequenceType.SUB_SEQUENCE` means it is a nested sequence, where each timestep of - the input sequence is also a sequence. - -Different input types have different input formats. Their formats are shown -in the table below. - -+----------------------+---------------------+-----------------------------------+------------------------------------------------+ -| | NO_SEQUENCE | SEQUENCE | SUB_SEQUENCE | -+======================+=====================+===================================+================================================+ -| dense_vector | [f, f, ...] | [[f, ...], [f, ...], ...] | [[[f, ...], ...], [[f, ...], ...],...] | -+----------------------+---------------------+-----------------------------------+------------------------------------------------+ -| sparse_binary_vector | [i, i, ...] | [[i, ...], [i, ...], ...] | [[[i, ...], ...], [[i, ...], ...],...] | -+----------------------+---------------------+-----------------------------------+------------------------------------------------+ -| sparse_float_vector | [(i,f), (i,f), ...] | [[(i,f), ...], [(i,f), ...], ...] | [[[(i,f), ...], ...], [[(i,f), ...], ...],...] | -+----------------------+---------------------+-----------------------------------+------------------------------------------------+ -| integer_value | i | [i, i, ...] | [[i, ...], [i, ...], ...] | -+----------------------+---------------------+-----------------------------------+------------------------------------------------+ - -where f represents a float value and i represents an integer value. - -init_hook -+++++++++ - -init_hook is a function that is invoked once the data provider is initialized.
-Its parameters are listed as follows: - -* The first parameter is a settings object, which is the same as the :code:`settings` - in the :code:`process` method. The object contains several attributes, including: - - * :code:`settings.input_types`: the input types. See `input_types`_. - * :code:`settings.logger`: a logging object. - -* The remaining parameters are keyword arguments. They are made up of PaddlePaddle - pre-defined parameters and user-defined parameters. - - * PaddlePaddle-defined parameters include: - - * :code:`is_train` is a bool parameter that indicates whether the DataProvider is used for - training or testing. - * :code:`file_list` is the list of all files. - - * User-defined parameters can be set in the training configuration. - -Note that PaddlePaddle reserves the right to add pre-defined parameters, so please -use :code:`**kwargs` in init_hook to ensure compatibility by accepting the -parameters which your init_hook does not use. - -cache -+++++ -DataProvider provides two simple cache strategies. They are: - -* :code:`CacheType.NO_CACHE` means no data is cached, and data is read at runtime by - the user-implemented Python module every pass. -* :code:`CacheType.CACHE_PASS_IN_MEM` means the first pass reads data through the user- - implemented Python module, and the remaining passes read the data directly from - memory. diff --git a/dev/doc/_sources/api/v1/index_en.rst.txt b/dev/doc/_sources/api/v1/index_en.rst.txt deleted file mode 100644 index 10c297a71d6988c002de868e804ed9ee2345fbd7..0000000000000000000000000000000000000000 --- a/dev/doc/_sources/api/v1/index_en.rst.txt +++ /dev/null @@ -1,37 +0,0 @@ -API -=== - -DataProvider API ----------------- - -.. toctree:: - :maxdepth: 1 - - data_provider/dataprovider_en.rst - data_provider/pydataprovider2_en.rst - -.. _api_trainer_config: - -Model Config API ----------------- - -.. toctree:: - :maxdepth: 1 - - trainer_config_helpers/optimizers.rst - trainer_config_helpers/data_sources.rst - trainer_config_helpers/layers.rst - trainer_config_helpers/activations.rst - trainer_config_helpers/poolings.rst - trainer_config_helpers/networks.rst - trainer_config_helpers/evaluators.rst - trainer_config_helpers/attrs.rst - - -Applications API ----------------- - -.. toctree:: - :maxdepth: 1 - - predict/swig_py_paddle_en.rst diff --git a/dev/doc/_sources/api/v1/predict/swig_py_paddle_en.rst.txt b/dev/doc/_sources/api/v1/predict/swig_py_paddle_en.rst.txt deleted file mode 100644 index 1c628e6971fa5643e6a9ca629488049957686193..0000000000000000000000000000000000000000 --- a/dev/doc/_sources/api/v1/predict/swig_py_paddle_en.rst.txt +++ /dev/null @@ -1,59 +0,0 @@ -Python Prediction -================== - -PaddlePaddle offers a set of clean prediction interfaces for Python with the help of -SWIG. The main steps of predicting values in Python are: - -* Parse training configurations -* Construct GradientMachine -* Prepare data -* Predict - -Here is a sample Python script that shows the typical prediction process for the -MNIST classification problem. The complete sample code can be found at -:code:`src_root/doc/ui/predict/predict_sample.py`. - -.. literalinclude:: src/predict_sample.py - :language: python - :lines: 15-18,90-100,101-104 - -The module that does most of the job is py_paddle.swig_paddle; it is -generated by SWIG and has complete documentation. For more details you can use -Python's :code:`help()` function. 
Let's walk through the above python script: - -* At the beginning, use :code:`swig_paddle.initPaddle()` to initialize - PaddlePaddle with command line arguments, for more about command line arguments - see :ref:`cmd_detail_introduction` . -* Parse the configuration file that is used in training with :code:`parse_config()`. - Because data to predict with always have no label, and output of prediction work - normally is the output layer rather than the cost layer, so you should modify - the configuration file accordingly before using it in the prediction work. -* Create a neural network with - :code:`swig_paddle.GradientMachine.createFromConfigproto()`, which takes the - parsed configuration :code:`conf.model_config` as argument. Then load the - trained parameters from the model with :code:`network.loadParameters()`. -* Create a data converter object of utility class :code:`DataProviderConverter`. - - Note: As swig_paddle can only accept C++ matrices, we offer a utility - class DataProviderConverter that can accept the same input data with - PyDataProvider2, for more information please refer to document - of :ref:`api_pydataprovider2` . -* Do the prediction with :code:`forwardTest()`, which takes the converted - input data and outputs the activations of the output layer. - -Here is a typical output: - -.. code-block:: text - - [{'id': None, 'value': array([[ 5.53018653e-09, 1.12194102e-05, 1.96644767e-09, - 1.43630644e-02, 1.51111044e-13, 9.85625684e-01, - 2.08823112e-10, 2.32777140e-08, 2.00186201e-09, - 1.15501715e-08], - [ 9.99982715e-01, 1.27787406e-10, 1.72296313e-05, - 1.49316648e-09, 1.36540484e-11, 6.93137714e-10, - 2.70634608e-08, 3.48565123e-08, 5.25639710e-09, - 4.48684503e-08]], dtype=float32)}] - -:code:`value` is the output of the output layer, each row represents result of -the corresponding row in the input data, each element represents activation of -the corresponding neuron in the output layer. - diff --git a/dev/doc/_sources/api/v1/trainer_config_helpers/activations.rst.txt b/dev/doc/_sources/api/v1/trainer_config_helpers/activations.rst.txt deleted file mode 100644 index 269e6491e7ebe3899c3fb24fca756a393043473b..0000000000000000000000000000000000000000 --- a/dev/doc/_sources/api/v1/trainer_config_helpers/activations.rst.txt +++ /dev/null @@ -1,108 +0,0 @@ -=========== -Activations -=========== - -BaseActivation -============== - -.. automodule:: paddle.trainer_config_helpers.activations - :members: BaseActivation - :noindex: - -AbsActivation -=============== - -.. automodule:: paddle.trainer_config_helpers.activations - :members: AbsActivation - :noindex: - -ExpActivation -=============== - -.. automodule:: paddle.trainer_config_helpers.activations - :members: ExpActivation - :noindex: - -IdentityActivation -================== - -.. automodule:: paddle.trainer_config_helpers.activations - :members: IdentityActivation - :noindex: - -LinearActivation -================== - -.. automodule:: paddle.trainer_config_helpers.activations - :members: LinearActivation - :noindex: - -LogActivation -================== - -.. automodule:: paddle.trainer_config_helpers.activations - :members: LogActivation - :noindex: - -SquareActivation -================ - -.. automodule:: paddle.trainer_config_helpers.activations - :members: SquareActivation - :noindex: - -SigmoidActivation -================= - -.. automodule:: paddle.trainer_config_helpers.activations - :members: SigmoidActivation - :noindex: - -SoftmaxActivation -================= - -.. 
automodule:: paddle.trainer_config_helpers.activations - :members: SoftmaxActivation - :noindex: - -SequenceSoftmaxActivation -========================= - -.. automodule:: paddle.trainer_config_helpers.activations - :members: SequenceSoftmaxActivation - :noindex: - -ReluActivation -============== - -.. automodule:: paddle.trainer_config_helpers.activations - :members: ReluActivation - :noindex: - -BReluActivation -=============== - -.. automodule:: paddle.trainer_config_helpers.activations - :members: BReluActivation - :noindex: - -SoftReluActivation -================== - -.. automodule:: paddle.trainer_config_helpers.activations - :members: SoftReluActivation - :noindex: - -TanhActivation -============== - -.. automodule:: paddle.trainer_config_helpers.activations - :members: TanhActivation - :noindex: - -STanhActivation -=============== - -.. automodule:: paddle.trainer_config_helpers.activations - :members: STanhActivation - :noindex: diff --git a/dev/doc/_sources/api/v1/trainer_config_helpers/attrs.rst.txt b/dev/doc/_sources/api/v1/trainer_config_helpers/attrs.rst.txt deleted file mode 100644 index ac63127bf7d9db6351365ab7b58f43db12347a8e..0000000000000000000000000000000000000000 --- a/dev/doc/_sources/api/v1/trainer_config_helpers/attrs.rst.txt +++ /dev/null @@ -1,5 +0,0 @@ -Parameter Attributes -======================= - -.. automodule:: paddle.trainer_config_helpers.attrs - :members: diff --git a/dev/doc/_sources/api/v1/trainer_config_helpers/data_sources.rst.txt b/dev/doc/_sources/api/v1/trainer_config_helpers/data_sources.rst.txt deleted file mode 100644 index b9dd4dda01ae59d1260356aff50ddf298d02c87f..0000000000000000000000000000000000000000 --- a/dev/doc/_sources/api/v1/trainer_config_helpers/data_sources.rst.txt +++ /dev/null @@ -1,7 +0,0 @@ -.. _api_trainer_config_helpers_data_sources: - -DataSources -=========== - -.. automodule:: paddle.trainer_config_helpers.data_sources - :members: diff --git a/dev/doc/_sources/api/v1/trainer_config_helpers/evaluators.rst.txt b/dev/doc/_sources/api/v1/trainer_config_helpers/evaluators.rst.txt deleted file mode 100644 index 11dc735164284d6ed1d661fab1e7690d263b3a7c..0000000000000000000000000000000000000000 --- a/dev/doc/_sources/api/v1/trainer_config_helpers/evaluators.rst.txt +++ /dev/null @@ -1,108 +0,0 @@ -.. _api_trainer_config_helpers_evaluators: - -========== -Evaluators -========== - -Base -==== -.. automodule:: paddle.trainer_config_helpers.evaluators - :members: evaluator_base - :noindex: - -Classification -============== - -classification_error_evaluator ------------------------------- -.. automodule:: paddle.trainer_config_helpers.evaluators - :members: classification_error_evaluator - :noindex: - -auc_evaluator -------------- -.. automodule:: paddle.trainer_config_helpers.evaluators - :members: auc_evaluator - :noindex: - -ctc_error_evaluator -------------------- -.. automodule:: paddle.trainer_config_helpers.evaluators - :members: ctc_error_evaluator - :noindex: - -chunk_evaluator ---------------- -.. automodule:: paddle.trainer_config_helpers.evaluators - :members: chunk_evaluator - :noindex: - -precision_recall_evaluator --------------------------- -.. automodule:: paddle.trainer_config_helpers.evaluators - :members: precision_recall_evaluator - :noindex: - -Rank -==== - -pnpair_evaluator ----------------- -.. automodule:: paddle.trainer_config_helpers.evaluators - :members: pnpair_evaluator - :noindex: - -Utils -===== - -sum_evaluator -------------- -.. 
automodule:: paddle.trainer_config_helpers.evaluators - :members: sum_evaluator - :noindex: - -column_sum_evaluator --------------------- -.. automodule:: paddle.trainer_config_helpers.evaluators - :members: column_sum_evaluator - :noindex: - -Print -===== - -classification_error_printer_evaluator --------------------------------------- -.. automodule:: paddle.trainer_config_helpers.evaluators - :members: classification_error_printer_evaluator - :noindex: - -gradient_printer_evaluator --------------------------- -.. automodule:: paddle.trainer_config_helpers.evaluators - :members: gradient_printer_evaluator - :noindex: - -maxid_printer_evaluator ------------------------ -.. automodule:: paddle.trainer_config_helpers.evaluators - :members: maxid_printer_evaluator - :noindex: - -maxframe_printer_evaluator ---------------------------- -.. automodule:: paddle.trainer_config_helpers.evaluators - :members: maxframe_printer_evaluator - :noindex: - -seqtext_printer_evaluator -------------------------- -.. automodule:: paddle.trainer_config_helpers.evaluators - :members: seqtext_printer_evaluator - :noindex: - -value_printer_evaluator ------------------------ -.. automodule:: paddle.trainer_config_helpers.evaluators - :members: value_printer_evaluator - :noindex: - diff --git a/dev/doc/_sources/api/v1/trainer_config_helpers/layers.rst.txt b/dev/doc/_sources/api/v1/trainer_config_helpers/layers.rst.txt deleted file mode 100644 index bbea823de4d870f8a4384b6a85ebb7e8182797fe..0000000000000000000000000000000000000000 --- a/dev/doc/_sources/api/v1/trainer_config_helpers/layers.rst.txt +++ /dev/null @@ -1,502 +0,0 @@ -.. _api_trainer_config_helpers_layers: - -====== -Layers -====== - -Base -====== - -LayerType ---------- -.. automodule:: paddle.trainer_config_helpers.layers - :members: LayerType - :noindex: - -LayerOutput ------------ -.. automodule:: paddle.trainer_config_helpers.layers - :members: LayerOutput - :noindex: - -Data layer -=========== - -.. _api_trainer_config_helpers_layers_data_layer: - -data_layer ----------- -.. automodule:: paddle.trainer_config_helpers.layers - :members: data_layer - :noindex: - -Fully Connected Layers -====================== - -.. _api_trainer_config_helpers_layers_fc_layer: - -fc_layer --------- -.. automodule:: paddle.trainer_config_helpers.layers - :members: fc_layer - :noindex: - -selective_fc_layer ------------------- -.. automodule:: paddle.trainer_config_helpers.layers - :members: selective_fc_layer - :noindex: - -Conv Layers -=========== - -conv_operator -------------- -.. automodule:: paddle.trainer_config_helpers.layers - :members: conv_operator - :noindex: - -conv_projection ---------------- -.. automodule:: paddle.trainer_config_helpers.layers - :members: conv_projection - :noindex: - -conv_shift_layer ------------------- -.. automodule:: paddle.trainer_config_helpers.layers - :members: conv_shift_layer - :noindex: - -img_conv_layer --------------- -.. automodule:: paddle.trainer_config_helpers.layers - :members: img_conv_layer - :noindex: - -.. _api_trainer_config_helpers_layers_context_projection: - -context_projection ------------------- -.. automodule:: paddle.trainer_config_helpers.layers - :members: context_projection - :noindex: - -Image Pooling Layer -=================== - -img_pool_layer --------------- -.. automodule:: paddle.trainer_config_helpers.layers - :members: img_pool_layer - :noindex: - -spp_layer --------------- -.. automodule:: paddle.trainer_config_helpers.layers - :members: spp_layer - :noindex: - -maxout_layer ------------- -.. 
automodule:: paddle.trainer_config_helpers.layers - :members: maxout_layer - :noindex: - -Norm Layer -========== - -img_cmrnorm_layer ------------------ -.. automodule:: paddle.trainer_config_helpers.layers - :members: img_cmrnorm_layer - :noindex: - -batch_norm_layer ---------------------- -.. automodule:: paddle.trainer_config_helpers.layers - :members: batch_norm_layer - :noindex: - -sum_to_one_norm_layer ---------------------- -.. automodule:: paddle.trainer_config_helpers.layers - :members: sum_to_one_norm_layer - :noindex: - -Recurrent Layers -================ - -recurrent_layer ------------------ -.. automodule:: paddle.trainer_config_helpers.layers - :members: recurrent_layer - :noindex: - -lstmemory ---------- -.. automodule:: paddle.trainer_config_helpers.layers - :members: lstmemory - :noindex: - -grumemory ---------- -.. automodule:: paddle.trainer_config_helpers.layers - :members: grumemory - :noindex: - -Recurrent Layer Group -===================== - -memory ------- -.. automodule:: paddle.trainer_config_helpers.layers - :members: memory - :noindex: - -recurrent_group ---------------- -.. automodule:: paddle.trainer_config_helpers.layers - :members: recurrent_group - :noindex: - -lstm_step_layer ---------------- -.. automodule:: paddle.trainer_config_helpers.layers - :members: lstm_step_layer - :noindex: - -gru_step_layer ---------------- -.. automodule:: paddle.trainer_config_helpers.layers - :members: gru_step_layer - :noindex: - -beam_search ------------- -.. automodule:: paddle.trainer_config_helpers.layers - :members: beam_search - :noindex: - -get_output_layer ------------------ -.. automodule:: paddle.trainer_config_helpers.layers - :members: get_output_layer - :noindex: - -Mixed Layer -=========== - -.. _api_trainer_config_helpers_layers_mixed_layer: - -mixed_layer ------------ -.. automodule:: paddle.trainer_config_helpers.layers - :members: mixed_layer - :noindex: - -.. _api_trainer_config_helpers_layers_embedding_layer: - -embedding_layer ---------------- -.. automodule:: paddle.trainer_config_helpers.layers - :members: embedding_layer - :noindex: - -scaling_projection ------------------- -.. automodule:: paddle.trainer_config_helpers.layers - :members: scaling_projection - :noindex: - -dotmul_projection ------------------ -.. automodule:: paddle.trainer_config_helpers.layers - :members: dotmul_projection - :noindex: - -dotmul_operator ---------------- -.. automodule:: paddle.trainer_config_helpers.layers - :members: dotmul_operator - :noindex: - -full_matrix_projection ----------------------- -.. automodule:: paddle.trainer_config_helpers.layers - :members: full_matrix_projection - :noindex: - -identity_projection -------------------- -.. automodule:: paddle.trainer_config_helpers.layers - :members: identity_projection - :noindex: - - -table_projection ----------------------- -.. automodule:: paddle.trainer_config_helpers.layers - :members: table_projection - :noindex: - -trans_full_matrix_projection ----------------------------- -.. automodule:: paddle.trainer_config_helpers.layers - :members: trans_full_matrix_projection - :noindex: - -Aggregate Layers -================ - -.. _api_trainer_config_helpers_layers_pooling_layer: - -pooling_layer -------------- -.. automodule:: paddle.trainer_config_helpers.layers - :members: pooling_layer - :noindex: - -.. _api_trainer_config_helpers_layers_last_seq: - -last_seq --------- -.. automodule:: paddle.trainer_config_helpers.layers - :members: last_seq - :noindex: - -.. 
_api_trainer_config_helpers_layers_first_seq: - -first_seq ---------- -.. automodule:: paddle.trainer_config_helpers.layers - :members: first_seq - :noindex: - -concat_layer ------------- -.. automodule:: paddle.trainer_config_helpers.layers - :members: concat_layer - :noindex: - -seq_concat_layer ----------------- -.. automodule:: paddle.trainer_config_helpers.layers - :members: seq_concat_layer - :noindex: - -Reshaping Layers -================ - -block_expand_layer ------------------- -.. automodule:: paddle.trainer_config_helpers.layers - :members: block_expand_layer - :noindex: - -.. _api_trainer_config_helpers_layers_expand_layer: - -expand_layer ------------- -.. automodule:: paddle.trainer_config_helpers.layers - :members: expand_layer - :noindex: - -repeat_layer ------------- -.. automodule:: paddle.trainer_config_helpers.layers - :members: repeat_layer - :noindex: - -rotate_layer ------------- -.. automodule:: paddle.trainer_config_helpers.layers - :members: rotate_layer - :noindex: - -seq_reshape_layer ------------------ -.. automodule:: paddle.trainer_config_helpers.layers - :members: seq_reshape_layer - :noindex: - -Math Layers -=========== - -addto_layer ------------ -.. automodule:: paddle.trainer_config_helpers.layers - :members: addto_layer - :noindex: - -linear_comb_layer ------------------ -.. automodule:: paddle.trainer_config_helpers.layers - :members: linear_comb_layer - :noindex: - -interpolation_layer -------------------- -.. automodule:: paddle.trainer_config_helpers.layers - :members: interpolation_layer - :noindex: - -bilinear_interp_layer ----------------------- -.. automodule:: paddle.trainer_config_helpers.layers - :members: bilinear_interp_layer - :noindex: - -power_layer ------------ -.. automodule:: paddle.trainer_config_helpers.layers - :members: power_layer - :noindex: - -scaling_layer -------------- -.. automodule:: paddle.trainer_config_helpers.layers - :members: scaling_layer - :noindex: - -slope_intercept_layer ----------------------- -.. automodule:: paddle.trainer_config_helpers.layers - :members: slope_intercept_layer - :noindex: - -tensor_layer ------------- -.. automodule:: paddle.trainer_config_helpers.layers - :members: tensor_layer - :noindex: - -.. _api_trainer_config_helpers_layers_cos_sim: - -cos_sim -------- -.. automodule:: paddle.trainer_config_helpers.layers - :members: cos_sim - :noindex: - -trans_layer ------------- -.. automodule:: paddle.trainer_config_helpers.layers - :members: trans_layer - :noindex: - -Sampling Layers -=============== - -maxid_layer ------------ -.. automodule:: paddle.trainer_config_helpers.layers - :members: maxid_layer - :noindex: - -sampling_id_layer ------------------ -.. automodule:: paddle.trainer_config_helpers.layers - :members: sampling_id_layer - :noindex: - -Slicing and Joining Layers -========================== - -pad_layer ------------ -.. automodule:: paddle.trainer_config_helpers.layers - :members: pad_layer - :noindex: - -.. _api_trainer_config_helpers_layers_cost_layers: - -Cost Layers -=========== - -cross_entropy -------------- -.. automodule:: paddle.trainer_config_helpers.layers - :members: cross_entropy - :noindex: - -cross_entropy_with_selfnorm ---------------------------- -.. automodule:: paddle.trainer_config_helpers.layers - :members: cross_entropy_with_selfnorm - :noindex: - -multi_binary_label_cross_entropy --------------------------------- -.. automodule:: paddle.trainer_config_helpers.layers - :members: multi_binary_label_cross_entropy - :noindex: - -huber_cost ----------- -.. 
automodule:: paddle.trainer_config_helpers.layers - :members: huber_cost - :noindex: - -lambda_cost ------------ -.. automodule:: paddle.trainer_config_helpers.layers - :members: lambda_cost - :noindex: - -rank_cost ---------- -.. automodule:: paddle.trainer_config_helpers.layers - :members: rank_cost - :noindex: - -crf_layer ------------------ -.. automodule:: paddle.trainer_config_helpers.layers - :members: crf_layer - :noindex: - -crf_decoding_layer -------------------- -.. automodule:: paddle.trainer_config_helpers.layers - :members: crf_decoding_layer - :noindex: - -ctc_layer ------------ -.. automodule:: paddle.trainer_config_helpers.layers - :members: ctc_layer - :noindex: - -warp_ctc_layer --------------- -.. automodule:: paddle.trainer_config_helpers.layers - :members: warp_ctc_layer - :noindex: - -nce_layer ------------ -.. automodule:: paddle.trainer_config_helpers.layers - :members: nce_layer - :noindex: - -hsigmoid ---------- -.. automodule:: paddle.trainer_config_helpers.layers - :members: hsigmoid - :noindex: - -sum_cost ---------- -.. automodule:: paddle.trainer_config_helpers.layers - :members: sum_cost - :noindex: - -Check Layer -============ - -eos_layer ------------- -.. automodule:: paddle.trainer_config_helpers.layers - :members: eos_layer - :noindex: diff --git a/dev/doc/_sources/api/v1/trainer_config_helpers/networks.rst.txt b/dev/doc/_sources/api/v1/trainer_config_helpers/networks.rst.txt deleted file mode 100644 index edb53acbf0c31532aa34bda044066fed72eaa426..0000000000000000000000000000000000000000 --- a/dev/doc/_sources/api/v1/trainer_config_helpers/networks.rst.txt +++ /dev/null @@ -1,123 +0,0 @@ -======== -Networks -======== - -The networks module contains pieces of neural network that combine multiple layers. - -NLP -=== - -sequence_conv_pool ------------------- -.. automodule:: paddle.trainer_config_helpers.networks - :members: sequence_conv_pool - :noindex: - -.. _api_trainer_config_helpers_network_text_conv_pool: - -text_conv_pool --------------- -.. automodule:: paddle.trainer_config_helpers.networks - :members: text_conv_pool - :noindex: - -Images -====== - -img_conv_bn_pool ----------------- -.. automodule:: paddle.trainer_config_helpers.networks - :members: img_conv_bn_pool - :noindex: - -img_conv_group --------------- -.. automodule:: paddle.trainer_config_helpers.networks - :members: img_conv_group - :noindex: - -.. _api_trainer_config_helpers_network_simple_img_conv_pool: - -simple_img_conv_pool --------------------- -.. automodule:: paddle.trainer_config_helpers.networks - :members: simple_img_conv_pool - :noindex: - -vgg_16_network ---------------- -.. automodule:: paddle.trainer_config_helpers.networks - :members: vgg_16_network - :noindex: - -Recurrent -========= - -LSTM ----- - -lstmemory_unit -`````````````` -.. automodule:: paddle.trainer_config_helpers.networks - :members: lstmemory_unit - :noindex: - -lstmemory_group -``````````````` -.. automodule:: paddle.trainer_config_helpers.networks - :members: lstmemory_group - :noindex: - -simple_lstm -``````````` -.. automodule:: paddle.trainer_config_helpers.networks - :members: simple_lstm - :noindex: - -bidirectional_lstm -`````````````````` -.. automodule:: paddle.trainer_config_helpers.networks - :members: bidirectional_lstm - :noindex: - -GRU ---- - -gru_unit -```````` -.. automodule:: paddle.trainer_config_helpers.networks - :members: gru_unit - :noindex: - -gru_group -````````` -.. 
automodule:: paddle.trainer_config_helpers.networks - :members: gru_group - :noindex: - -simple_gru -`````````` -.. automodule:: paddle.trainer_config_helpers.networks - :members: simple_gru - :noindex: - -simple_attention ----------------- -.. automodule:: paddle.trainer_config_helpers.networks - :members: simple_attention - :noindex: - -Miscs -===== - -dropout_layer --------------- -.. automodule:: paddle.trainer_config_helpers.networks - :members: dropout_layer - :noindex: - -outputs -------- -.. automodule:: paddle.trainer_config_helpers.networks - :members: outputs - :noindex: diff --git a/dev/doc/_sources/api/v1/trainer_config_helpers/optimizers.rst.txt b/dev/doc/_sources/api/v1/trainer_config_helpers/optimizers.rst.txt deleted file mode 100644 index d2f4958c92b8e3b7426945f1af07112ab4071136..0000000000000000000000000000000000000000 --- a/dev/doc/_sources/api/v1/trainer_config_helpers/optimizers.rst.txt +++ /dev/null @@ -1,61 +0,0 @@ -.. _api_trainer_config_helpers_optimizers: - -========== -Optimizers -========== - -BaseSGDOptimizer -================ -.. automodule:: paddle.trainer_config_helpers.optimizers - :members: BaseSGDOptimizer - :noindex: - -MomentumOptimizer -================= -.. automodule:: paddle.trainer_config_helpers.optimizers - :members: MomentumOptimizer - :noindex: - -AdamOptimizer -============= -.. automodule:: paddle.trainer_config_helpers.optimizers - :members: AdamOptimizer - :noindex: - -AdamaxOptimizer -================ -.. automodule:: paddle.trainer_config_helpers.optimizers - :members: AdamaxOptimizer - :noindex: - -AdaGradOptimizer -================ -.. automodule:: paddle.trainer_config_helpers.optimizers - :members: AdaGradOptimizer - :noindex: - -DecayedAdaGradOptimizer -======================= -.. automodule:: paddle.trainer_config_helpers.optimizers - :members: DecayedAdaGradOptimizer - :noindex: - -AdaDeltaOptimizer -================= -.. automodule:: paddle.trainer_config_helpers.optimizers - :members: AdaDeltaOptimizer - :noindex: - -RMSPropOptimizer -================ -.. automodule:: paddle.trainer_config_helpers.optimizers - :members: RMSPropOptimizer - :noindex: - -.. _api_trainer_config_helpers_optimizers_settings: - -settings -======== -.. automodule:: paddle.trainer_config_helpers.optimizers - :members: settings - :noindex: diff --git a/dev/doc/_sources/api/v1/trainer_config_helpers/poolings.rst.txt b/dev/doc/_sources/api/v1/trainer_config_helpers/poolings.rst.txt deleted file mode 100644 index 66566809d26f59263597b5286c5b27e0bbc9415a..0000000000000000000000000000000000000000 --- a/dev/doc/_sources/api/v1/trainer_config_helpers/poolings.rst.txt +++ /dev/null @@ -1,33 +0,0 @@ -======== -Poolings -======== - -BasePoolingType -=============== -.. automodule:: paddle.trainer_config_helpers.poolings - :members: BasePoolingType - :noindex: - -AvgPooling -========== -.. automodule:: paddle.trainer_config_helpers.poolings - :members: AvgPooling - :noindex: - -MaxPooling -========== -.. automodule:: paddle.trainer_config_helpers.poolings - :members: MaxPooling - :noindex: - -SumPooling -========== -.. automodule:: paddle.trainer_config_helpers.poolings - :members: SumPooling - :noindex: - -SquareRootNPooling -================== -.. 
automodule:: paddle.trainer_config_helpers.poolings - :members: SquareRootNPooling - :noindex: diff --git a/dev/doc/_sources/api/v2/model_configs.rst.txt b/dev/doc/_sources/api/v2/model_configs.rst.txt deleted file mode 100644 index a9f33b33ef61bf846013364672ec26ae075d0300..0000000000000000000000000000000000000000 --- a/dev/doc/_sources/api/v2/model_configs.rst.txt +++ /dev/null @@ -1,6 +0,0 @@ -====== -Layers -====== - -.. automodule:: paddle.v2.layer - :members: diff --git a/dev/doc/_sources/design/api.md.txt b/dev/doc/_sources/design/api.md.txt deleted file mode 100644 index 8185d2af0ea264a2e7b4e28b9ed05279e4a22014..0000000000000000000000000000000000000000 --- a/dev/doc/_sources/design/api.md.txt +++ /dev/null @@ -1,262 +0,0 @@ -# PaddlePaddle Design Doc - -## Ingredients - -Our design principle is to start from the essence: how can we -allow users to express and solve their problems with neural networks? -Some essential concepts that our API has to provide include: - -1. A *topology* is an expression of *layers*. - -1. A layer could be any kind of computation, including *cost*. - -1. Some layers have parameters, some don't. Most costs don't have - parameters. - -1. In some topologies, layers share parameters. For - example, - [the network for training a ranking model](https://github.com/PaddlePaddle/Paddle/issues/1311#issuecomment-279121850). - -1. At programming time, users specify topologies and possible sharing - of parameters. PaddlePaddle can figure out and create parameters - required (and possibly shared) by one or more topologies. - - -## Starting from Examples - -As a summary -of -[our discussion](https://github.com/PaddlePaddle/Paddle/issues/1315), -let us present two examples here: - - -### Example 1. Sharing Parameters between Layers - -We use -the -[3-branch ranking](https://github.com/PaddlePaddle/Paddle/issues/1311#issuecomment-279121850) model -in this example. For your convenience, I copy and paste the model's -topology as follows: - -``` -A -> f -\ -Q -> f --> cost -B -> f -/ -``` - -The following program trains the topology including the cost, and then -uses the sub-network of the trained topology for inference: - -```python -def f(x): - e = paddle.layer.embedding(x, parameter_name="embedding") - o = paddle.layer.softmax(e, parameter_name="semantic") - return o - -# Create 3 topologies (subnets), they share parameters because all -# corresponding layers have the same parameter names. -fA = f(paddle.layer.data(input_name="A")) -fB = f(paddle.layer.data(input_name="B")) -fQ = f(paddle.layer.data(input_name="Q")) - -topology = paddle.layer.less_than( - paddle.layer.cross_entropy(fA, fQ), - paddle.layer.cross_entropy(fB, fQ)) - -# Derive parameters required in topology and create them in model. -parameters = paddle.parameters.create(topology) - -# Estimate parameters used in topology from data. -paddle.train(topology, parameters, reader=read_ranking_model_data) - -# Inference using fA (or fB or fQ, as they share their parameters). -[testA, testB, testQ] = read_ranking_model_data() -print "The semantic-vector of testA: ", paddle.infer(fA, parameters, testA) -``` - - -### Example 2. Sharing Parameters between "Models" - -We use [GAN](https://github.com/PaddlePaddle/book/tree/develop/gan) in -this example. 
In the following example program, `d0` and `d1` -correspond to the two networks in the following figure: - - - -```python -def G(x): - # over-simplified example, as G has only one layer: - return paddle.layer.fc(x, parameter_name="G") - -def D(x): - # again, over-simplified: - return paddle.layer.fc(x, parameter_name="D") - -# Construct the first topology, which contains both D and G. -# By learning this topology, we update parameters of G. -d0 = paddle.layer.should_be_false(D(G(paddle.layer.data()))) - -# Construct a second topology d1, which contains only D. By -# training this topology, we update parameters of D. Note -# that d1 shares parameters with d0. -d1 = paddle.layer.should_be_true(D(paddle.layer.data())) - -# Create parameters from a list of multiple topologies (models) for -# the chance to share parameters between these topologies. -parameters = paddle.parameters.create([d0, d1]) - -# Iterative training of GAN. -for ...: - train(d0, parameters, reader=read_from_rng, immutable_parameters={"D"}) - train(d1, parameters, reader=read_from_realistic_images) - -# Use d1 for inference: -print "D thinks a batch of images are realistic ", infer(d1, parameters, read_mnist_images) -``` - - -### Summarization - - -The above two programs reveal some important design concerns: - -1. Users describe a topology as an expression of layers. Every layer - has a *parameter name*. If the users don't specify it explicitly, it's automatically generated as a unique name. By - specifying the parameter name, users can specify the sharing of - parameters between layers and even between topologies. - -1. `paddle.parameters.create` figures out parameters required by one - or more topologies from parameter names of layers. It creates these - parameters and returns a `ParameterSet` object, which is in essence - a map from *parameter names* to *parameters*. - -1. At training and inference time, `paddle.train` and `paddle.infer` - require both a topology and the parameter set that holds the parameters of that topology. There are some reasons: - - 1. This prevents users from forgetting to call - `paddle.parameters.create`. - 1. `paddle.train` needs to know which parameter set to update. - 1. Users could load another (pre-trained) parameter set and use it - with a topology in `paddle.infer`. - -1. By specifying the `immutable_parameters` parameter of - `paddle.train`, we can forbid the update of these parameters. - - -## Reader - -Not all programming frameworks allow users to define I/O functions. -An example is Google MapReduce, which can only read from text, -SSTable, and RecordIO files. Hadoop MapReduce allows users to define -readers and writers by deriving from base classes `Reader` and -`Writer`. The former is less flexible but also less error-prone. We -decided to provide users the flexibility to define their own readers. - - -There are some open questions here: - -1. **Should a reader return a Python dictionary?** - -1. **How to map multiple outputs from a reader to multiple data layers?** - -1. **How to easily compose some existing readers to read more data and - feed a topology with more data layers?** - - -## Training - -The recommended way to train a model is to call `paddle.train`, -which simply calls `paddle.trainer.Default`, a global variable of -type `paddle.trainer.SGD`. Equivalently, we can do - -```python -opt = paddle.trainer.SGD(..., paddle.updater.Adam(...)) -opt.train(topology, parameters, reader=read, ...) 
-``` - -### Updater - -Please be aware that a trainer can accept an updater as its data -member, where an updater is a class derived from -`paddle.trainer.Updater`. This is to make it easier to customize -trainers, as discussed -[here](https://github.com/PaddlePaddle/Paddle/issues/1319). - -### Event Handler - -`paddle.train` and `paddle.trainer.XXX.train` take an optional -parameter `event_handler`, which should be either `None` or a function -that handle some events: - -1. BeginTraining -1. EndTraining -1. BeginIteration -1. EndIteration -1. BeginPass -1. EndPass - -where EndPass is sent if and only if the reader yields -`end_pass=True`. - -An example as follows: - -```python -def event_handler(event): - if ininstance(event, paddle.event.EndIteration): - print paddle.test(...) - -paddle.train(topology, parameters, reader, event_handler) -``` - -If we are writing a PaddlePaddle program in and for iPython/Jypyter, -we can use metaplotlib in the event handler to plot a curve of -cost/error versus iterations, as shown -[here](https://blog.dominodatalab.com/interactive-dashboards-in-jupyter/). - -### Distributed Training - -If users want to do distributed training on a cluster, s/he should -call `paddle.dist_train` and provides access tokens to the cluster as -a parameter. - -For example, if the user has a TLS certificate that allows him to -access a Kubernetes cluster, s/he should be able to call - -```python -paddle.dist_train(model, - trainer=paddle.trainer.SGD(..., - paddle.updater.Adam(...)), - reader=read, - k8s_user="yi", - k8s_token="kube_cluster_tls.pem", - k8s_job="hello", - num_parameter_servers=15) -``` - -The pseudo code if `paddle.dist_train` is as follows: - -```python -def dist_train(topology, parameters, trainer, reader, ...): - if os.getenv("KUBERNETES_SERVICE_HOST") == None: - image_name = k8s_user + '/' + k8s_job - docker_build(image_name) - docker_push() - kube_ctrl_start_job(image_name, k8s_user, k8s_token) - else: - rank = kube_list_containers_in_job_and_return_current_containers_rank() - if rank == 0: - master() - elif rank < 15: - parameter_server() - else: - trainer.train(model, reader=read) -``` - -Please be aware that if a process is running on the Kubernetes -cluster, it will have some environment variables pre-defined. - -If `dist_train` doesn't see these environment variables, it knows -that it's running on users' personal computer, and it should work as a -*launcher*. Otherwise, it knows that it's running on the cluster and -need to figure out its role as either the master, or a trainer, or a -parameter server. diff --git a/dev/doc/_sources/design/reader/README.md.txt b/dev/doc/_sources/design/reader/README.md.txt deleted file mode 100644 index 03119fdd74502a4534c2e6a576580ce96a721c7e..0000000000000000000000000000000000000000 --- a/dev/doc/_sources/design/reader/README.md.txt +++ /dev/null @@ -1,202 +0,0 @@ -# Python Data Reader Design Doc - -At training and testing time, PaddlePaddle programs need to read data. To ease the users' work to write data reading code, we define that - -- A *reader* is a function that reads data (from file, network, random number generator, etc) and yields data items. -- A *reader creator* is a function that returns a reader function. -- A *reader decorator* is a function, which accepts one or more readers, and returns a reader. -- A *batch reader* is a function that reads data (from *reader*, file, network, random number generator, etc) and yields a batch of data items. 
- -and provide function which converts reader to batch reader, frequently used reader creators and reader decorators. - -## Data Reader Interface - -Indeed, *data reader* doesn't have to be a function that reads and yields data items. It can be any function with no parameter that creates a iterable (anything can be used in `for x in iterable`): - -``` -iterable = data_reader() -``` - -Element produced from the iterable should be a **single** entry of data, **not** a mini batch. That entry of data could be a single item, or a tuple of items. Item should be of [supported type](http://www.paddlepaddle.org/doc/ui/data_provider/pydataprovider2.html?highlight=dense_vector#input-types) (e.g., numpy 1d array of float32, int, list of int) - -An example implementation for single item data reader creator: - -```python -def reader_creator_random_image(width, height): - def reader(): - while True: - yield numpy.random.uniform(-1, 1, size=width*height) - return reader -``` - -An example implementation for multiple item data reader creator: -```python -def reader_creator_random_imageand_label(widht, height, label): - def reader(): - while True: - yield numpy.random.uniform(-1, 1, size=width*height), label - return reader -``` - -## Batch Reader Interface - -*batch reader* can be any function with no parameter that creates a iterable (anything can be used in `for x in iterable`). The output of the iterable should be a batch (list) of data items. Each item inside the list must be a tuple. - -Here are valid outputs: -```python -# a mini batch of three data items. Each data item consist three columns of data, each of which is 1. -[(1, 1, 1), -(2, 2, 2), -(3, 3, 3)] - -# a mini batch of three data items, each data item is a list (single column). -[([1,1,1],), -([2,2,2],), -([3,3,3],), -``` - -Please note that each item inside the list must be a tuple, below is an invalid output: -```python - # wrong, [1,1,1] needs to be inside a tuple: ([1,1,1],). - # Otherwise it's ambiguous whether [1,1,1] means a single column of data [1, 1, 1], - # or three column of datas, each of which is 1. -[[1,1,1], -[2,2,2], -[3,3,3]] -``` - -It's easy to convert from reader to batch reader: -```python -mnist_train = paddle.dataset.mnist.train() -mnist_train_batch_reader = paddle.batch(mnist_train, 128) -``` - -Also easy to create custom batch reader: -```python -def custom_batch_reader(): - while True: - batch = [] - for i in xrange(128): - batch.append((numpy.random.uniform(-1, 1, 28*28),)) # note that it's a tuple being appended. - yield batch - -mnist_random_image_batch_reader = custom_batch_reader -``` - -## Usage - -batch reader, mapping from item(s) read to data layer, batch size and number of total pass will be passed into `paddle.train`: - -```python -# two data layer is created: -image_layer = paddle.layer.data("image", ...) -label_layer = paddle.layer.data("label", ...) - -# ... -batch_reader = paddle.batch(paddle.dataset.mnist.train(), 128) -paddle.train(batch_reader, {"image":0, "label":1}, 128, 10, ...) -``` - -## Data Reader Decorator - -*Data reader decorator* takes a single or multiple data reader, returns a new data reader. It is similar to a [python decorator](https://wiki.python.org/moin/PythonDecorators), but it does not use `@` syntax. - -Since we have a strict interface for data readers (no parameter, return a single data item). Data reader can be used flexiable via data reader decorators. 
Following are a few examples: - -### Prefetch Data - -Since reading data may take time and training can not proceed without data. It is generally a good idea to prefetch data. - -Use `paddle.reader.buffered` to prefetch data: - -```python -buffered_reader = paddle.reader.buffered(paddle.dataset.mnist.train(), 100) -``` - -`buffered_reader` will try to buffer (prefetch) `100` data entries. - -### Compose Multiple Data Readers - -For example, we want to use a source of real images (reusing mnist dataset), and a source of random images as input for [Generative Adversarial Networks](https://arxiv.org/abs/1406.2661). - -We can do: - -```python -def reader_creator_random_image(width, height): - def reader(): - while True: - yield numpy.random.uniform(-1, 1, size=width*height) - return reader - -def reader_creator_bool(t): - def reader: - while True: - yield t - return reader - -true_reader = reader_creator_bool(True) -false_reader = reader_creator_bool(False) - -reader = paddle.reader.compose(paddle.dataset.mnist.train(), data_reader_creator_random_image(20, 20), true_reader, false_reader) -# Skipped 1 because paddle.dataset.mnist.train() produces two items per data entry. -# And we don't care second item at this time. -paddle.train(paddle.batch(reader, 128), {"true_image":0, "fake_image": 2, "true_label": 3, "false_label": 4}, ...) -``` - -### Shuffle - -Given shuffle buffer size `n`, `paddle.reader.shuffle` will return a data reader that buffers `n` data entries and shuffle them before a data entry is read. - -Example: -```python -reader = paddle.reader.shuffle(paddle.dataset.mnist.train(), 512) -``` - -## Q & A - -### Why reader return only a single entry, but not a mini batch? - -Always returning a single entry make reusing existing data readers much easier (e.g., if existing reader return not a single entry but 3 entries, training code will be more complex because it need to handle cases like batch size 2). - -We provide function `paddle.batch` to turn (single entry) reader into batch reader. - -### Why do we need batch reader, isn't train take reader and batch_size as arguments sufficient? - -In most of the case, train taking reader and batch_size as arguments would be sufficent. However sometimes user want to customize order of data entries inside a mini batch. Or even change batch size dynamically. - -### Why use a dictionary but not a list to provide mapping? - -We decided to use dictionary (`{"image":0, "label":1}`) instead of list (`["image", "label"]`) is because that user can easily resue item (e.g., using `{"image_a":0, "image_b":0, "label":1}`) or skip item (e.g., using `{"image_a":0, "label":2}`). - -### How to create custom data reader creator - -```python -def image_reader_creator(image_path, label_path, n): - def reader(): - f = open(image_path) - l = open(label_path) - images = numpy.fromfile( - f, 'ubyte', count=n * 28 * 28).reshape((n, 28 * 28)).astype('float32') - images = images / 255.0 * 2.0 - 1.0 - labels = numpy.fromfile(l, 'ubyte', count=n).astype("int") - for i in xrange(n): - yield images[i, :], labels[i] # a single entry of data is created each time - f.close() - l.close() - return reader - -# images_reader_creator creates a reader -reader = image_reader_creator("/path/to/image_file", "/path/to/label_file", 1024) -paddle.train(paddle.batch(reader, 128), {"image":0, "label":1}, ...) 
-``` - -### How is `paddle.train` implemented - -An example implementation of paddle.train could be: - -```python -def train(batch_reader, mapping, batch_size, total_pass): - for pass_idx in range(total_pass): - for mini_batch in batch_reader(): # this loop will never end in online learning. - do_forward_backward(mini_batch, mapping) -``` diff --git a/dev/doc/_sources/getstarted/basic_usage/index_en.rst.txt b/dev/doc/_sources/getstarted/basic_usage/index_en.rst.txt deleted file mode 100644 index c10b897d4292d0c2b062b5c8e23466505afa408a..0000000000000000000000000000000000000000 --- a/dev/doc/_sources/getstarted/basic_usage/index_en.rst.txt +++ /dev/null @@ -1,101 +0,0 @@ -Simple Linear Regression -======================== - -PaddlePaddle is a deep learning platform open-sourced by Baidu. With PaddlePaddle, you can easily train a classic neural network within a couple lines of configuration, or you can build sophisticated models that provide state-of-the-art performance on difficult learning tasks like sentiment analysis, machine translation, image caption and so on. - -Problem Background ------------------- - -Now, to give you a hint of what using PaddlePaddle looks like, let's start with a fundamental learning problem - `simple linear regression `_: you have observed a set of two-dimensional data points of ``X`` and ``Y``, where ``X`` is an explanatory variable and ``Y`` is corresponding dependent variable, and you want to recover the underlying correlation between ``X`` and ``Y``. Linear regression can be used in many practical scenarios. For example, ``X`` can be a variable about house size, and ``Y`` a variable about house price. You can build a model that captures relationship between them by observing real estate markets. - -Prepare the Data ------------------ - -Suppose the true relationship can be characterized as ``Y = 2X + 0.3``, let's see how to recover this pattern only from observed data. Here is a piece of python code that feeds synthetic data to PaddlePaddle. The code is pretty self-explanatory, the only extra thing you need to add for PaddlePaddle is a definition of input data types. - - .. code-block:: python - - # dataprovider.py - from paddle.trainer.PyDataProvider2 import * - import random - - # define data types of input: 2 real numbers - @provider(input_types=[dense_vector(1), dense_vector(1)],use_seq=False) - def process(settings, input_file): - for i in xrange(2000): - x = random.random() - yield [x], [2*x+0.3] - -Train a NeuralNetwork ----------------------- - -To recover this relationship between ``X`` and ``Y``, we use a neural network with one layer of linear activation units and a square error cost layer. Don't worry if you are not familiar with these terminologies, it's just saying that we are starting from a random line ``Y' = wX + b`` , then we gradually adapt ``w`` and ``b`` to minimize the difference between ``Y'`` and ``Y``. Here is what it looks like in PaddlePaddle: - - .. code-block:: python - - # trainer_config.py - from paddle.trainer_config_helpers import * - - # 1. read data. Suppose you saved above python code as dataprovider.py - data_file = 'empty.list' - with open(data_file, 'w') as f: f.writelines(' ') - define_py_data_sources2(train_list=data_file, test_list=None, - module='dataprovider', obj='process',args={}) - - # 2. learning algorithm - settings(batch_size=12, learning_rate=1e-3, learning_method=MomentumOptimizer()) - - # 3. 
Network configuration - x = data_layer(name='x', size=1) - y = data_layer(name='y', size=1) - y_predict = fc_layer(input=x, param_attr=ParamAttr(name='w'), size=1, act=LinearActivation(), bias_attr=ParamAttr(name='b')) - cost = regression_cost(input=y_predict, label=y) - outputs(cost) - -Some of the most fundamental usages of PaddlePaddle are demonstrated: - -- The first part shows how to feed data into PaddlePaddle. In general cases, PaddlePaddle reads raw data from a list of files, and then do some user-defined process to get real input. In this case, we only need to create a placeholder file since we are generating synthetic data on the fly. - -- The second part describes learning algorithm. It defines in what ways adjustments are made to model parameters. PaddlePaddle provides a rich set of optimizers, but a simple momentum based optimizer will suffice here, and it processes 12 data points each time. - -- Finally, the network configuration. It usually is as simple as "stacking" layers. Three kinds of layers are used in this configuration: - - **Data Layer**: a network always starts with one or more data layers. They provide input data to the rest of the network. In this problem, two data layers are used respectively for ``X`` and ``Y``. - - **FC Layer**: FC layer is short for Fully Connected Layer, which connects all the input units to current layer and does the actual computation specified as activation function. Computation layers like this are the fundamental building blocks of a deeper model. - - **Cost Layer**: in training phase, cost layers are usually the last layers of the network. They measure the performance of current model, and provide guidence to adjust parameters. - -Now that everything is ready, you can train the network with a simple command line call: - - .. code-block:: bash - - paddle train --config=trainer_config.py --save_dir=./output --num_passes=30 - - -This means that PaddlePaddle will train this network on the synthectic dataset for 30 passes, and save all the models under path ``./output``. You will see from the messages printed out during training phase that the model cost is decreasing as time goes by, which indicates we are getting a closer guess. - - -Evaluate the Model -------------------- - -Usually, a different dataset that left out during training phase should be used to evalute the models. However, we are lucky enough to know the real answer: ``w=2, b=0.3``, thus a better option is to check out model parameters directly. - -In PaddlePaddle, training is just to get a collection of model parameters, which are ``w`` and ``b`` in this case. Each parameter is saved in an individual file in the popular ``numpy`` array format. Here is the code that reads parameters from last pass. - - .. code-block:: python - - import numpy as np - import os - - def load(file_name): - with open(file_name, 'rb') as f: - f.read(16) # skip header for float type. - return np.fromfile(f, dtype=np.float32) - - print 'w=%.6f, b=%.6f' % (load('output/pass-00029/w'), load('output/pass-00029/b')) - # w=1.999743, b=0.300137 - - .. image:: parameters.png - :align: center - -Although starts from a random guess, you can see that value of ``w`` changes quickly towards 2 and ``b`` changes quickly towards 0.3. In the end, the predicted line is almost identical with real answer. - -There, you have recovered the underlying pattern between ``X`` and ``Y`` only from observed data. 
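As a quick sanity check, the recovered parameters can be plugged back into the line and compared against the generating rule ``Y = 2X + 0.3``. The following is only a minimal sketch: it reuses the ``load`` helper defined above and assumes the same ``output/pass-00029`` directory produced by the training run.

 .. code-block:: python

     import numpy as np

     # Reuse the load() helper shown above; the pass directory name is an assumption.
     w = load('output/pass-00029/w')[0]
     b = load('output/pass-00029/b')[0]

     # Compare predictions of the recovered line with the true rule Y = 2X + 0.3.
     for x in np.linspace(0.0, 1.0, 5):
         print 'x=%.2f predicted=%.4f true=%.4f' % (x, w * x + b, 2 * x + 0.3)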
diff --git a/dev/doc/_sources/getstarted/build_and_install/build_from_source_en.md.txt b/dev/doc/_sources/getstarted/build_and_install/build_from_source_en.md.txt deleted file mode 100644 index d9d54bff3096cb3520409971dbd1b2e179ac8be1..0000000000000000000000000000000000000000 --- a/dev/doc/_sources/getstarted/build_and_install/build_from_source_en.md.txt +++ /dev/null @@ -1,222 +0,0 @@ -Installing from Sources -========================== - -* [1. Download and Setup](#download) -* [2. Requirements](#requirements) -* [3. Build on Ubuntu](#ubuntu) -* [4. Build on CentOS](#centos) - - -## Download and Setup -You can download PaddlePaddle from the [github source](https://github.com/PaddlePaddle/Paddle). - -```bash -git clone https://github.com/PaddlePaddle/Paddle paddle -cd paddle -``` -## Requirements - -To compile the source code, your computer must have the following dependencies installed. - -- **Compiler**: GCC >= 4.8 or Clang >= 3.3 (AppleClang >= 5.1) and a gfortran compiler -- **CMake**: CMake >= 3.0 (at least CMake 3.4 on Mac OS X) -- **BLAS**: MKL, OpenBLAS or ATLAS -- **Python**: only Python 2.7 is supported - -**Note:** For CUDA 7.0 and CUDA 7.5, GCC 5.0 and later are not supported! -For CUDA 8.0, GCC versions later than 5.3 are not supported! - -### Options - -PaddlePaddle supports the following build options:
| Option | Description |
| ------------------ | -------------------------------------------------- |
| WITH_GPU | Compile PaddlePaddle with NVIDIA GPU |
| WITH_AVX | Compile PaddlePaddle with AVX intrinsics |
| WITH_DSO | Compile PaddlePaddle with dynamically linked CUDA |
| WITH_TESTING | Compile PaddlePaddle with unit testing |
| WITH_SWIG_PY | Compile PaddlePaddle with inference API |
| WITH_STYLE_CHECK | Compile PaddlePaddle with style check |
| WITH_PYTHON | Compile PaddlePaddle with the Python interpreter |
| WITH_DOUBLE | Compile PaddlePaddle with double precision |
| WITH_RDMA | Compile PaddlePaddle with RDMA support |
| WITH_TIMER | Compile PaddlePaddle with stats timer |
| WITH_PROFILER | Compile PaddlePaddle with GPU profiler |
| WITH_DOC | Compile PaddlePaddle with documentation |
| ON_COVERALLS | Compile PaddlePaddle with code coverage |
| COVERALLS_UPLOAD | Package code coverage data to coveralls |
| ON_TRAVIS | Exclude special unit tests on Travis CI |
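These switches are passed to CMake as ordinary `-D` cache options. As an illustrative sketch (the defaults used below are assumptions and may differ between releases), a CPU-only build with unit tests but without the style check could be configured as:

```bash
# Hypothetical configure step; verify option defaults in CMakeLists.txt for your release.
mkdir -p build && cd build
cmake .. -DWITH_GPU=OFF -DWITH_TESTING=ON -DWITH_STYLE_CHECK=OFF
```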
- - -**Note:** - - The GPU version works best with Cuda Toolkit 8.0 and cuDNN v5. - - Other versions like Cuda Toolkit 7.0, 7.5 and cuDNN v3, v4 are also supported. - - **To utilize cuDNN v5, Cuda Toolkit 7.5 is prerequisite and vice versa.** - -As a simple example, consider the following: - -1. **BLAS Dependencies(optional)** - - CMake will search BLAS libraries from system. If not found, OpenBLAS will be downloaded, built and installed automatically. - To utilize preinstalled BLAS, you can simply specify MKL, OpenBLAS or ATLAS via `MKL_ROOT`, `OPENBLAS_ROOT` or `ATLAS_ROOT`. - - ```bash - # specify MKL - cmake .. -DMKL_ROOT= - # or specify OpenBLAS - cmake .. -DOPENBLAS_ROOT= - ``` - -2. **Doc Dependencies(optional)** - - To generate PaddlePaddle's documentation, install dependencies and set `-DWITH_DOC=ON` as follows: - - ```bash - pip install 'sphinx>=1.4.0' - pip install sphinx_rtd_theme recommonmark - - # install doxygen on Ubuntu - sudo apt-get install doxygen - # install doxygen on Mac OS X - brew install doxygen - - # active docs in cmake - cmake .. -DWITH_DOC=ON` - ``` - -## Build on Ubuntu 14.04 - -### Install Dependencies - -- **Paddle Dependencies** - - ```bash - # necessary - sudo apt-get update - sudo apt-get install -y git curl gcc g++ gfortran make build-essential automake - sudo apt-get install -y python python-pip python-numpy libpython-dev bison - sudo pip install 'protobuf==3.1.0.post1' - - # install cmake 3.4 - curl -sSL https://cmake.org/files/v3.4/cmake-3.4.1.tar.gz | tar -xz && \ - cd cmake-3.4.1 && ./bootstrap && make -j4 && sudo make install && \ - cd .. && rm -rf cmake-3.4.1 - ``` - -- **GPU Dependencies (optional)** - - To build GPU version, you will need the following installed: - - 1. a CUDA-capable GPU - 2. A supported version of Linux with a gcc compiler and toolchain - 3. NVIDIA CUDA Toolkit (available at http://developer.nvidia.com/cuda-downloads) - 4. NVIDIA cuDNN Library (availabel at https://developer.nvidia.com/cudnn) - - The CUDA development environment relies on tight integration with the host development environment, - including the host compiler and C runtime libraries, and is therefore only supported on - distribution versions that have been qualified for this CUDA Toolkit release. - - After downloading cuDNN library, issue the following commands: - - ```bash - sudo tar -xzf cudnn-7.5-linux-x64-v5.1.tgz -C /usr/local - sudo chmod a+r /usr/local/cuda/include/cudnn.h /usr/local/cuda/lib64/libcudnn* - ``` - Then you need to set LD\_LIBRARY\_PATH, PATH environment variables in ~/.bashrc. - - ```bash - export LD_LIBRARY_PATH=/usr/local/cuda/lib64:$LD_LIBRARY_PATH - export PATH=/usr/local/cuda/bin:$PATH - ``` - -### Build and Install - -As usual, the best option is to create build folder under paddle project directory. - -```bash -mkdir build && cd build -``` - -Finally, you can build and install PaddlePaddle: - -```bash -# you can add build option here, such as: -cmake .. -DCMAKE_INSTALL_PREFIX= -# please use sudo make install, if you want to install PaddlePaddle into the system -make -j `nproc` && make install -# set PaddlePaddle installation path in ~/.bashrc -export PATH=/bin:$PATH -# install PaddlePaddle Python modules. 
-sudo pip install /opt/paddle/share/wheels/*.whl -``` -## Build on Centos 7 - -### Install Dependencies - -- **CPU Dependencies** - - ```bash - # necessary - sudo yum update - sudo yum install -y epel-release - sudo yum install -y make cmake3 python-devel python-pip gcc-gfortran swig git - sudo pip install wheel numpy - sudo pip install 'protobuf>=3.0.0' - ``` - -- **GPU Dependencies (optional)** - - To build GPU version, you will need the following installed: - - 1. a CUDA-capable GPU - 2. A supported version of Linux with a gcc compiler and toolchain - 3. NVIDIA CUDA Toolkit (available at http://developer.nvidia.com/cuda-downloads) - 4. NVIDIA cuDNN Library (availabel at https://developer.nvidia.com/cudnn) - - The CUDA development environment relies on tight integration with the host development environment, - including the host compiler and C runtime libraries, and is therefore only supported on - distribution versions that have been qualified for this CUDA Toolkit release. - - After downloading cuDNN library, issue the following commands: - - ```bash - sudo tar -xzf cudnn-7.5-linux-x64-v5.1.tgz -C /usr/local - sudo chmod a+r /usr/local/cuda/include/cudnn.h /usr/local/cuda/lib64/libcudnn* - ``` - Then you need to set LD\_LIBRARY\_PATH, PATH environment variables in ~/.bashrc. - - ```bash - export LD_LIBRARY_PATH=/usr/local/cuda/lib64:$LD_LIBRARY_PATH - export PATH=/usr/local/cuda/bin:$PATH - ``` - -### Build and Install - -As usual, the best option is to create build folder under paddle project directory. - -```bash -mkdir build && cd build -``` - -Finally, you can build and install PaddlePaddle: - -```bash -# you can add build option here, such as: -cmake3 .. -DCMAKE_INSTALL_PREFIX= -# please use sudo make install, if you want to install PaddlePaddle into the system -make -j `nproc` && make install -# set PaddlePaddle installation path in ~/.bashrc -export PATH=/bin:$PATH -# install PaddlePaddle Python modules. -sudo pip install /opt/paddle/share/wheels/*.whl -``` diff --git a/dev/doc/_sources/getstarted/build_and_install/docker_install_en.rst.txt b/dev/doc/_sources/getstarted/build_and_install/docker_install_en.rst.txt deleted file mode 100644 index 5a1056e859a0c977c9cd365ff1e4ffe58596f41f..0000000000000000000000000000000000000000 --- a/dev/doc/_sources/getstarted/build_and_install/docker_install_en.rst.txt +++ /dev/null @@ -1,179 +0,0 @@ -PaddlePaddle in Docker Containers -================================= - -Docker container is currently the only officially-supported way to -running PaddlePaddle. This is reasonable as Docker now runs on all -major operating systems including Linux, Mac OS X, and Windows. -Please be aware that you will need to change `Dockers settings -`_ to make full use -of your hardware resource on Mac OS X and Windows. - - -Development Using Docker ------------------------- - -Developers can work on PaddlePaddle using Docker. This allows -developers to work on different platforms -- Linux, Mac OS X, and -Windows -- in a consistent way. - -1. Build the Development Environment as a Docker Image - - .. code-block:: bash - - git clone --recursive https://github.com/PaddlePaddle/Paddle - cd Paddle - docker build -t paddle:dev -f paddle/scripts/docker/Dockerfile . - - - Note that by default :code:`docker build` wouldn't import source - tree into the image and build it. If we want to do that, we need - to set a build arg: - - .. code-block:: bash - - docker build -t paddle:dev -f paddle/scripts/docker/Dockerfile --build-arg BUILD_AND_INSTALL=ON . - - -2. 
Run the Development Environment - - Once we got the image :code:`paddle:dev`, we can use it to develop - Paddle by mounting the local source code tree into a container that - runs the image: - - .. code-block:: bash - - docker run -d -p 2202:22 -v $PWD:/paddle paddle:dev - - This runs a container of the development environment Docker image - with the local source tree mounted to :code:`/paddle` of the - container. - - Note that the default entry-point of :code:`paddle:dev` is - :code:`sshd`, and above :code:`docker run` commands actually starts - an SSHD server listening on port 2202. This allows us to log into - this container with: - - .. code-block:: bash - - ssh root@localhost -p 2202 - - Usually, I run above commands on my Mac. I can also run them on a - GPU server :code:`xxx.yyy.zzz.www` and ssh from my Mac to it: - - .. code-block:: bash - - my-mac$ ssh root@xxx.yyy.zzz.www -p 2202 - -3. Build and Install Using the Development Environment - - Once I am in the container, I can use - :code:`paddle/scripts/docker/build.sh` to build, install, and test - Paddle: - - .. code-block:: bash - - /paddle/paddle/scripts/docker/build.sh - - This builds everything about Paddle in :code:`/paddle/build`. And - we can run unit tests there: - - .. code-block:: bash - - cd /paddle/build - ctest - - -CPU-only and GPU Images ------------------------ - -For each version of PaddlePaddle, we release 2 Docker images, a -CPU-only one and a CUDA GPU one. We do so by configuring -`dockerhub.com `_ -automatically runs the following commands: - -.. code-block:: bash - - docker build -t paddle:cpu -f paddle/scripts/docker/Dockerfile . - docker build -t paddle:gpu -f paddle/scripts/docker/Dockerfile.gpu . - - -To run the CPU-only image as an interactive container: - -.. code-block:: bash - - docker run -it --rm paddledev/paddle:cpu-latest /bin/bash - -or, we can run it as a daemon container - -.. code-block:: bash - - docker run -d -p 2202:22 paddledev/paddle:cpu-latest - -and SSH to this container using password :code:`root`: - -.. code-block:: bash - - ssh -p 2202 root@localhost - -An advantage of using SSH is that we can connect to PaddlePaddle from -more than one terminals. For example, one terminal running vi and -another one running Python interpreter. Another advantage is that we -can run the PaddlePaddle container on a remote server and SSH to it -from a laptop. - - -Above methods work with the GPU image too -- just please don't forget -to install CUDA driver and let Docker knows about it: - -.. code-block:: bash - - export CUDA_SO="$(\ls /usr/lib64/libcuda* | xargs -I{} echo '-v {}:{}') $(\ls /usr/lib64/libnvidia* | xargs -I{} echo '-v {}:{}')" - export DEVICES=$(\ls /dev/nvidia* | xargs -I{} echo '--device {}:{}') - docker run ${CUDA_SO} ${DEVICES} -it paddledev/paddle:gpu-latest - - -Non-AVX Images --------------- - -Please be aware that the CPU-only and the GPU images both use the AVX -instruction set, but old computers produced before 2008 do not support -AVX. The following command checks if your Linux computer supports -AVX: - -.. code-block:: bash - - if cat /proc/cpuinfo | grep -i avx; then echo Yes; else echo No; fi - - -If it doesn't, we will need to build non-AVX images manually from -source code: - -.. code-block:: bash - - cd ~ - git clone https://github.com/PaddlePaddle/Paddle.git - cd Paddle - docker build --build-arg WITH_AVX=OFF -t paddle:cpu-noavx -f paddle/scripts/docker/Dockerfile . - docker build --build-arg WITH_AVX=OFF -t paddle:gpu-noavx -f paddle/scripts/docker/Dockerfile.gpu . 
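Once built, these locally tagged images can presumably be used in place of the official :code:`paddledev/paddle` images in the commands shown earlier, for example:

.. code-block:: bash

   # Run the locally built non-AVX CPU image as an interactive container.
   docker run -it --rm paddle:cpu-noavx /bin/bash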
- - -Documentation -------------- - -Paddle Docker images include an HTML version of C++ source code -generated using `woboq code browser -`_. This makes it easy -for users to browse and understand the C++ source code. - -As long as we give the Paddle Docker container a name, we can run an -additional Nginx Docker container to serve the volume from the Paddle -container: - -.. code-block:: bash - - docker run -d --name paddle-cpu-doc paddle:cpu - docker run -d --volumes-from paddle-cpu-doc -p 8088:80 nginx - - -Then we can direct our Web browser to the HTML version of source code -at http://localhost:8088/paddle/ diff --git a/dev/doc/_sources/getstarted/build_and_install/index_en.rst.txt b/dev/doc/_sources/getstarted/build_and_install/index_en.rst.txt deleted file mode 100644 index 1bfd4f75c0b9b82d61d28a30f03181f7be159f24..0000000000000000000000000000000000000000 --- a/dev/doc/_sources/getstarted/build_and_install/index_en.rst.txt +++ /dev/null @@ -1,23 +0,0 @@ -Install and Build -================= - -Install PaddlePaddle ----------------------- - -.. toctree:: - :maxdepth: 1 - - docker_install_en.rst - ubuntu_install_en.rst - -Build from Source ------------------ - -.. warning:: - - Please use :code:`deb` package or :code:`docker` image to install paddle. The building guide is used for hacking or contributing PaddlePaddle source code. - -.. toctree:: - :maxdepth: 1 - - build_from_source_en.md diff --git a/dev/doc/_sources/getstarted/build_and_install/ubuntu_install_en.rst.txt b/dev/doc/_sources/getstarted/build_and_install/ubuntu_install_en.rst.txt deleted file mode 100644 index ea8042085bf458be96e71017d229d88ad867695b..0000000000000000000000000000000000000000 --- a/dev/doc/_sources/getstarted/build_and_install/ubuntu_install_en.rst.txt +++ /dev/null @@ -1,25 +0,0 @@ -Debian Package installation guide -================================= - -PaddlePaddle supports :code:`deb` pacakge. The installation of this :code:`deb` package is tested in ubuntu 14.04, but it should be support other debian based linux, too. - -There are four versions of debian package, :code:`cpu`, :code:`gpu`, :code:`cpu-noavx`, :code:`gpu-noavx`. And :code:`noavx` version is used to support CPU which does not contain :code:`AVX` instructions. The download url of :code:`deb` package is \: https://github.com/baidu/Paddle/releases/ - - -After downloading PaddlePaddle deb packages, you can use :code:`gdebi` install. - -.. code-block:: bash - - gdebi paddle-*.deb - -If :code:`gdebi` is not installed, you can use :code:`sudo apt-get install gdebi` to install it. - -Or you can use following commands to install PaddlePaddle. - -.. code-block:: bash - - dpkg -i paddle-*.deb - apt-get install -f - -And if you use GPU version deb package, you need to install CUDA toolkit and cuDNN, and set related environment variables(such as LD_LIBRARY_PATH) first. It is normal when `dpkg -i` get errors. `apt-get install -f` will continue install paddle, and install dependences. - diff --git a/dev/doc/_sources/getstarted/index_en.rst.txt b/dev/doc/_sources/getstarted/index_en.rst.txt deleted file mode 100644 index 55d95d8015e56ddae3363d19315db0fad841caad..0000000000000000000000000000000000000000 --- a/dev/doc/_sources/getstarted/index_en.rst.txt +++ /dev/null @@ -1,8 +0,0 @@ -GET STARTED -============ - -.. 
toctree:: - :maxdepth: 2 - - build_and_install/index_en.rst - basic_usage/index_en.rst diff --git a/dev/doc/_sources/howto/deep_model/rnn/index_en.rst.txt b/dev/doc/_sources/howto/deep_model/rnn/index_en.rst.txt deleted file mode 100644 index 7adc79873d699fdfd5a85034bcef964dd1f19132..0000000000000000000000000000000000000000 --- a/dev/doc/_sources/howto/deep_model/rnn/index_en.rst.txt +++ /dev/null @@ -1,7 +0,0 @@ -RNN Models -========== - -.. toctree:: - :maxdepth: 1 - - rnn_config_en.rst diff --git a/dev/doc/_sources/howto/deep_model/rnn/rnn_config_en.rst.txt b/dev/doc/_sources/howto/deep_model/rnn/rnn_config_en.rst.txt deleted file mode 100644 index 73f5d5371fcd3ce95253cad47b0d8e738284441c..0000000000000000000000000000000000000000 --- a/dev/doc/_sources/howto/deep_model/rnn/rnn_config_en.rst.txt +++ /dev/null @@ -1,251 +0,0 @@ -RNN Configuration -================= - -This tutorial will guide you how to configure recurrent neural network in PaddlePaddle. PaddlePaddle supports highly flexible and efficient recurrent neural network configuration. In this tutorial, you will learn how to: - -- prepare sequence data for learning recurrent neural networks. -- configure recurrent neural network architecture. -- generate sequence with learned recurrent neural network models. - -We will use vanilla recurrent neural network, and sequence to sequence model to guide you through these steps. The code of sequence to sequence model can be found at :code:`demo/seqToseq`. - -===================== -Prepare Sequence Data -===================== - -PaddlePaddle does not need any preprocessing to sequence data, such as padding. The only thing that needs to be done is to set the type of the corresponding type to input. For example, the following code snippets defines three input. All of them are sequences, and the size of them are :code:`src_dict`, :code:`trg_dict`, and :code:`trg_dict`: - -.. code-block:: python - - settings.input_types = [ - integer_value_sequence(len(settings.src_dict)), - integer_value_sequence(len(settings.trg_dict)), - integer_value_sequence(len(settings.trg_dict))] - - -Then at the :code:`process` function, each :code:`yield` function will return three integer lists. Each integer list is treated as a sequence of integers: - -.. code-block:: python - - yield src_ids, trg_ids, trg_ids_next - - -For more details description of how to write a data provider, please refer to :ref:`api_pydataprovider2` . The full data provider file is located at :code:`demo/seqToseq/dataprovider.py`. - -=============================================== -Configure Recurrent Neural Network Architecture -=============================================== - -------------------------------------- -Simple Gated Recurrent Neural Network -------------------------------------- - -Recurrent neural network process a sequence at each time step sequentially. An example of the architecture of LSTM is listed below. - -.. image:: ../../../tutorials/sentiment_analysis/src/bi_lstm.jpg - :align: center - -Generally speaking, a recurrent network perform the following operations from :math:`t=1` to :math:`t=T`, or reversely from :math:`t=T` to :math:`t=1`. - -.. math:: - - x_{t+1} = f_x(x_t), y_t = f_y(x_t) - - -where :math:`f_x(.)` is called **step function**, and :math:`f_y(.)` is called **output function**. In vanilla recurrent neural network, both of the step function and output function are very simple. However, PaddlePaddle supports the configuration of very complex architectures by modifying these two functions. 
We will use the sequence to sequence model with attention as an example to demonstrate how you can configure complex recurrent neural network models. In this section, we will use a simple vanilla recurrent neural network as an example of configuring simple recurrent neural network using :code:`recurrent_group`. Notice that if you only need to use simple RNN, GRU, or LSTM, then :code:`grumemory` and :code:`lstmemory` is recommended because they are more computationally efficient than :code:`recurrent_group`. - -For vanilla RNN, at each time step, the **step function** is: - -.. math:: - - x_{t+1} = W_x x_t + W_i I_t + b - -where :math:`x_t` is the RNN state, and :math:`I_t` is the input, :math:`W_x` and :math:`W_i` are transformation matrices for RNN states and inputs, respectively. :math:`b` is the bias. -Its **output function** simply takes :math:`x_t` as the output. - -:code:`recurrent_group` is the most important tools for constructing recurrent neural networks. It defines the **step function**, **output function** and the inputs of the recurrent neural network. Notice that the :code:`step` argument of this function implements both the :code:`step function` and the :code:`output function`: - -.. code-block:: python - - def simple_rnn(input, - size=None, - name=None, - reverse=False, - rnn_bias_attr=None, - act=None, - rnn_layer_attr=None): - def __rnn_step__(ipt): - out_mem = memory(name=name, size=size) - rnn_out = mixed_layer(input = [full_matrix_projection(ipt), - full_matrix_projection(out_mem)], - name = name, - bias_attr = rnn_bias_attr, - act = act, - layer_attr = rnn_layer_attr, - size = size) - return rnn_out - return recurrent_group(name='%s_recurrent_group' % name, - step=__rnn_step__, - reverse=reverse, - input=input) - - -PaddlePaddle uses memory to construct step function. **Memory** is the most important concept when constructing recurrent neural networks in PaddlePaddle. A memory is a state that is used recurrently in step functions, such as :math:`x_{t+1} = f_x(x_t)`. One memory contains an **output** and a **input**. The output of memory at the current time step is utilized as the input of the memory at the next time step. A memory can also has a **boot layer**, whose output is utilized as the initial value of the memory. In our case, the output of the gated recurrent unit is employed as the output memory. Notice that the name of the layer :code:`rnn_out` is the same as the name of :code:`out_mem`. This means the output of the layer :code:`rnn_out` (:math:`x_{t+1}`) is utilized as the **output** of :code:`out_mem` memory. - -A memory can also be a sequence. In this case, at each time step, we have a sequence as the state of the recurrent neural network. This can be useful when constructing very complex recurrent neural network. Other advanced functions include defining multiple memories, and defining hierarchical recurrent neural network architecture using sub-sequence. - -We return :code:`rnn_out` at the end of the function. It means that the output of the layer :code:`rnn_out` is utilized as the **output** function of the gated recurrent neural network. - ------------------------------------------ -Sequence to Sequence Model with Attention ------------------------------------------ -We will use the sequence to sequence model with attention as an example to demonstrate how you can configure complex recurrent neural network models. An illustration of the sequence to sequence model with attention is shown in the following figure. - -.. 
image:: ../../../tutorials/text_generation/encoder-decoder-attention-model.png - :align: center - -In this model, the source sequence :math:`S = \{s_1, \dots, s_T\}` is encoded with a bidirectional gated recurrent neural networks. The hidden states of the bidirectional gated recurrent neural network :math:`H_S = \{H_1, \dots, H_T\}` is called *encoder vector* The decoder is a gated recurrent neural network. When decoding each token :math:`y_t`, the gated recurrent neural network generates a set of weights :math:`W_S^t = \{W_1^t, \dots, W_T^t\}`, which are used to compute a weighted sum of the encoder vector. The weighted sum of the encoder vector is utilized to condition the generation of the token :math:`y_t`. - -The encoder part of the model is listed below. It calls :code:`grumemory` to represent gated recurrent neural network. It is the recommended way of using recurrent neural network if the network architecture is simple, because it is faster than :code:`recurrent_group`. We have implemented most of the commonly used recurrent neural network architectures, you can refer to :ref:`api_trainer_config_helpers_layers` for more details. - -We also project the encoder vector to :code:`decoder_size` dimensional space, get the first instance of the backward recurrent network, and project it to :code:`decoder_size` dimensional space: - -.. code-block:: python - - # Define the data layer of the source sentence. - src_word_id = data_layer(name='source_language_word', size=source_dict_dim) - # Calculate the word embedding of each word. - src_embedding = embedding_layer( - input=src_word_id, - size=word_vector_dim, - param_attr=ParamAttr(name='_source_language_embedding')) - # Apply forward recurrent neural network. - src_forward = grumemory(input=src_embedding, size=encoder_size) - # Apply backward recurrent neural network. reverse=True means backward recurrent neural network. - src_backward = grumemory(input=src_embedding, - size=encoder_size, - reverse=True) - # Mix the forward and backward parts of the recurrent neural network together. - encoded_vector = concat_layer(input=[src_forward, src_backward]) - - # Project encoding vector to decoder_size. - encoder_proj = mixed_layer(input = [full_matrix_projection(encoded_vector)], - size = decoder_size) - - # Compute the first instance of the backward RNN. - backward_first = first_seq(input=src_backward) - - # Project the first instance of backward RNN to decoder size. - decoder_boot = mixed_layer(input=[full_matrix_projection(backward_first)], size=decoder_size, act=TanhActivation()) - - -The decoder uses :code:`recurrent_group` to define the recurrent neural network. The step and output functions are defined in :code:`gru_decoder_with_attention`: - -.. code-block:: python - - group_inputs=[StaticInput(input=encoded_vector,is_seq=True), - StaticInput(input=encoded_proj,is_seq=True)] - trg_embedding = embedding_layer( - input=data_layer(name='target_language_word', - size=target_dict_dim), - size=word_vector_dim, - param_attr=ParamAttr(name='_target_language_embedding')) - group_inputs.append(trg_embedding) - - # For decoder equipped with attention mechanism, in training, - # target embedding (the groudtruth) is the data input, - # while encoded source sequence is accessed to as an unbounded memory. - # StaticInput means the same value is utilized at different time steps. - # Otherwise, it is a sequence input. Inputs at different time steps are different. - # All sequence inputs should have the same length. 
- decoder = recurrent_group(name=decoder_group_name, - step=gru_decoder_with_attention, - input=group_inputs) - - -The implementation of the step function is listed as below. First, it defines the **memory** of the decoder network. Then it defines attention, gated recurrent unit step function, and the output function: - -.. code-block:: python - - def gru_decoder_with_attention(enc_vec, enc_proj, current_word): - # Defines the memory of the decoder. - # The output of this memory is defined in gru_step. - # Notice that the name of gru_step should be the same as the name of this memory. - decoder_mem = memory(name='gru_decoder', - size=decoder_size, - boot_layer=decoder_boot) - # Compute attention weighted encoder vector. - context = simple_attention(encoded_sequence=enc_vec, - encoded_proj=enc_proj, - decoder_state=decoder_mem) - # Mix the current word embedding and the attention weighted encoder vector. - decoder_inputs = mixed_layer(inputs = [full_matrix_projection(context), - full_matrix_projection(current_word)], - size = decoder_size * 3) - # Define Gated recurrent unit recurrent neural network step function. - gru_step = gru_step_layer(name='gru_decoder', - input=decoder_inputs, - output_mem=decoder_mem, - size=decoder_size) - # Defines the output function. - out = mixed_layer(input=[full_matrix_projection(input=gru_step)], - size=target_dict_dim, - bias_attr=True, - act=SoftmaxActivation()) - return out - - -================= -Generate Sequence -================= -After training the model, we can use it to generate sequences. A common practice is to use **beam search** to generate sequences. The following code snippets defines a beam search algorithm. Notice that :code:`beam_search` function assumes the output function of the :code:`step` returns a softmax normalized probability vector of the next token. We made the following changes to the model. - -* use :code:`GeneratedInput` for trg_embedding. :code:`GeneratedInput` computes the embedding of the generated token at the last time step for the input at the current time step. -* use :code:`beam_search` function. This function needs to set: - - - :code:`bos_id`: the start token. Every sentence starts with the start token. - - :code:`eos_id`: the end token. Every sentence ends with the end token. - - :code:`beam_size`: the beam size used in beam search. - - :code:`max_length`: the maximum length of the generated sentences. - -* use :code:`seqtext_printer_evaluator` to print text according to index matrix and dictionary. This function needs to set: - - - :code:`id_input`: the integer ID of the data, used to identify the corresponding output in the generated files. - - :code:`dict_file`: the dictionary file for converting word id to word. - - :code:`result_file`: the path of the generation result file. - -The code is listed below: - -.. code-block:: python - - group_inputs=[StaticInput(input=encoded_vector,is_seq=True), - StaticInput(input=encoded_proj,is_seq=True)] - # In generation, decoder predicts a next target word based on - # the encoded source sequence and the last generated target word. - # The encoded source sequence (encoder's output) must be specified by - # StaticInput which is a read-only memory. - # Here, GeneratedInputs automatically fetchs the last generated word, - # which is initialized by a start mark, such as . 
- trg_embedding = GeneratedInput( - size=target_dict_dim, - embedding_name='_target_language_embedding', - embedding_size=word_vector_dim) - group_inputs.append(trg_embedding) - beam_gen = beam_search(name=decoder_group_name, - step=gru_decoder_with_attention, - input=group_inputs, - bos_id=0, # Beginnning token. - eos_id=1, # End of sentence token. - beam_size=beam_size, - max_length=max_length) - - seqtext_printer_evaluator(input=beam_gen, - id_input=data_layer(name="sent_id", size=1), - dict_file=trg_dict_path, - result_file=gen_trans_file) - outputs(beam_gen) - - -Notice that this generation technique is only useful for decoder like generation process. If you are working on sequence tagging tasks, please refer to :ref:`semantic_role_labeling` for more details. - -The full configuration file is located at :code:`demo/seqToseq/seqToseq_net.py`. diff --git a/dev/doc/_sources/howto/dev/contribute_to_paddle_en.md.txt b/dev/doc/_sources/howto/dev/contribute_to_paddle_en.md.txt deleted file mode 100644 index 9b0d3e83c0dc264650eda73e6801c60a75439b4a..0000000000000000000000000000000000000000 --- a/dev/doc/_sources/howto/dev/contribute_to_paddle_en.md.txt +++ /dev/null @@ -1,146 +0,0 @@ -# Contribute Code - -We sincerely appreciate your contributions. You can use fork and pull request -workflow to merge your code. - -## Code Requirements -- Your code must be fully documented by - [doxygen](http://www.stack.nl/~dimitri/doxygen/) style. -- Make sure the compiler option WITH\_STYLE\_CHECK is on and the compiler - passes the code style check. -- All code must have unit test. -- Pass all unit tests. - -The following tutorial guides you into submitting your contibution. - -## [Creating a Fork](https://help.github.com/articles/fork-a-repo/) - -Just head over to the GitHub page and click the "Fork" button. -It's just that simple. - -## Clone - -Paddle is currently using [git-flow branching model](http://nvie.com/posts/a-successful-git-branching-model/). -The **develop** is the main branch, and other user's branches are feature branches. - -Once you've created a fork, you can use your favorite git client to clone your -repo or just head straight to the command line: - -```shell -# Clone your fork to your local machine -git clone --branch develop https://github.com/USERNAME/Paddle.git -``` -If your repository doesn't contain **develop** branch, just create it by your own. - -```shell -git clone https://github.com/USERNAME/Paddle.git Paddle -cd Paddle -git checkout -b develop # create develop branch. -git remote add upstream https://github.com/PaddlePaddle/Paddle.git # add upstream to baidu/Paddle -git pull upstream develop # update to upstream -``` - -Then you can start to develop by making a local developement branch - -```shell -git checkout -b MY_COOL_STUFF_BRANCH -``` - -## Using `pre-commit` hook - -Paddle developers use [pre-commit](http://pre-commit.com/) tool to manage git -pre-commit hooks. It can help us format source codes (cpp, python), check some -basic thing before commit (only one EOL for each file, do not add a huge file -in git). `pre-commit` tests is a part of unit tests in Travis-CI now, every -PR doesn't fit hook can not be merged into Paddle. - -To use [pre-commit](http://pre-commit.com/), you should install it by -`pip install pre-commit`, and currently, Paddle uses `clang-format` to format -c/cpp sources. Please make sure clang-format 3.8+ installed. - -Then just run `pre-commit install` in your Paddle clone directory. 
When you -commit your code, the pre-commit hook will check the local code if there is -anything not suitable to commit, and so on. - -## Commit - -Commit your changes by following command lines: - -```shell -# show the working tree status -git status -# add modified files -git add xx -env EDITOR=vim git commit # You can write your comments by vim/nano/emacs. -``` -The first line of commit infomation is the title. The second and later lines -are the details if any. - -## Keeping Fork Up to Date - -Before pull your request, you should sync your code from the latest PaddlePaddle. -To do this, you'll need to add a remote at first: - -```shell -# see the current configured remote repository -git remote -v -# add upstream repository -git remote add upstream https://github.com/PaddlePaddle/Paddle.git -# verify the new upstream -git remote -v -``` - -Update your fork with the latest upstream changes: - -```shell -git pull --rebase upstream develop -``` - -If there are no unique commits locally, git will simply perform a fast-forward. -However, if you have been making changes (in the vast majority of cases you -probably shouldn't be), you may have to deal with conflicts. - -Now, your local master branch is up-to-date with everything modified upstream. - -## Push to GitHub - -```shell -# push to your repository in Github -git push -u origin MY_COOL_STUFF_BRANCH # create remote branch MY_COOL_STUFF_BRANCH to origin. -``` - -## Pull Request - -Go to the page for your fork on GitHub, select your development branch, -and click the **pull request button**. - -## Update your pull request with the lastest version - -During the code review, your pull request may become stale because new commits in -baidu/Paddle. GitHub allows autmotic update if there is no conflict. You can do this -by clicking the "Update Branch" button in your pull request page. However, in the case -of conflict, you need to do the update manually. You need to do the following on -your local repository: -```shell -git checkout MY_COOL_STUFF_BRANCH -git pull upstream develop -# You may need to resolve the conflict according to the git prompt. -# Make and test your code. -git push origin MY_COOL_STUFF_BRANCH -``` -Now your Pull Request is updated with the latest version. - -## Revise your pull request - -When you revise your pull request according to reviewer's comments, please use 'git commit' instead of 'git commit --amend' to commit your changes so that the reviewers can see the difference between the new pull requrest and the old pull request. - -The possible commands are - -```shell -git checkout MY_COOL_STUFF_BRANCH -git pull upstream develop # update local to newest code base. -# May be some conflicts will occured. -# And develop your cool stuff -env EDITOR=vim git commit # add your revise log -git push origin MY_COOL_STUFF_BRANCH -``` diff --git a/dev/doc/_sources/howto/dev/new_layer_en.rst.txt b/dev/doc/_sources/howto/dev/new_layer_en.rst.txt deleted file mode 100644 index 46481f5ead33dc6a26507e021fd9ae0f8316e940..0000000000000000000000000000000000000000 --- a/dev/doc/_sources/howto/dev/new_layer_en.rst.txt +++ /dev/null @@ -1,390 +0,0 @@ -================ -Write New Layers -================ - -This tutorial will guide you to write customized layers in PaddlePaddle. We will utilize fully connected layer as an example to guide you through the following steps for writing a new layer. - -- Derive equations for the forward and backward part of the layer. -- Implement C++ class for the layer. 
-- Write gradient check unit test to make sure the gradients are correctly computed. -- Implement Python wrapper for the layer. - -Derive Equations -================ - -First we need to derive equations of the *forward* and *backward* part of the layer. The forward part computes the output given an input. The backward part computes the gradients of the input and the parameters given the the gradients of the output. - -The illustration of a fully connected layer is shown in the following figure. In a fully connected layer, all output nodes are connected to all the input nodes. - -.. image:: FullyConnected.jpg - :align: center - :scale: 60 % - -The *forward part* of a layer transforms an input into the corresponding output. -Fully connected layer takes a dense input vector with dimension :math:`D_i`. It uses a transformation matrix :math:`W` with size :math:`D_i \times D_o` to project :math:`x` into a :math:`D_o` dimensional vector, and add a bias vector :math:`b` with dimension :math:`D_o` to the vector. - -.. math:: - - y = f(W^T x + b) - -where :math:`f(.)` is an nonlinear *activation* function, such as sigmoid, tanh, and Relu. - -The transformation matrix :math:`W` and bias vector :math:`b` are the *parameters* of the layer. The *parameters* of a layer are learned during training in the *backward pass*. The backward pass computes the gradients of the output function with respect to all parameters and inputs. The optimizer can use chain rule to compute the gradients of the loss function with respect to each parameter. - -Suppose our loss function is :math:`c(y)`, then - -.. math:: - - \frac{\partial c(y)}{\partial x} = \frac{\partial c(y)}{\partial y} \frac{\partial y}{\partial x} - -Suppose :math:`z = f(W^T x + b)`, then - -.. math:: - - \frac{\partial y}{\partial z} = \frac{\partial f(z)}{\partial z} - -This derivative can be automatically computed by our base layer class. - -Then, for fully connected layer, we need to compute: - -.. math:: - - \frac{\partial z}{\partial x} = W, \frac{\partial z_j}{\partial W_{ij}} = x_i, \frac{\partial z}{\partial b} = \mathbf 1 - -where :math:`\mathbf 1` is an all one vector, :math:`W_{ij}` is the number at the i-th row and j-th column of the matrix :math:`W`, :math:`z_j` is the j-th component of the vector :math:`z`, and :math:`x_i` is the i-th component of the vector :math:`x`. - -Finally we can use chain rule to calculate :math:`\frac{\partial z}{\partial x}`, and :math:`\frac{\partial z}{\partial W}`. The details of the computation will be given in the next section. - -Implement C++ Class -=================== - -The C++ class of the layer implements the initialization, forward, and backward part of the layer. The fully connected layer is at :code:`paddle/gserver/layers/FullyConnectedLayer.h` and :code:`paddle/gserver/layers/FullyConnectedLayer.cpp`. We list simplified version of the code below. - -It needs to derive the base class :code:`paddle::Layer`, and it needs to override the following functions: - -- constructor and destructor. -- :code:`init` function. It is used to initialize the parameters and settings. -- :code:`forward`. It implements the forward part of the layer. -- :code:`backward`. It implements the backward part of the layer. -- :code:`prefetch`. It is utilized to determine the rows corresponding parameter matrix to prefetch from parameter server. You do not need to override this function if your layer does not need remote sparse update. 
(most layers do not need to support remote sparse update) - - -The header file is listed below: - -.. code-block:: c++ - - namespace paddle { - /** - * A layer has full connections to all neurons in the previous layer. - * It computes an inner product with a set of learned weights, and - * (optionally) adds biases. - * - * The config file api is fc_layer. - */ - - class FullyConnectedLayer : public Layer { - protected: - WeightList weights_; - std::unique_ptr biases_; - - public: - explicit FullyConnectedLayer(const LayerConfig& config) - : Layer(config) {} - ~FullyConnectedLayer() {} - - bool init(const LayerMap& layerMap, const ParameterMap& parameterMap); - - Weight& getWeight(int idx) { return *weights_[idx]; } - - void prefetch(); - void forward(PassType passType); - void backward(const UpdateCallback& callback = nullptr); - }; - } // namespace paddle - -It defines the parameters as class variables. We use :code:`Weight` class as abstraction of parameters. It supports multi-thread update. The details of this class will be described in details in the implementations. - -- :code:`weights_` is a list of weights for the transformation matrices. The current implementation can have more than one inputs. Thus, it has a list of weights. One weight corresponds to an input. -- :code:`biases_` is a weight for the bias vector. - -The fully connected layer does not have layer configuration hyper-parameters. If there are some layer hyper-parameters, a common practice is to store it in :code:`LayerConfig& config`, and put it into a class variable in the constructor. - -The following code snippet implements the :code:`init` function. - -- First, every :code:`init` function must call the :code:`init` function of the base class :code:`Layer::init(layerMap, parameterMap);`. This statement will initialize the required variables and connections for each layer. -- The it initializes all the weights matrices :math:`W`. The current implementation can have more than one inputs. Thus, it has a list of weights. -- Finally, it initializes the bias. - - -.. code-block:: c++ - - bool FullyConnectedLayer::init(const LayerMap& layerMap, - const ParameterMap& parameterMap) { - /* Initialize the basic parent class */ - Layer::init(layerMap, parameterMap); - - /* initialize the weightList */ - CHECK(inputLayers_.size() == parameters_.size()); - for (size_t i = 0; i < inputLayers_.size(); i++) { - // Option the parameters - size_t height = inputLayers_[i]->getSize(); - size_t width = getSize(); - - // create a new weight - if (parameters_[i]->isSparse()) { - CHECK_LE(parameters_[i]->getSize(), width * height); - } else { - CHECK_EQ(parameters_[i]->getSize(), width * height); - } - Weight* w = new Weight(height, width, parameters_[i]); - - // append the new weight to the list - weights_.emplace_back(w); - } - - /* initialize biases_ */ - if (biasParameter_.get() != NULL) { - biases_ = std::unique_ptr(new Weight(1, getSize(), biasParameter_)); - } - - return true; - } - -The implementation of the forward part has the following steps. - -- Every layer must call :code:`Layer::forward(passType);` at the beginning of its :code:`forward` function. -- Then it allocates memory for the output using :code:`reserveOutput(batchSize, size);`. This step is necessary because we support the batches to have different batch sizes. :code:`reserveOutput` will change the size of the output accordingly. 
For the sake of efficiency, we will allocate new memory if we want to expand the matrix, but we will reuse the existing memory block if we want to shrink the matrix. -- Then it computes :math:`\sum_i W_i x + b` using Matrix operations. :code:`getInput(i).value` retrieve the matrix of the i-th input. Each input is a :math:`batchSize \times dim` matrix, where each row represents an single input in a batch. For a complete lists of supported matrix operations, please refer to :code:`paddle/math/Matrix.h` and :code:`paddle/math/BaseMatrix.h`. -- Finally it applies the activation function using :code:`forwardActivation();`. It will automatically applies the corresponding activation function specifies in the network configuration. - - -.. code-block:: c++ - - void FullyConnectedLayer::forward(PassType passType) { - Layer::forward(passType); - - /* malloc memory for the output_ if necessary */ - int batchSize = getInput(0).getBatchSize(); - int size = getSize(); - - { - // Settup the size of the output. - reserveOutput(batchSize, size); - } - - MatrixPtr outV = getOutputValue(); - - // Apply the the transformation matrix to each input. - for (size_t i = 0; i != inputLayers_.size(); ++i) { - auto input = getInput(i); - CHECK(input.value) << "The input of 'fc' layer must be matrix"; - i == 0 ? outV->mul(input.value, weights_[i]->getW(), 1, 0) - : outV->mul(input.value, weights_[i]->getW(), 1, 1); - } - - /* add the bias-vector */ - if (biases_.get() != NULL) { - outV->addBias(*(biases_->getW()), 1); - } - - /* activation */ { - forwardActivation(); - } - } - -The implementation of the backward part has the following steps. - -- :code:`backwardActivation()` computes the gradients of the activation. The gradients will be multiplies in place to the gradients of the output, which can be retrieved using :code:`getOutputGrad()`. -- Compute the gradients of bias. Notice that we an use :code:`biases_->getWGrad()` to get the gradient matrix of the corresponding parameter. After the gradient of one parameter is updated, it **MUST** call :code:`getParameterPtr()->incUpdate(callback);`. This is utilize for parameter update over multiple threads or multiple machines. -- Then it computes the gradients of the transformation matrices and inputs, and it calls :code:`incUpdate` for the corresponding parameter. This gives the framework the chance to know whether it has gathered all the gradient to one parameter so that it can do some overlapping work (e.g., network communication) - - -.. 
code-block:: c++ - - void FullyConnectedLayer::backward(const UpdateCallback& callback) { - /* Do derivation for activations.*/ { - backwardActivation(); - } - - if (biases_ && biases_->getWGrad()) { - biases_->getWGrad()->collectBias(*getOutputGrad(), 1); - - biases_->getParameterPtr()->incUpdate(callback); - } - - bool syncFlag = hl_get_sync_flag(); - - for (size_t i = 0; i != inputLayers_.size(); ++i) { - /* Calculate the W-gradient for the current layer */ - if (weights_[i]->getWGrad()) { - MatrixPtr input_T = getInputValue(i)->getTranspose(); - MatrixPtr oGrad = getOutputGrad(); - { - weights_[i]->getWGrad()->mul(input_T, oGrad, 1, 1); - } - } - - - /* Calculate the input layers error */ - MatrixPtr preGrad = getInputGrad(i); - if (NULL != preGrad) { - MatrixPtr weights_T = weights_[i]->getW()->getTranspose(); - preGrad->mul(getOutputGrad(), weights_T, 1, 1); - } - - { - weights_[i]->getParameterPtr()->incUpdate(callback); - } - } - } - -The :code:`prefetch` function specifies the rows that need to be fetched from parameter server during training. It is only useful for remote sparse training. In remote sparse training, the full parameter matrix is stored distributedly at the parameter server. When the layer uses a batch for training, only a subset of locations of the input is non-zero in this batch. Thus, this layer only needs the rows of the transformation matrix corresponding to the locations of these non-zero entries. The :code:`prefetch` function specifies the ids of these rows. - -Most of the layers do not need remote sparse training function. You do not need to override this function in this case. - -.. code-block:: c++ - - void FullyConnectedLayer::prefetch() { - for (size_t i = 0; i != inputLayers_.size(); ++i) { - auto* sparseParam = - dynamic_cast(weights_[i]->getW().get()); - if (sparseParam) { - MatrixPtr input = getInputValue(i); - sparseParam->addRows(input); - } - } - } - -Finally, you can use :code:`REGISTER_LAYER(fc, FullyConnectedLayer);` to register the layer. :code:`fc` is the identifier of the layer, and :code:`FullyConnectedLayer` is the class name of the layer. - -.. code-block:: c++ - - namespace paddle { - REGISTER_LAYER(fc, FullyConnectedLayer); - } - -If the :code:`cpp` file is put into :code:`paddle/gserver/layers`, it will be automatically added to the compilation list. - - -Write Gradient Check Unit Test -=============================== - -An easy way to verify the correctness of new layer's implementation is to write a gradient check unit test. Gradient check unit test utilizes finite difference method to verify the gradient of a layer. It modifies the input with a small perturbation :math:`\Delta x` and observes the changes of output :math:`\Delta y`, the gradient can be computed as :math:`\frac{\Delta y}{\Delta x }`. This gradient can be compared with the gradient computed by the :code:`backward` function of the layer to ensure the correctness of the gradient computation. Notice that the gradient check only tests the correctness of the gradient computation, it does not necessarily guarantee the correctness of the implementation of the :code:`forward` and :code:`backward` function. You need to write more sophisticated unit tests to make sure your layer is implemented correctly. - -All the gradient check unit tests are located in :code:`paddle/gserver/tests/test_LayerGrad.cpp`. You are recommended to put your test into a new test file if you are planning to write a new layer. 
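The finite-difference idea sketched above is framework independent. The following is a minimal NumPy sketch, not PaddlePaddle's test utilities, and every name in it is made up for illustration; it compares a numerically estimated gradient against the analytic one for :math:`z = W^T x + b`.

.. code-block:: python

    import numpy as np

    def forward(W, x, b):
        # z = W^T x + b for a single input vector x
        return W.T.dot(x) + b

    def numerical_grad_x(W, x, b, loss_grad, eps=1e-5):
        # Estimate d(loss)/dx with central differences, where the linearized
        # loss is taken to be loss_grad . z.
        grad = np.zeros_like(x)
        for i in range(x.size):
            x_plus, x_minus = x.copy(), x.copy()
            x_plus[i] += eps
            x_minus[i] -= eps
            delta = loss_grad.dot(forward(W, x_plus, b) - forward(W, x_minus, b))
            grad[i] = delta / (2 * eps)
        return grad

    rng = np.random.RandomState(0)
    W, x, b = rng.randn(4, 3), rng.randn(4), rng.randn(3)
    loss_grad = rng.randn(3)        # gradient flowing back from the loss, d(loss)/dz
    analytic = W.dot(loss_grad)     # since dz/dx = W, d(loss)/dx = W * d(loss)/dz
    assert np.allclose(numerical_grad_x(W, x, b, loss_grad), analytic, atol=1e-6)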
The gradient test of the gradient check unit test of the fully connected layer is listed below. It has the following steps. - -+ Create layer configuration. A layer configuration can include the following attributes: - - size of the bias parameter. (4096 in our example) - - type of the layer. (fc in our example) - - size of the layer. (4096 in our example) - - activation type. (softmax in our example) - - dropout rate. (0.1 in our example) -+ configure the input of the layer. In our example, we have only one input. - - type of the input (:code:`INPUT_DATA`) in our example. It can be one of the following types - - :code:`INPUT_DATA`: dense vector. - - :code:`INPUT_LABEL`: integer. - - :code:`INPUT_DATA_TARGET`: dense vector, but it does not used to compute gradient. - - :code:`INPUT_SEQUENCE_DATA`: dense vector with sequence information. - - :code:`INPUT_HASSUB_SEQUENCE_DATA`: dense vector with both sequence and sub-sequence information. - - :code:`INPUT_SEQUENCE_LABEL`: integer with sequence information. - - :code:`INPUT_SPARSE_NON_VALUE_DATA`: 0-1 sparse data. - - :code:`INPUT_SPARSE_FLOAT_VALUE_DATA`: float sparse data. - - name of the input. (:code:`layer_0` in our example) - - size of the input. (8192 in our example) - - number of non-zeros, only useful for sparse inputs. - - format of sparse data, only useful for sparse inputs. -+ each inputs needs to call :code:`config.layerConfig.add_inputs();` once. -+ call :code:`testLayerGrad` to perform gradient checks. It has the following arguments. - - layer and input configurations. (:code:`config` in our example) - - type of the layer. (:code:`fc` in our example) - - batch size of the gradient check. (100 in our example) - - whether the input is transpose. Most layers need to set it to :code:`false`. (:code:`false` in our example) - - whether to use weights. Some layers or activations perform normalization so that the sum of their output is a constant. For example, the sum of output of a softmax activation is one. In this case, we cannot correctly compute the gradients using regular gradient check techniques. A weighted sum of the output, which is not a constant, is utilized to compute the gradients. (:code:`true` in our example, because the activation of a fully connected layer can be softmax) - -.. code-block:: c++ - - void testFcLayer(string format, size_t nnz) { - // Create layer configuration. - TestConfig config; - config.biasSize = 4096; - config.layerConfig.set_type("fc"); - config.layerConfig.set_size(4096); - config.layerConfig.set_active_type("softmax"); - config.layerConfig.set_drop_rate(0.1); - // Setup inputs. - config.inputDefs.push_back( - {INPUT_DATA, "layer_0", 8192, nnz, ParaSparse(format)}); - config.layerConfig.add_inputs(); - LOG(INFO) << config.inputDefs[0].sparse.sparse << " " - << config.inputDefs[0].sparse.format; - for (auto useGpu : {false, true}) { - testLayerGrad(config, "fc", 100, /* trans */ false, useGpu, - /* weight */ true); - } - } - -If you are creating a new file for the test, such as :code:`paddle/gserver/tests/testFCGrad.cpp`, you need to add the file to :code:`paddle/gserver/tests/CMakeLists.txt`. An example is given below. All the unit tests will run when you execute the command :code:`make tests`. Notice that some layers might need high accuracy for the gradient check unit tests to work well. You need to configure :code:`WITH_DOUBLE` to `ON` when configuring cmake. - -.. 
code-block:: bash - - add_unittest_without_exec(test_FCGrad - test_FCGrad.cpp - LayerGradUtil.cpp - TestUtil.cpp) - - add_test(NAME test_FCGrad - COMMAND test_FCGrad) - - -Implement Python Wrapper -======================== - -Implementing Python wrapper allows us to use the added layer in configuration files. All the Python wrappers are in file :code:`python/paddle/trainer/config_parser.py`. An example of the Python wrapper for fully connected layer is listed below. It has the following steps: - -- Use :code:`@config_layer('fc')` at the decorator for all the Python wrapper class. :code:`fc` is the identifier of the layer. -- Implements :code:`__init__` constructor function. - - It first call :code:`super(FCLayer, self).__init__(name, 'fc', size, inputs=inputs, **xargs)` base constructor function. :code:`FCLayer` is the Python wrapper class name, and :code:`fc` is the layer identifier name. They must be correct in order for the wrapper to work. - - Then it computes the size and format (whether sparse) of each transformation matrix as well as the size. - -.. code-block:: python - - @config_layer('fc') - class FCLayer(LayerBase): - def __init__( - self, - name, - size, - inputs, - bias=True, - **xargs): - super(FCLayer, self).__init__(name, 'fc', size, inputs=inputs, **xargs) - for input_index in xrange(len(self.inputs)): - input_layer = self.get_input_layer(input_index) - psize = self.config.size * input_layer.size - dims = [input_layer.size, self.config.size] - format = self.inputs[input_index].format - sparse = format == "csr" or format == "csc" - if sparse: - psize = self.inputs[input_index].nnz - self.create_input_parameter(input_index, psize, dims, sparse, format) - self.create_bias_parameter(bias, self.config.size) - -In network configuration, the layer can be specifies using the following code snippets. The arguments of this class are: - -- :code:`name` is the name identifier of the layer instance. -- :code:`type` is the type of the layer, specified using layer identifier. -- :code:`size` is the output size of the layer. -- :code:`bias` specifies whether this layer instance has bias. -- :code:`inputs` specifies a list of layer instance names as inputs. - -.. code-block:: python - - Layer( - name = "fc1", - type = "fc", - size = 64, - bias = True, - inputs = [Input("pool3")] - ) - -You are also recommended to implement a helper for the Python wrapper, which makes it easier to write models. You can refer to :code:`python/paddle/trainer_config_helpers/layers.py` for examples. diff --git a/dev/doc/_sources/howto/index_en.rst.txt b/dev/doc/_sources/howto/index_en.rst.txt deleted file mode 100644 index 1fbfcd260b912078f00ed5b720ed607db725c4e2..0000000000000000000000000000000000000000 --- a/dev/doc/_sources/howto/index_en.rst.txt +++ /dev/null @@ -1,38 +0,0 @@ -HOW TO -======= - -Usage -------- - -.. toctree:: - :maxdepth: 1 - - usage/cmd_parameter/index_en.rst - usage/cluster/cluster_train_en.md - usage/k8s/k8s_en.md - usage/k8s/k8s_aws_en.md - -Development ------------- - -.. toctree:: - :maxdepth: 1 - - dev/new_layer_en.rst - dev/contribute_to_paddle_en.md - -Configuration -------------- - -.. toctree:: - :maxdepth: 1 - - deep_model/rnn/index_en.rst - -Optimization -------------- - -.. 
toctree::
   :maxdepth: 1

   optimization/gpu_profiling_en.rst
diff --git a/dev/doc/_sources/howto/optimization/gpu_profiling_en.rst.txt b/dev/doc/_sources/howto/optimization/gpu_profiling_en.rst.txt deleted file mode 100644 index ed208ceaf7af0c5aab88fd4fcb18fa96b8c9ff38..0000000000000000000000000000000000000000 --- a/dev/doc/_sources/howto/optimization/gpu_profiling_en.rst.txt +++ /dev/null @@ -1,240 +0,0 @@
====================
Tune GPU Performance
====================

.. contents::

This tutorial will guide you step by step through profiling and performance tuning with the built-in timer, **nvprof**, and **nvvp**.

- What is profiling?
- Why do we need profiling?
- How to do profiling?
- Profiler tools
- Hands-on tutorial
- Profiling tips

What's profiling?
=================
In software engineering, profiling is a form of dynamic program analysis that measures the space (memory) or time
complexity of a program, the usage of particular instructions, or the frequency and duration of function calls.
Most commonly, profiling information serves to aid program optimization.

Briefly, a profiler is used to measure application performance. Program analysis tools are extremely important for
understanding program behavior. Simple profiling can tell you how long an operation takes; advanced
profiling can explain why an operation takes a long time.

Why do we need profiling?
=========================
Since training a deep neural network typically takes a very long time, performance has gradually become
one of the most important concerns in deep learning. The first step to improving performance is to understand which parts
are slow. There is no point in improving the performance of a region which doesn't take much time!


How to do profiling?
====================
To achieve maximum performance, there are five steps you can take to reach your goal.

- Profile the code
- Find the slow parts
- Work out why they're slow
- Make them fast
- Profile the code again

A processor usually has two key performance limits: floating point throughput and
memory throughput. A GPU additionally needs enough parallelism to fulfill its potential;
this is why it can be so fast.

Profiler Tools
==============
For general GPU profiling, a number of tools are provided by both NVIDIA and third parties.

**nvprof** is the NVIDIA command-line profiler and **nvvp** is the (GUI based) NVIDIA visual profiler.
In this tutorial, we will focus on nvprof and nvvp.

:code:`test_GpuProfiler` from the :code:`paddle/math/tests` directory will be used to evaluate
the profilers above.

.. literalinclude:: ../../../paddle/math/tests/test_GpuProfiler.cpp
   :language: c++
   :lines: 137-151
   :linenos:

The above code snippet includes two methods; you can use either of them to profile the regions of interest.

1. :code:`REGISTER_TIMER_INFO` is a built-in timer wrapper which can calculate the time overhead of both CPU functions and CUDA kernels.

2. :code:`REGISTER_GPU_PROFILER` is a general purpose wrapper object of :code:`cudaProfilerStart` and :code:`cudaProfilerStop` to avoid
program crashes when the CPU version of PaddlePaddle invokes them.

You can find more details about how to use both of them in the next section.

Hands-on Approach
=================

Built-in Timer
--------------

To enable the built-in timer in PaddlePaddle, you first have to add :code:`REGISTER_TIMER_INFO` to the regions of your interest.
-Then, all information could be stamped in the console via :code:`printStatus` or :code:`printAllStatus` function. -As a simple example, consider the following: - -1. Add :code:`REGISTER_TIMER_INFO` and :code:`printAllStatus` functions (see the emphasize-lines). - - .. literalinclude:: ../../../paddle/math/tests/test_GpuProfiler.cpp - :language: c++ - :lines: 137-151 - :emphasize-lines: 8-12,14 - :linenos: - -2. Configure cmake with **WITH_TIMER** and recompile PaddlePaddle. - - .. code-block:: bash - - cmake .. -DWITH_TIMER=ON - make - -3. Execute your code and observe the results (see the emphasize-lines). - - .. code-block:: bash - :emphasize-lines: 1,12-15 - - > ./paddle/math/tests/test_GpuProfiler - I1117 11:13:42.313065 2522362816 Util.cpp:155] commandline: ./paddle/math/tests/test_GpuProfiler - I1117 11:13:42.845065 2522362816 Util.cpp:130] Calling runInitFunctions - I1117 11:13:42.845208 2522362816 Util.cpp:143] Call runInitFunctions done. - [==========] Running 1 test from 1 test case. - [----------] Global test environment set-up. - [----------] 1 test from Profiler - [ RUN ] Profiler.BilinearFwdBwd - I1117 11:13:42.845310 2522362816 test_GpuProfiler.cpp:114] Enable GPU Profiler Stat: [testBilinearFwdBwd] "numSamples = 10, channels = 16, im - gSizeX = 64, imgSizeY = 64" - I1117 11:13:42.850154 2522362816 ThreadLocal.cpp:37] thread use undeterministic rand seed:20659751 - I1117 11:13:42.981501 2522362816 Stat.cpp:130] ======= StatSet: [GlobalStatInfo] status ====== - I1117 11:13:42.981539 2522362816 Stat.cpp:133] Stat=testBilinearFwdBwd total=136.141 avg=136.141 max=136.141 min=136.141 count=1 - I1117 11:13:42.981572 2522362816 Stat.cpp:141] ======= BarrierStatSet status ====== - I1117 11:13:42.981575 2522362816 Stat.cpp:154] -------------------------------------------------- - [ OK ] Profiler.BilinearFwdBwd (136 ms) - [----------] 1 test from Profiler (136 ms total) - - [----------] Global test environment tear-down - [==========] 1 test from 1 test case ran. (136 ms total) - [ PASSED ] 1 test. - -nvprof profiler ---------------- - -To use this command line profiler **nvprof**, you can simply issue the following command: - -1. Add :code:`REGISTER_GPU_PROFILER` function (see the emphasize-lines). - - .. literalinclude:: ../../../paddle/math/tests/test_GpuProfiler.cpp - :language: c++ - :lines: 137-151 - :emphasize-lines: 6-7 - :linenos: - -2. Configure cmake with **WITH_PROFILER** and recompile PaddlePaddle. - - .. code-block:: bash - - cmake .. -DWITH_PROFILER=ON - make - -3. Use Nvidia profiler **nvprof** to profile the binary. - - .. code-block:: bash - - nvprof ./paddle/math/tests/test_GpuProfiler - -Then, you can get the following profiling result: - -.. 
code-block:: bash - - ==78544== Profiling application: ./paddle/math/tests/test_GpuProfiler - ==78544== Profiling result: - Time(%) Time Calls Avg Min Max Name - 27.60% 9.6305ms 5 1.9261ms 3.4560us 6.4035ms [CUDA memcpy HtoD] - 26.07% 9.0957ms 1 9.0957ms 9.0957ms 9.0957ms KeBilinearInterpBw - 23.78% 8.2977ms 1 8.2977ms 8.2977ms 8.2977ms KeBilinearInterpFw - 22.55% 7.8661ms 2 3.9330ms 1.5798ms 6.2863ms [CUDA memcpy DtoH] - - ==78544== API calls: - Time(%) Time Calls Avg Min Max Name - 46.85% 682.28ms 8 85.285ms 12.639us 682.03ms cudaStreamCreateWithFlags - 39.83% 580.00ms 4 145.00ms 302ns 550.27ms cudaFree - 9.82% 143.03ms 9 15.892ms 8.7090us 142.78ms cudaStreamCreate - 1.23% 17.983ms 7 2.5690ms 23.210us 6.4563ms cudaMemcpy - 1.23% 17.849ms 2 8.9247ms 8.4726ms 9.3768ms cudaStreamSynchronize - 0.66% 9.5969ms 7 1.3710ms 288.43us 2.4279ms cudaHostAlloc - 0.13% 1.9530ms 11 177.54us 7.6810us 591.06us cudaMalloc - 0.07% 1.0424ms 8 130.30us 1.6970us 453.72us cudaGetDevice - 0.04% 527.90us 40 13.197us 525ns 253.99us cudaEventCreateWithFlags - 0.03% 435.73us 348 1.2520us 124ns 42.704us cuDeviceGetAttribute - 0.03% 419.36us 1 419.36us 419.36us 419.36us cudaGetDeviceCount - 0.02% 260.75us 2 130.38us 129.32us 131.43us cudaGetDeviceProperties - 0.02% 222.32us 2 111.16us 106.94us 115.39us cudaLaunch - 0.01% 214.06us 4 53.514us 28.586us 77.655us cuDeviceGetName - 0.01% 115.45us 4 28.861us 9.8250us 44.526us cuDeviceTotalMem - 0.01% 83.988us 4 20.997us 578ns 77.760us cudaSetDevice - 0.00% 38.918us 1 38.918us 38.918us 38.918us cudaEventCreate - 0.00% 34.573us 31 1.1150us 279ns 12.784us cudaDeviceGetAttribute - 0.00% 17.767us 1 17.767us 17.767us 17.767us cudaProfilerStart - 0.00% 15.228us 2 7.6140us 3.5460us 11.682us cudaConfigureCall - 0.00% 14.536us 2 7.2680us 1.1490us 13.387us cudaGetLastError - 0.00% 8.6080us 26 331ns 173ns 783ns cudaSetupArgument - 0.00% 5.5470us 6 924ns 215ns 2.6780us cuDeviceGet - 0.00% 5.4090us 6 901ns 328ns 3.3320us cuDeviceGetCount - 0.00% 4.1770us 3 1.3920us 1.0630us 1.8300us cuDriverGetVersion - 0.00% 3.4650us 3 1.1550us 1.0810us 1.2680us cuInit - 0.00% 830ns 1 830ns 830ns 830ns cudaRuntimeGetVersion - - -nvvp profiler -------------- - -For visual profiler **nvvp**, you can either import the output of :code:`nvprof –o ...` or -run application through GUI. - -**Note: nvvp also support CPU profiling** (Click the box in nvvp to enable profile execution on CPU). - -.. image:: nvvp1.png - :align: center - :scale: 33% - -From the perspective of kernel functions, **nvvp** can even illustrate why does an operation take a long time? -As shown in the following figure, kernel's block usage, register usage and shared memory usage from :code:`nvvp` -allow us to fully utilize all warps on the GPU. - -.. image:: nvvp2.png - :align: center - :scale: 33% - -From the perspective of application, **nvvp** can give you some suggestions to address performance bottleneck. -For instance, some advice in data movement and compute utilization from the below figure can guide you to tune performance. - -.. image:: nvvp3.png - :align: center - :scale: 33% - -.. image:: nvvp4.png - :align: center - :scale: 33% - -Profiling tips -============== - -- The **nvprof** and **nvvp** output is a very good place to start. -- The timeline is a good place to go next. -- Only dig deep into a kernel if it’s taking a significant amount of your time. -- Where possible, try to match profiler output with theory. - 1) For example, if I know I’m moving 1GB, and my kernel takes 10ms, I expect the profiler to report 100GB/s. 
- 2) Discrepancies are likely to mean your application isn’t doing what you thought it was. -- Know your hardware: If your GPU can do 6 TFLOPs, and you’re already doing 5.5 TFLOPs, you won’t go much faster! - - -Profiling is a key step in optimization. Sometimes quite simple changes can lead to big improvements in performance. -Your mileage may vary! - -Reference -========= -Jeremy Appleyard, `GPU Profiling for Deep Learning `_, 2015 diff --git a/dev/doc/_sources/howto/usage/cluster/cluster_train_en.md.txt b/dev/doc/_sources/howto/usage/cluster/cluster_train_en.md.txt deleted file mode 100644 index c60876721cbf5565d6e48c8061811aacada748cd..0000000000000000000000000000000000000000 --- a/dev/doc/_sources/howto/usage/cluster/cluster_train_en.md.txt +++ /dev/null @@ -1,156 +0,0 @@ -# Run Distributed Training - -In this article, we explain how to run distributed Paddle training jobs on clusters. We will create the distributed version of the single-process training example, [recommendation](https://github.com/baidu/Paddle/tree/develop/demo/recommendation). - -[Scripts](https://github.com/baidu/Paddle/tree/develop/paddle/scripts/cluster_train) used in this article launch distributed jobs via SSH. They also work as a reference for users running more sophisticated cluster management systems like MPI and [Kubernetes](https://github.com/PaddlePaddle/Paddle/tree/develop/doc/howto/usage/k8s). - -## Prerequisite - -1. Aforementioned scripts use a Python library [fabric](http://www.fabfile.org/) to run SSH commands. We can use `pip` to install fabric: - - ```bash - pip install fabric - ``` - -1. We need to install PaddlePaddle on all nodes in the cluster. To enable GPUs, we need to install CUDA in `/usr/local/cuda`; otherwise Paddle would report errors at runtime. - -1. Set the `ROOT_DIR` variable in [`cluster_train/conf.py`] on all nodes. For convenience, we often create a Unix user `paddle` on all nodes and set `ROOT_DIR=/home/paddle`. In this way, we can write public SSH keys into `/home/paddle/.ssh/authorized_keys` so that user `paddle` can SSH to all nodes without password. - -## Prepare Job Workspace - -We refer to the directory where we put dependent libraries, config files, etc., as *workspace*. - -These `train/test` data should be prepared before launching cluster job. To satisfy the requirement that train/test data are placed in different directory from workspace, PADDLE refers train/test data according to index file named as `train.list/test.list` which are used in model config file. So the train/test data also contains train.list/test.list two list file. All local training demo already provides scripts to help you create these two files, and all nodes in cluster job will handle files with same logical code in normal condition. - -Generally, you can use same model file from local training for cluster training. What you should have in mind that, the `batch_size` set in `setting` function in model file means batch size in `each` node of cluster job instead of total batch size if synchronization SGD was used. - -Following steps are based on [demo/recommendation](https://github.com/PaddlePaddle/Paddle/tree/develop/demo/recommendation) demo in demo directory. - -You just go through demo/recommendation tutorial doc until `Train` section, and at last you will get train/test data and model configuration file. Finaly, just use demo/recommendation as workspace for cluster training. - -At last your workspace should look like as follow: -``` -. 
|-- common_utils.py
|-- data
|   |-- config.json
|   |-- config_generator.py
|   |-- meta.bin
|   |-- meta_config.json
|   |-- meta_generator.py
|   |-- ml-1m
|   |-- ml_data.sh
|   |-- ratings.dat.test
|   |-- ratings.dat.train
|   |-- split.py
|   |-- test.list
|   `-- train.list
|-- dataprovider.py
|-- evaluate.sh
|-- prediction.py
|-- preprocess.sh
|-- requirements.txt
|-- run.sh
`-- trainer_config.py
```
Not all of these files are needed for cluster training, but it's not necessary to remove the unused ones.

`trainer_config.py`
Indicates the model config file.

`train.list` and `test.list`
File indexes. They store the relative or absolute file paths of all train/test data on the current node.

`dataprovider.py`
Used to read train/test samples. It is the same as in local training.

`data`
All files in the data directory are referred to by train.list/test.list, which are in turn referred to by the data provider.


## Prepare Cluster Job Configuration

The options below must be carefully set in cluster_train/conf.py.

`HOSTS` hostnames or IP addresses of all nodes that will run the cluster job. You can also append a user and SSH port to the hostname, such as root@192.168.100.17:9090.

`ROOT_DIR` the workspace root directory used for placing the job workspace directory.

`PADDLE_NIC` the NIC (Network Interface Card) interface name for the cluster communication channel, such as eth0 for Ethernet or ib0 for InfiniBand.

`PADDLE_PORT` port number for the cluster communication channel.

`PADDLE_PORTS_NUM` the number of ports used for the cluster communication channel. If the number of cluster nodes is small (fewer than 5~6 nodes), it is recommended to set it to a larger value, such as 2~8, for better network performance.

`PADDLE_PORTS_NUM_FOR_SPARSE` the number of ports used for the sparse updater's cluster communication channel. If sparse remote update is used, set it like `PADDLE_PORTS_NUM`.

`LD_LIBRARY_PATH` set an additional LD_LIBRARY_PATH for the cluster job. You can use it to set the CUDA libraries path.

The default configuration is as follows:

```python
HOSTS = [
    "root@192.168.100.17",
    "root@192.168.100.18",
]

'''
workspace configuration
'''

#root dir for workspace
ROOT_DIR = "/home/paddle"

'''
network configuration
'''
#pserver nics
PADDLE_NIC = "eth0"
#pserver port
PADDLE_PORT = 7164
#pserver ports num
PADDLE_PORTS_NUM = 2
#pserver sparse ports num
PADDLE_PORTS_NUM_FOR_SPARSE = 2

#environments setting for all processes in cluster job
LD_LIBRARY_PATH="/usr/local/cuda/lib64:/usr/lib64"
```

### Launching Cluster Job
`paddle.py` provides automated scripts to start all PaddlePaddle cluster processes on different nodes. By default, all command line options can be set as `paddle.py` command options, and `paddle.py` will transparently and automatically pass these options to the lower-level PaddlePaddle processes.

`paddle.py` provides two distinct command options for easy job launching.

`job_dispatch_package` set it to a local `workspace` directory; it will be dispatched to all nodes set in conf.py. This is helpful when you frequently modify the workspace files, since repeated manual multi-node workspace deployment quickly becomes tedious.
`job_workspace` set it to an already deployed workspace directory; `paddle.py` will skip the dispatch stage and directly launch the cluster job on all nodes. It helps to reduce heavy dispatch latency.
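Before launching, the values in `cluster_train/conf.py` have to describe your own cluster. The snippet below is only an illustrative sketch of such a customization: the hostnames, user name, SSH port, NIC name, and port counts are placeholders to be replaced with your environment's values; only the option names come from the description above.

```python
# Hypothetical cluster_train/conf.py customization -- all values are placeholders.
HOSTS = [
    "paddle@192.168.100.17:2222",   # user `paddle`, non-default SSH port 2222
    "paddle@192.168.100.18:2222",
]

# workspace root directory on every node
ROOT_DIR = "/home/paddle"

# use the InfiniBand interface instead of Ethernet
PADDLE_NIC = "ib0"
PADDLE_PORT = 7164
PADDLE_PORTS_NUM = 4                 # a few extra ports for a small cluster
PADDLE_PORTS_NUM_FOR_SPARSE = 2      # only needed when sparse remote update is used

LD_LIBRARY_PATH = "/usr/local/cuda/lib64:/usr/lib64"
```

With the configuration in place, launch the job as described next.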
`cluster_train/run.sh` provides a sample command line to run the `demo/recommendation` cluster job. Just modify `job_dispatch_package` and `job_workspace` to your own directories, then:
```
sh run.sh
```

The cluster job will start in several seconds.

### Kill Cluster Job
`paddle.py` can capture the `Ctrl + C` SIGINT signal to automatically kill all processes launched by it, so just stop `paddle.py` to kill the cluster job. You should kill the job manually if the program crashed.

### Check Cluster Training Result
Check the logs in $workspace/log for details; each node has the same log structure.

`paddle_trainer.INFO`
Provides almost all internal output logs for training, the same as local training. Check runtime model convergence here.

`paddle_pserver2.INFO`
Provides the pserver running log, which can help to diagnose distributed errors.

`server.log`
Provides the stderr and stdout of the pserver process. Check this error log if training crashes.

`train.log`
Provides the stderr and stdout of the trainer process. Check this error log if training crashes.

### Check Model Output
After one pass has finished, model files will be written to the `output` directory on node 0.
`nodefile` in the workspace indicates the node id of the current cluster job.
diff --git a/dev/doc/_sources/howto/usage/cmd_parameter/arguments_en.md.txt b/dev/doc/_sources/howto/usage/cmd_parameter/arguments_en.md.txt deleted file mode 100644 index e5546f0ddc78a9f8bdc306a19c2fe9a415463e5a..0000000000000000000000000000000000000000 --- a/dev/doc/_sources/howto/usage/cmd_parameter/arguments_en.md.txt +++ /dev/null @@ -1,404 +0,0 @@
# Argument Outline

It looks like there are a lot of arguments. However, most of them are for developers or are already set automatically in the cluster submitting environment, so users do not need to care about them. Here, we divide these arguments into several classes according to the scenario in which they are used. For example, the arguments in `common` can be used in all scenarios, some arguments can only be used in certain layers, some are needed for multi-machine training in a cluster, and so on.
Each argument applies to one or more of the four scenarios: local train, cluster train, local test, and cluster test. Grouped by class, the arguments are:

* common: `job`, `use_gpu`, `local`, `config`, `config_args`, `num_passes`, `trainer_count`, `version`, `show_layer_stat`
* train: `dot_period`, `test_period`, `saving_period`, `show_parameter_stats_period`, `init_model_path`, `load_missing_parameter_strategy`, `saving_period_by_batches`, `use_old_updater`, `enable_grad_share`, `grad_share_block_num`, `log_error_clipping`, `log_clipping`, `save_only_one`, `start_pass`
* train/test: `save_dir`
* testing during training: `test_period`, `average_test_period`
* test: `model_list`, `test_wait`, `test_pass`, `predict_output_dir`, `distribute_test`
* Auc/PnpairValidation: `predict_file`
* GPU: `gpu_id`, `parallel_nn`, `allow_only_one_model_on_one_gpu`, `cudnn_dir`, `cuda_dir`, `cudnn_conv_workspace_limit_in_mb`
* RNN: `beam_size`, `rnn_use_batch`, `prev_batch_state`, `diy_beam_search_prob_so`
* metric learning: `external`, `data_server_port`
* PServer: `start_pserver`, `pservers`, `port`, `port_num`, `ports_num_for_sparse`, `nics`, `rdma_tcp`, `small_messages`, `loadsave_parameters_in_pserver`, `log_period_server`, `pserver_num_threads`, `sock_send_buf_size`, `sock_recv_buf_size`, `num_gradient_servers`, `parameter_block_size`, `parameter_block_size_for_sparse`
* Async SGD: `async_count`, `async_lagged_ratio_min`, `async_lagged_ratio_default`
* Performance Tuning: `log_barrier_abstract`, `log_barrier_lowest_nodes`, `log_barrier_show_log`, `check_sparse_distribution_batches`, `check_sparse_distribution_ratio`, `check_sparse_distribution_unbalance_degree`, `check_sparse_distribution_in_pserver`, `show_check_sparse_distribution_log`
* Data Provider: `memory_threshold_on_load_data`
* RandomNumber: `seed`, `thread_local_rand_use_global_seed`
* UnitTest: `checkgrad_eps`
* Matrix/Vector: `enable_parallel_vector`
- diff --git a/dev/doc/_sources/howto/usage/cmd_parameter/detail_introduction_en.md.txt b/dev/doc/_sources/howto/usage/cmd_parameter/detail_introduction_en.md.txt deleted file mode 100644 index 33b7ec0d51a96ee126197e7aa819fdae0d3dc353..0000000000000000000000000000000000000000 --- a/dev/doc/_sources/howto/usage/cmd_parameter/detail_introduction_en.md.txt +++ /dev/null @@ -1,336 +0,0 @@ -```eval_rst -.. _cmd_detail_introduction: -``` - -# Detail Description - -## Common - -* `--job` - - Job mode, including: **train, test, checkgrad**, where checkgrad is mainly for developers and users do not need to care about. - - type: string (default: train) - -* `--config` - - Use to specfiy network configure file. - - type: string (default: null). - -* `--use_gpu` - - Whether to use GPU for training, false is cpu mode and true is gpu mode. - - type: bool (default: 1). - -* `--local` - - Whether the training is in local mode or not. True when training locally or using one node in cluster. False when using multiple machines in cluster. - - type: bool (default: 1). - -* `--trainer_count` - - Define the number of threads used in one machine. For example, trainer_count = 4, means use 4 GPU in GPU mode and 4 threads in CPU mode. Each thread (or GPU) is assigned to 1/4 samples in current batch. That is to say, if setting batch_size of 512 in trainer config, each thread train 128 samples. - - type: int32 (default: 1). - -* `--num_passes` - - When `--job=train`, means training for num_passes passes. One pass means training all samples in dataset one time. When `--job=test`, means testing data from model of test_pass to model of (num_passes - 1). - - type: int32 (default: 100). - -* `--config_args` - - arguments passed to config file. Format: key1=value1,key2=value2. - - type: string (default: null). - -* `--version` - - Whether to print version information. - - type: bool (default: 0). - -* `--show_layer_stat` - - Whether to show the statistics of each layer **per batch**. - - type: bool (default: 0). - -## Train - -* `--log_period` - - Log progress every log_period batches. - - type: int32 (default: 100). - -* `--dot_period` - - Print '.' every dot_period batches. - - type: int32 (default: 1). - -* `--saving_period` - - Save parameters every saving_period passes - - type: int32 (default: 1). - -* `--save_dir` - - Directory for saving model parameters. It needs to be specified, but no need to be created in advance. - - type: string (default: null). - -* `--start_pass` - - Start training from this pass. It will load parameters from the previous pass. - - type: int32 (default: 0). - -* `--show_parameter_stats_period` - - Show parameter statistic during training every show_parameter_stats_period batches. It will not show by default. - - type: int32 (default: 0). - -* `--save_only_one` - - Save the parameters only in last pass, while the previous parameters will be removed. - - type: bool (default: 0). - -* `--load_missing_parameter_strategy` - - Specify the loading operation when model file is missing. Now support fail/rand/zero three operations. - - `fail`: program will exit. - - `rand`: uniform or normal distribution according to **initial\_strategy** in network config. Uniform range is: **[mean - std, mean + std]**, where mean and std are configures in trainer config. - - `zero`: all parameters are zero. - - type: string (default: fail). - -* `--init_model_path` - - Path of the initialization model. If it was set, start\_pass will be ignored. It can be used to specify model path in testing mode as well. 
- - type: string (default: null). - -* `--saving_period_by_batches` - - Save parameters every saving_period_by_batches batches in one pass. - - type: int32 (default: 0). - -* `--log_error_clipping` - - Whether to print error clipping log when setting **error_clipping_threshold** in layer config. If it is true, log will be printed in backward propagation **per batch**. This clipping effects on **gradient of output**. - - type: bool (default: 0). - -* `--log_clipping` - - Enable print log clipping or not when setting **gradient_clipping_threshold** in trainer config. This clipping effects on **gradient w.r.t. (with respect to) weight**. - - type: bool (default: 0). - -* `--use_old_updater` - - Whether to use the old RemoteParameterUpdater. Default use ConcurrentRemoteParameterUpdater. It is mainly for deverlopers and users usually do not need to care about. - - type: bool (default: 0). - -* `--enable_grad_share` - - threshold for enable gradient parameter, which is shared for batch multi-cpu training. - - type: int32 (default: 100 \* 1024 \* 1024). - -* `--grad_share_block_num` - - block number of gradient parameter, which is shared for batch multi-cpu training. - - type: int32 (default: 64). - -## Test - -* `--test_pass` - - Load parameter from this pass to test. - - type: int32 (default: -1). - -* `--test_period` - - if equal 0, do test on all test data at the end of each pass. While if equal non-zero, do test on all test data every test_period batches. - - type: int32 (default: 0). - -* `--test_wait` -  - Whether to wait for parameter per pass if not exist. It can be used when user launch another process to perfom testing during the training process. - - type: bool (default: 0). - -* `--model_list` - - File that saves the model list when testing. - - type: string (default: "", null). - -* `--predict_output_dir` - - Directory that saves the layer output. It is configured in Outputs() in network config. Default, this argument is null, meaning save nothing. Specify this directory if you want to save feature map of some layers in testing mode. Note that, layer outputs are values after activation function. - - type: string (default: "", null). - -* `--average_test_period` - - Do test on average parameter every `average_test_period` batches. It MUST be devided by FLAGS_log_period. Default 0 means do not test on average parameter. - - type: int32 (default: 0). - -* `--distribute_test` - - Testing in distribute environment will merge results from multiple machines. - - type: bool (default: 0). - -* `--predict_file` - - File name for saving predicted result. Default, this argument is null, meaning save nothing. Now, this argument is only used in AucValidationLayer and PnpairValidationLayer, and saves predicted result every pass. - - type: string (default: "", null). - -## GPU - -* `--gpu_id` - - Which gpu core to use. - - type: int32 (default: 0). - -* `--allow_only_one_model_on_one_gpu` - - If true, do not allow multiple models on one GPU device. - - type: bool (default: 1). - -* `--parallel_nn` - - Whether to use multi-thread to calculate one neural network or not. If false, use gpu_id specify which gpu core to use (the device property in trainer config will be ingored). If true, the gpu core is specified in trainer config (gpu_id will be ignored). - - type: bool (default: 0). - -* `--cudnn_dir` - - Choose path to dynamic load NVIDIA CuDNN library, for instance, /usr/local/cuda/lib64. 
[Default]: LD_LIBRARY_PATH - - type: string (default: "", null) - -* `--cuda_dir` - - Choose path to dynamic load NVIDIA CUDA library, for instance, /usr/local/cuda/lib64. [Default]: LD_LIBRARY_PATH - - type: string (default: "", null) - -* `--cudnn_conv_workspace_limit_in_mb` - - Specify cuDNN max workspace limit, in units MB, 4096MB=4GB by default. - - type: int32 (default: 4096MB=4GB) - -## NLP: RNN/LSTM/GRU -* `--rnn_use_batch` - - Whether to use batch method for calculation in simple RecurrentLayer. - - type: bool (default: 0). - -* `--prev_batch_state` - - batch is continue with next batch. - - type: bool (default: 0). - -* `--beam_size` - - Beam search uses breadth-first search to build its search tree. At each level of the tree, it generates all successors of the states at the current level, sorting them in increasing order of heuristic cost. However, it only stores a predetermined number of best states at each level (called the beam size). - - type: int32 (default: 1). - -* `--diy_beam_search_prob_so` - - Specify shared dynamic library. It can be defined out of paddle by user. - - type: string (default: "", null). - -## Metric Learning -* `--external` - - Whether to use external machine for metric learning. - - type: bool (default: 0). - -* `--data_server_port` - - Listening port for dserver (data server), dserver is mainly used in metric learning. - - type: int32 (default: 21134). - -## DataProvider - -* `--memory_threshold_on_load_data` - - Stop loading data when memory is not sufficient. - - type: double (default: 1.0). - -## Unit Test - -* `--checkgrad_eps` - - parameter change size for checkgrad. - - type: double (default: 1e-05). - -## Parameter Server and Distributed Communication - -* `--start_pserver` - - Whether to start pserver (parameter server). - - type: bool (default: 0). - -* `--pservers` - - Comma separated IP addresses of pservers. - - type: string (default: "127.0.0.1"). - -* `--port` - - Listening port for pserver. - - type: int32 (default: 20134). - -* `--ports_num` - - The ports number for parameter send, increment based on default port number. - - type: int32 (default: 1). - -* `--trainer_id` - - In distributed training, each trainer must be given an unique id ranging from 0 to num_trainers-1. Trainer 0 is the master trainer. User do not need to care this flag. - - type: int32 (default: 0). - -* `--num_gradient_servers` - - Numbers of gradient servers. This arguments is set automatically in cluster submitting environment. - - type: int32 (default: 1). - -* `--small_messages` - - If message size is small, recommend set it True to enable quick ACK and no delay - - type: bool (default: 0). - -* `--sock_send_buf_size` - - Restrict socket send buffer size. It can reduce network congestion if set carefully. - - type: int32 (default: 1024 \* 1024 \* 40). - -* `--sock_recv_buf_size` - - Restrict socket recieve buffer size. - - type: int32 (default: 1024 \* 1024 \* 40). - -* `--parameter_block_size` - - Parameter block size for pserver, will automatically calculate a suitable value if it's not set. - - type: int32 (default: 0). - -* `--parameter_block_size_for_sparse` - - Parameter block size for sparse update pserver, will automatically calculate a suitable value if it's not set. - - type: int32 (default: 0). - -* `--log_period_server` - - Log progress every log_period_server batches at pserver end. - - type: int32 (default: 500). - -* `--loadsave_parameters_in_pserver` - - Load and save parameters in pserver. Only work when parameter set sparse_remote_update. 
- - type: bool (default: 0). - -* `--pserver_num_threads` - - number of threads for sync op exec. - - type: bool (default: 1). - -* `--ports_num_for_sparse` - - The ports number for parameter send, increment based on default (port + ports_num). It is used by sparse Tranning. - - type: int32 (default: 0). - -* `--nics` - - Network device name for pservers, already set in cluster submitting environment. - - type: string (default: "xgbe0,xgbe1"). - -* `--rdma_tcp` - - Use rdma or tcp transport protocol, already set in cluster submitting environment. - - type: string (default: "tcp"). - -## Async SGD -* `--async_count` - - Defined the asynchronous training length, if 0, then use synchronized training. - - type: int32 (default: 0). - -* `--async_lagged_ratio_min` - - Control the minimize value of `config_.async_lagged_grad_discard_ratio()`. - - type: double (default: 1.0). - -* `--async_lagged_ratio_default` - - If async_lagged_grad_discard_ratio is not set in network config, use it as defalut value. - - type: double (default: 1.5). - -## Performance Tuning - -* `--log_barrier_abstract` - - If true, show abstract barrier performance information. - - type: bool (default: 1). - -* `--log_barrier_show_log` - - If true, always show barrier abstract even with little gap. - - type: bool (default: 0). - -* `--log_barrier_lowest_nodes` - - How many lowest node will be logged. - - type: int32 (default: 5). - -* `--check_sparse_distribution_in_pserver` - - Whether to check that the distribution of sparse parameter on all pservers is balanced. - - type: bool (default: 0). - -* `--show_check_sparse_distribution_log` - - show log details for sparse parameter distribution in pserver. - - type: bool (default: 0). - -* `--check_sparse_distribution_batches` - - Running sparse parameter distribution check every so many batches. - - type: int32 (default: 100). - -* `--check_sparse_distribution_ratio` - - If parameters dispatched to different pservers have an unbalanced distribution for check_sparse_distribution_ratio * check_sparse_distribution_batches times, crash program. - - type: double (default: 0.6). - -* `--check_sparse_distribution_unbalance_degree` - - The ratio of maximum data size / minimun data size for different pserver. - - type: double (default: 2). - -## Matrix/Vector/RandomNumber -* `--enable_parallel_vector` - - threshold for enable parallel vector. - - type: int32 (default: 0). - -* `--seed` - - random number seed. 0 for srand(time) - - type: int32 (default: 1) - -* `--thread_local_rand_use_global_seed` - - Whether to use global seed in rand of thread local. - - type: bool (default: 0). diff --git a/dev/doc/_sources/howto/usage/cmd_parameter/index_en.rst.txt b/dev/doc/_sources/howto/usage/cmd_parameter/index_en.rst.txt deleted file mode 100644 index 0e3c72d27aca063f1b6f1c23e55718dba373c40a..0000000000000000000000000000000000000000 --- a/dev/doc/_sources/howto/usage/cmd_parameter/index_en.rst.txt +++ /dev/null @@ -1,11 +0,0 @@ -.. _cmd_line_index: - -Set Command-line Parameters -=========================== - -.. 
toctree:: - :maxdepth: 1 - - use_case_en.md - arguments_en.md - detail_introduction_en.md diff --git a/dev/doc/_sources/howto/usage/cmd_parameter/use_case_en.md.txt b/dev/doc/_sources/howto/usage/cmd_parameter/use_case_en.md.txt deleted file mode 100644 index e287f0c4b9617cbc6504596512bf408c56dc10f9..0000000000000000000000000000000000000000 --- a/dev/doc/_sources/howto/usage/cmd_parameter/use_case_en.md.txt +++ /dev/null @@ -1,182 +0,0 @@ -# Use Case - -## Local Training - -These command line arguments are commonly used by local training experiments, such as image classification, natural language processing, et al. - -``` -paddle train \ - --use_gpu=1/0 \ #1:GPU,0:CPU(default:true) - --config=network_config \ - --save_dir=output \ - --trainer_count=COUNT \ #(default:1) - --test_period=M \ #(default:0) - --num_passes=N \ #(defalut:100) - --log_period=K \ #(default:100) - --dot_period=1000 \ #(default:1) - #[--show_parameter_stats_period=100] \ #(default:0) - #[--saving_period_by_batches=200] \ #(default:0) -``` -`show_parameter_stats_period` and `saving_period_by_batches` are optional according to your task. - -### 1) Pass Command Argument to Network config - -`config_args` is a useful parameter to pass arguments to network config. - -``` ---config_args=generating=1,beam_size=5,layer_num=10 \ -``` -And `get_config_arg` can be used to parse these arguments in network config as follows: - -``` -generating = get_config_arg('generating', bool, False) -beam_size = get_config_arg('beam_size', int, 3) -layer_num = get_config_arg('layer_num', int, 8) -``` - -`get_config_arg`: - -``` -get_config_arg(name, type, default_value) -``` -- name: the name specified in the `--config_args` -- type: value type, bool, int, str, float etc. -- default_value: default value if not set. - -### 2) Use Model to Initialize Network - -add argument: - -``` ---init_model_path=model_path ---load_missing_parameter_strategy=rand -``` - -## Local Testing - -Method 1: - -``` -paddle train --job=test \ - --use_gpu=1/0 \ - --config=network_config \ - --trainer_count=COUNT \ - --init_model_path=model_path \ -``` -- use init\_model\_path to specify test model. -- only can test one model. - -Method 2: - -``` -paddle train --job=test \ - --use_gpu=1/0 \ - --config=network_config \ - --trainer_count=COUNT \ - --model_list=model.list \ -``` -- use model_list to specify test models -- can test several models, where model.list likes: - -``` -./alexnet_pass1 -./alexnet_pass2 -``` - -Method 3: - -``` -paddle train --job=test \ - --use_gpu=1/0 \ - --config=network_config \ - --trainer_count=COUNT \ - --save_dir=model \ - --test_pass=M \ - --num_passes=N \ -``` -This way must use model path saved by Paddle like this: `model/pass-%5d`. Testing model is from M-th pass to (N-1)-th pass. For example: M=12 and N=14 will test `model/pass-00012` and `model/pass-00013`. - -## Sparse Training - -Sparse training is usually used to accelerate calculation when input is sparse data with highly dimension. For example, dictionary dimension of input data is 1 million, but one sample just have several words. In paddle, sparse matrix multiplication is used in forward propagation and sparse updating is perfomed on weight updating after backward propagation. - -### 1) Local training - -You need to set **sparse\_update=True** in network config. Check the network config documentation for more details. - -### 2) cluster training - -Add the following argument for cluster training of a sparse model. 
At the same time you need to set **sparse\_remote\_update=True** in network config. Check the network config documentation for more details. - -``` ---ports_num_for_sparse=1 #(default: 0) -``` - -## parallel_nn -`parallel_nn` can be set to mixed use of GPUs and CPUs to compute layers. That is to say, you can deploy network to use a GPU to compute some layers and use a CPU to compute other layers. The other way is to split layers into different GPUs, which can **reduce GPU memory** or **use parallel computation to accelerate some layers**. - -If you want to use these characteristics, you need to specify device ID in network config (denote it as deviceId) and add command line argument: - -``` ---parallel_nn=true -``` -### case 1: Mixed Use of GPU and CPU -Consider the following example: - -``` -#command line: -paddle train --use_gpu=true --parallel_nn=true trainer_count=COUNT - -default_device(0) - -fc1=fc_layer(...) -fc2=fc_layer(...) -fc3=fc_layer(...,layer_attr=ExtraAttr(device=-1)) - -``` -- default_device(0): set default device ID to 0. This means that except the layers with device=-1, all layers will use a GPU, and the specific GPU used for each layer depends on trainer\_count and gpu\_id (0 by default). Here, layer fc1 and fc2 are computed on the GPU. - -- device=-1: use the CPU for layer fc3. - -- trainer_count: - - trainer_count=1: if gpu\_id is not set, then use the first GPU to compute layers fc1 and fc2. Otherwise use the GPU with gpu\_id. - - - trainer_count>1: use trainer\_count GPUs to compute one layer using data parallelism. For example, trainer\_count=2 means that GPUs 0 and 1 will use data parallelism to compute layer fc1 and fc2. - -### Case 2: Specify Layers in Different Devices - -``` -#command line: -paddle train --use_gpu=true --parallel_nn=true --trainer_count=COUNT - -#network: -fc2=fc_layer(input=l1, layer_attr=ExtraAttr(device=0), ...) -fc3=fc_layer(input=l1, layer_attr=ExtraAttr(device=1), ...) -fc4=fc_layer(input=fc2, layer_attr=ExtraAttr(device=-1), ...) -``` -In this case, we assume that there are 4 GPUs in one machine. - -- trainer_count=1: - - Use GPU 0 to compute layer fc2. - - Use GPU 1 to compute layer fc3. - - Use CPU to compute layer fc4. - -- trainer_count=2: - - Use GPU 0 and 1 to compute layer fc2. - - Use GPU 2 and 3 to compute layer fc3. - - Use CPU to compute fc4 in two threads. - -- trainer_count=4: - - It will fail (note, we have assumed that there are 4 GPUs in machine), because argument `allow_only_one_model_on_one_gpu` is true by default. - -**Allocation of device ID when `device!=-1`**: - -``` -(deviceId + gpu_id + threadId * numLogicalDevices_) % numDevices_ - -deviceId: specified in layer. -gpu_id: 0 by default. -threadId: thread ID, range: 0,1,..., trainer_count-1 -numDevices_: device (GPU) count in machine. -numLogicalDevices_: min(max(deviceId + 1), numDevices_) -``` diff --git a/dev/doc/_sources/howto/usage/k8s/k8s_aws_en.md.txt b/dev/doc/_sources/howto/usage/k8s/k8s_aws_en.md.txt deleted file mode 100644 index ce72b0803818d5bf0c18753c421848cf2fc1b668..0000000000000000000000000000000000000000 --- a/dev/doc/_sources/howto/usage/k8s/k8s_aws_en.md.txt +++ /dev/null @@ -1,689 +0,0 @@ - -# Distributed PaddlePaddle Training on AWS with Kubernetes - -We will show you step by step on how to run distributed PaddlePaddle training on AWS cluster with Kubernetes. Let's start from core concepts. 
- -## Distributed PaddlePaddle Training Core Concepts - -### Distributed Training Job - -A distributed training job is represented by a [Kubernetes job](https://kubernetes.io/docs/user-guide/jobs/#what-is-a-job). - -Each Kuberentes job is described by a job config file, which specifies the information like the number of [pods](https://kubernetes.io/docs/user-guide/pods/#what-is-a-pod) in the job and environment variables. - -In a distributed training job, we would: - -1. prepare partitioned training data and configuration file on a distributed file system (in this tutorial we use Amazon Elastic File System), and -1. create and submit the Kubernetes job config to the Kubernetes cluster to start the training job. - -### Parameter Servers and Trainers - -There are two roles in a PaddlePaddle cluster: *parameter server (pserver)* and *trainer*. Each parameter server process maintains a shard of the global model. Each trainer has its local copy of the model, and uses its local data to update the model. During the training process, trainers send model updates to parameter servers, parameter servers are responsible for aggregating these updates, so that trainers can synchronize their local copy with the global model. - -
![Model is partitioned into two shards. Managed by two parameter servers respectively.](src/pserver_and_trainer.png)
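To make the figure concrete, below is a toy numpy sketch of the data flow it depicts. This is only a conceptual illustration with made-up numbers, not PaddlePaddle's actual update protocol.

```python
# Toy illustration of the roles above: the global model is split into two
# shards, each pserver aggregates the trainers' updates for its shard, and
# the trainers then pull the refreshed global model.
import numpy as np

global_model = np.zeros(8, dtype='float32')
pserver_shards = np.split(global_model, 2)        # one shard per pserver (views)

# each of the two trainers computes an update for every shard from local data
updates_from_trainers = [
    [np.full(4, 0.1, 'float32'), np.full(4, 0.2, 'float32')],   # trainer 0
    [np.full(4, 0.3, 'float32'), np.full(4, 0.4, 'float32')],   # trainer 1
]

# every pserver sums the updates it received for its own shard
for shard_id, shard in enumerate(pserver_shards):
    shard += sum(updates[shard_id] for updates in updates_from_trainers)

# trainers synchronize their local copies with the aggregated global model
print(np.concatenate(pserver_shards))
```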
- -In order to communicate with pserver, trainer needs to know the ip address of each pserver. In kubernetes it's better to use a service discovery mechanism (e.g., DNS hostname) rather than static ip address, since any pserver's pod may be killed and a new pod could be schduled onto another node of different ip address. However, now we are using static ip. This will be improved. - -Parameter server and trainer are packaged into a same docker image. They will run once pod is scheduled by kubernetes job. - -### Trainer ID - -Each trainer process requires a trainer ID, a zero-based index value, passed in as a command-line parameter. The trainer process thus reads the data partition indexed by this ID. - -### Training - -The entry-point of a container is a shell script. It can see some environment variables pre-defined by Kubernetes. This includes one that gives the job's identity, which can be used in a remote call to the Kubernetes apiserver that lists all pods in the job. - -We rank each pod by sorting them by their ips. The rank of each pod could be the "pod ID". Because we run one trainer and one parameter server in each pod, we can use this "pod ID" as the trainer ID. A detailed workflow of the entry-point script is as follows: - -1. Query the api server to get pod information, and assign the `trainer_id` by sorting the ip. -1. Copy the training data from EFS persistent volume into container. -1. Parse the `paddle pserver` and `paddle trainer` startup parameters from environment variables, and then start up the processes. -1. Trainer with `train_id` 0 will automatically write results onto EFS volume. - - -## PaddlePaddle on AWS with Kubernetes - -### Choose AWS Service Region -This tutorial requires several AWS services work in the same region. Before we create anything in AWS, please check the following link -https://aws.amazon.com/about-aws/global-infrastructure/regional-product-services/ -Choose a region which has the following services available: EC2, EFS, VPS, CloudFormation, KMS, VPC, S3. -In this tutorial, we use "Oregon(us-west-2)" as example. - -### Create AWS Account and IAM Account - -Under each AWS account, we can create multiple [IAM](http://docs.aws.amazon.com/IAM/latest/UserGuide/introduction.html) users. This allows us to grant some privileges to each IAM user and to create/operate AWS clusters as an IAM user. - -To sign up an AWS account, please -follow -[this guide](http://docs.aws.amazon.com/lambda/latest/dg/setting-up.html). -To create IAM users and user groups under an AWS account, please -follow -[this guide](http://docs.aws.amazon.com/IAM/latest/UserGuide/id_users_create.html). - -Please be aware that this tutorial needs the following privileges for the user in IAM: - -- AmazonEC2FullAccess -- AmazonS3FullAccess -- AmazonRoute53FullAccess -- AmazonRoute53DomainsFullAccess -- AmazonElasticFileSystemFullAccess -- AmazonVPCFullAccess -- IAMUserSSHKeys -- IAMFullAccess -- NetworkAdministrator -- AWSKeyManagementServicePowerUser - - -### Download kube-aws and kubectl - -#### kube-aws - -[kube-aws](https://github.com/coreos/kube-aws) is a CLI tool to automate cluster deployment to AWS. -##### Verify kube-aws integrity -Note: if you are using a non-official release (e.g RC release) kube-aws, you can skip this setp. 
-Import the CoreOS Application Signing Public Key: - -``` -gpg2 --keyserver pgp.mit.edu --recv-key FC8A365E -``` - -Validate the key fingerprint: - -``` -gpg2 --fingerprint FC8A365E -``` -The correct key fingerprint is `18AD 5014 C99E F7E3 BA5F 6CE9 50BD D3E0 FC8A 365E` - -We can download `kube-aws` from its [release page](https://github.com/coreos/kube-aws/releases). In this tutorial, we use version 0.9.1 - -Validate the tarball's GPG signature: - -``` -PLATFORM=linux-amd64 - # Or -PLATFORM=darwin-amd64 - -gpg2 --verify kube-aws-${PLATFORM}.tar.gz.sig kube-aws-${PLATFORM}.tar.gz -``` -##### Install kube-aws -Extract the binary: - -``` -tar zxvf kube-aws-${PLATFORM}.tar.gz -``` - -Add kube-aws to your path: - -``` -mv ${PLATFORM}/kube-aws /usr/local/bin -``` - - -#### kubectl - -[kubectl](https://kubernetes.io/docs/user-guide/kubectl-overview/) is a command line interface for running commands against Kubernetes clusters. - -Download `kubectl` from the Kubernetes release artifact site with the `curl` tool. - -``` -# OS X -curl -O https://storage.googleapis.com/kubernetes-release/release/"$(curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt)"/bin/darwin/amd64/kubectl - -# Linux -curl -O https://storage.googleapis.com/kubernetes-release/release/"$(curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt)"/bin/linux/amd64/kubectl -``` - -Make the kubectl binary executable and move it to your PATH (e.g. `/usr/local/bin`): - -``` -chmod +x ./kubectl -sudo mv ./kubectl /usr/local/bin/kubectl -``` - -### Configure AWS Credentials - -First check out [this](http://docs.aws.amazon.com/cli/latest/userguide/installing.html) for installing the AWS command line interface. - -And then configure your AWS account information: - -``` -aws configure -``` - - -Fill in the required fields: - - -``` -AWS Access Key ID: YOUR_ACCESS_KEY_ID -AWS Secrete Access Key: YOUR_SECRETE_ACCESS_KEY -Default region name: us-west-2 -Default output format: json -``` - -`YOUR_ACCESS_KEY_ID`, and `YOUR_SECRETE_ACCESS_KEY` is the IAM key and secret from [Create AWS Account and IAM Account](#create-aws-account-and-iam-account) - -Verify that your credentials work by describing any instances you may already have running on your account: - -``` -aws ec2 describe-instances -``` - -### Define Cluster Parameters - -#### EC2 key pair - -The keypair that will authenticate SSH access to your EC2 instances. The public half of this key pair will be configured on each CoreOS node. - -Follow [EC2 Keypair User Guide](http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-key-pairs.html) to create a EC2 key pair - -After creating a key pair, you will use the key pair name to configure the cluster. - -Key pairs are only available to EC2 instances in the same region. We are using us-west-2 in our tutorial, so make sure to creat key pairs in that region (Oregon). - -Your browser will download a `key-name.pem` file which is the key to access the EC2 instances. We will use it later. - - -#### KMS key - -Amazon KMS keys are used to encrypt and decrypt cluster TLS assets. If you already have a KMS Key that you would like to use, you can skip creating a new key and provide the Arn string for your existing key. 
- -You can create a KMS key with the aws command line tool: - -``` -aws kms --region=us-west-2 create-key --description="kube-aws assets" -{ - "KeyMetadata": { - "CreationDate": 1458235139.724, - "KeyState": "Enabled", - "Arn": "arn:aws:kms:us-west-2:aaaaaaaaaaaaa:key/xxxxxxxxxxxxxxxxxxx", - "AWSAccountId": "xxxxxxxxxxxxx", - "Enabled": true, - "KeyUsage": "ENCRYPT_DECRYPT", - "KeyId": "xxxxxxxxx", - "Description": "kube-aws assets" - } -} -``` - -We will need to use the value of `Arn` later. - -And then let's add several inline policies in your IAM user permission. - -Go to [IAM Console](https://console.aws.amazon.com/iam/home?region=us-west-2#/home). Click on button `Users`, click user that we just created, and then click on `Add inline policy` button, and select `Custom Policy`. - -Paste into following inline policies: - -``` - (Caution: node_0, node_1, node_2 directories represents PaddlePaddle node and train_id, not the Kubernetes node){ - "Version": "2012-10-17", - "Statement": [ - { - "Sid": "Stmt1482205552000", - "Effect": "Allow", - "Action": [ - "kms:Decrypt", - "kms:Encrypt" - ], - "Resource": [ - "arn:aws:kms:*:AWS_ACCOUNT_ID:key/*" - ] - }, - { - "Sid": "Stmt1482205746000", - "Effect": "Allow", - "Action": [ - "cloudformation:CreateStack", - "cloudformation:UpdateStack", - "cloudformation:DeleteStack", - "cloudformation:DescribeStacks", - "cloudformation:DescribeStackResource", - "cloudformation:GetTemplate", - "cloudformation:DescribeStackEvents" - ], - "Resource": [ - "arn:aws:cloudformation:us-west-2:AWS_ACCOUNT_ID:stack/MY_CLUSTER_NAME/*" - ] - } - ] -} -``` -`Version` : Its value has to be exactly "2012-10-17". -`AWS_ACCOUNT_ID`: You can get it from following command line: - -``` -aws sts get-caller-identity --output text --query Account -``` - -`MY_CLUSTER_NAME`: Pick a MY_CLUSTER_NAME that you like, you will use it later as well. -Please note, stack name must satisfy regular expression pattern: [a-zA-Z][-a-zA-Z0-9*]*, which means no "_" or "-" in stack name, or kube-aws will throw error in later steps. - -#### External DNS name - -When the cluster is created, the controller will expose the TLS-secured API on a DNS name. - -DNS name should have a CNAME points to cluster DNS name or an A record points to the cluster IP address. - -We will need to use DNS name later in tutorial. If you don't already own one, you can choose any DNS name (e.g., `paddle`) and modify `/etc/hosts` to associate cluster IP with that DNS name for your local machine. And add name service (route53) in aws to associate the IP to paddle for cluster. We will find the cluster IP in later steps. - -#### S3 bucket - -You need to create an S3 bucket before startup the Kubernetes cluster. - -There are some bugs in aws cli in creating S3 bucket, so let's use the [S3 Console](https://console.aws.amazon.com/s3/home?region=us-west-2). - -Click on `Create Bucket`, fill in a unique BUCKET_NAME, and make sure region is us-west-2 (Oregon). 
- - -#### Initialize Assets - -Create a directory on your local machine to hold the generated assets: - -``` -$ mkdir my-cluster -$ cd my-cluster -``` - -Initialize the cluster CloudFormation stack with the KMS Arn, key pair name, and DNS name from the previous step: - -``` -kube-aws init \ ---cluster-name=MY_CLUSTER_NAME \ ---external-dns-name=MY_EXTERNAL_DNS_NAME \ ---region=us-west-2 \ ---availability-zone=us-west-2a \ ---key-name=KEY_PAIR_NAME \ ---kms-key-arn="arn:aws:kms:us-west-2:xxxxxxxxxx:key/xxxxxxxxxxxxxxxxxxx" -``` - -`MY_CLUSTER_NAME`: the one you picked in [KMS key](#kms-key) - -`MY_EXTERNAL_DNS_NAME`: see [External DNS name](#external-dns-name) - -`KEY_PAIR_NAME`: see [EC2 key pair](#ec2-key-pair) - -`--kms-key-arn`: the "Arn" in [KMS key](#kms-key) - -Here `us-west-2a` is used for parameter `--availability-zone`, but supported availability zone varies among AWS accounts. - -Please check if `us-west-2a` is supported by `aws ec2 --region us-west-2 describe-availability-zones`, if not switch to other supported availability zone. (e.g., `us-west-2a`, or `us-west-2b`) - - -There will now be a cluster.yaml file in the asset directory. This is the main configuration file for your cluster. - -By default `kube-aws` will only create one worker node. Let's edit `cluster.yaml` and change `workerCount` from 1 to 3. - - -#### Render contents of the asset directory - -In the simplest case, you can have kube-aws generate both your TLS identities and certificate authority for you. - -``` -kube-aws render credentials --generate-ca -``` - -The next command generates the default set of cluster assets in your asset directory. - -``` -kube-aws render stack -``` -Assets (templates and credentials) that are used to create, update and interact with your Kubernetes cluster will be created under your current folder. - - -### Kubernetes Cluster Start Up - -#### Create the instances defined in the CloudFormation template - -Now let's create your cluster (choose any `PREFIX` for the command below): - -``` -kube-aws up --s3-uri s3://BUCKET_NAME/PREFIX -``` - -`BUCKET_NAME`: the bucket name that you used in [S3 bucket](#s3-bucket) - - -#### Configure DNS - -You can invoke `kube-aws status` to get the cluster API endpoint after cluster creation. - -``` -$ kube-aws status -Cluster Name: paddle-cluster -Controller DNS Name: paddle-cl-ElbAPISe-EEOI3EZPR86C-531251350.us-west-2.elb.amazonaws.com -``` - -If you own a DNS name, set the A record to any of the above ip. __Or__ you can set up CNAME point to `Controller DNS Name` (`paddle-cl-ElbAPISe-EEOI3EZPR86C-531251350.us-west-2.elb.amazonaws.com`) - -##### Find IP address - -Use command `dig` to check the load balancer hostname to get the ip address. - -``` -$ dig paddle-cl-ElbAPISe-EEOI3EZPR86C-531251350.us-west-2.elb.amazonaws.com - -;; QUESTION SECTION: -;paddle-cl-ElbAPISe-EEOI3EZPR86C-531251350.us-west-2.elb.amazonaws.com. IN A - -;; ANSWER SECTION: -paddle-cl-ElbAPISe-EEOI3EZPR86C-531251350.us-west-2.elb.amazonaws.com. 59 IN A 54.241.164.52 -paddle-cl-ElbAPISe-EEOI3EZPR86C-531251350.us-west-2.elb.amazonaws.com. 59 IN A 54.67.102.112 -``` - -In the above output, both ip `54.241.164.52`, `54.67.102.112` will work. - -*If you own a DNS name*, set the A record to any of the above ip. Then you can skip to the step "Access the cluster". - -*If you do not own a DNS name*: -##### Update local DNS association -Edit `/etc/hosts` to associate above ip with the DNS name. 
-##### Add Route53 private name service in VPC - - Open [Route53 Console](https://console.aws.amazon.com/route53/home) - - Create hosted zone with following config - - Domain name: "paddle" - - Type: "Private hosted zone for amazon VPC" - - VPC ID: `` - - ![route53 zone setting](src/route53_create_zone.png) - - Add A record - - Click on the zone "paddle" just created - - Click the button "Create record set" - - Name : leave blank - - type: "A" - - Value: `` - - ![route53 create recordset](src/route53_create_recordset.png) - - Verify name service - - Connect to any instance created by kube-aws via ssh - - Run command "host paddle", see if the ip returned is the private ip of kube-controller - -#### Access the cluster - -Once the API server is running, you should see: - -``` -$ kubectl --kubeconfig=kubeconfig get nodes -NAME STATUS AGE -ip-10-0-0-134.us-west-2.compute.internal Ready 6m -ip-10-0-0-238.us-west-2.compute.internal Ready 6m -ip-10-0-0-50.us-west-2.compute.internal Ready 6m -ip-10-0-0-55.us-west-2.compute.internal Ready 6m -``` - - -### Setup Elastic File System for Cluster - -Training data is usually served on a distributed filesystem, we use Elastic File System (EFS) on AWS. - -1. Create security group for EFS in [security group console](https://us-west-2.console.aws.amazon.com/ec2/v2/home?region=us-west-2#SecurityGroups:sort=groupId) - 1. Look up security group id for `paddle-cluster-sg-worker` (`sg-055ee37d` in the image below) -
   ![](src/worker_security_group.png)

   2. Add a security group `paddle-efs` with an `ALL TCP` inbound rule whose custom source is the group ID of `paddle-cluster-sg-worker`, and set its VPC to `paddle-cluster-vpc`. Make sure the availability zone is the same as the one you used in [Initialize Assets](#initialize-assets).

   ![](src/add_security_group.png)

2. Create the Elastic File System in the [EFS console](https://us-west-2.console.aws.amazon.com/efs/home?region=us-west-2#/wizard/1) with the `paddle-cluster-vpc` VPC. Make sure the subnet is `paddle-cluster-Subnet0` and the security group is `paddle-efs`.

   ![](src/create_efs.png)
- - -### Start PaddlePaddle Training Demo on AWS - -#### Configure Kubernetes Volume that Points to EFS - -First we need to create a [PersistentVolume](https://kubernetes.io/docs/user-guide/persistent-volumes/) to provision EFS volumn. - -Save following snippet as `pv.yaml` -``` -apiVersion: v1 -kind: PersistentVolume -metadata: - name: efsvol -spec: - capacity: - storage: 100Gi - accessModes: - - ReadWriteMany - nfs: - server: EFS_DNS_NAME - path: "/" -``` - -`EFS_DNS_NAME`: DNS name as shown in description of `paddle-efs` that we created. Looks similar to `fs-2cbf7385.efs.us-west-2.amazonaws.com` - -Run following command to create a persistent volumn: -``` -kubectl --kubeconfig=kubeconfig create -f pv.yaml -``` - -Next let's create a [PersistentVolumeClaim](https://kubernetes.io/docs/user-guide/persistent-volumes/) to claim the persistent volume. - -Save following snippet as `pvc.yaml`. -``` -kind: PersistentVolumeClaim -apiVersion: v1 -metadata: - name: efsvol -spec: - accessModes: - - ReadWriteMany - resources: - requests: - storage: 50Gi -``` - -Run following command to create a persistent volumn claim: -``` -kubectl --kubeconfig=kubeconfig create -f pvc.yaml -``` - -#### Prepare Training Data - -We will now launch a kubernetes job that downloads, saves and evenly splits training data into 3 shards on the persistent volumn that we just created. - -save following snippet as `paddle-data-job.yaml` -``` -apiVersion: batch/v1 -kind: Job -metadata: - name: paddle-data -spec: - template: - metadata: - name: pi - spec: - containers: - - name: paddle-data - image: paddledev/paddle-tutorial:k8s_data - imagePullPolicy: Always - volumeMounts: - - mountPath: "/efs" - name: efs - env: - - name: OUT_DIR - value: /efs/paddle-cluster-job - - name: SPLIT_COUNT - value: "3" - volumes: - - name: efs - persistentVolumeClaim: - claimName: efsvol - restartPolicy: Never -``` - -Run following command to launch the job: -``` -kubectl --kubeconfig=kubeconfig create -f paddle-data-job.yaml -``` - -Job may take 7 min to finish, use following command to check job status. Do not proceed until `SUCCESSFUL` for `paddle-data` job is `1` -``` -$ kubectl --kubeconfig=kubeconfig get jobs -NAME DESIRED SUCCESSFUL AGE -paddle-data 1 1 6m -``` - -Data preparation is done by docker image `paddledev/paddle-tutorial:k8s_data`, see [here](src/k8s_data/README.md) for how to build this docker image and source code. - -#### Start Training - -Now we are ready to start paddle training job. 
Save following snippet as `paddle-cluster-job.yaml` -``` -apiVersion: batch/v1 -kind: Job -metadata: - name: paddle-cluster-job -spec: - parallelism: 3 - completions: 3 - template: - metadata: - name: paddle-cluster-job - spec: - volumes: - - name: efs - persistentVolumeClaim: - claimName: efsvol - containers: - - name: trainer - image: paddledev/paddle-tutorial:k8s_train - command: ["bin/bash", "-c", "/root/start.sh"] - env: - - name: JOB_NAME - value: paddle-cluster-job - - name: JOB_PATH - value: /home/jobpath - - name: JOB_NAMESPACE - value: default - - name: TRAIN_CONFIG_DIR - value: quick_start - - name: CONF_PADDLE_NIC - value: eth0 - - name: CONF_PADDLE_PORT - value: "7164" - - name: CONF_PADDLE_PORTS_NUM - value: "2" - - name: CONF_PADDLE_PORTS_NUM_SPARSE - value: "2" - - name: CONF_PADDLE_GRADIENT_NUM - value: "3" - - name: TRAINER_COUNT - value: "3" - volumeMounts: - - mountPath: "/home/jobpath" - name: efs - ports: - - name: jobport0 - hostPort: 7164 - containerPort: 7164 - - name: jobport1 - hostPort: 7165 - containerPort: 7165 - - name: jobport2 - hostPort: 7166 - containerPort: 7166 - - name: jobport3 - hostPort: 7167 - containerPort: 7167 - restartPolicy: Never -``` - -`parallelism: 3, completions: 3` means this job will simultaneously start 3 PaddlePaddle pods, and this job will be finished when there are 3 finished pods. - -`env` field represents container's environment variables, we specify PaddlePaddle parameters by environment variables. - -`ports` indicates that TCP port 7164 - 7167 are exposed for communication between `pserver` ans trainer. port starts continously from `CONF_PADDLE_PORT` (7164) to `CONF_PADDLE_PORT + CONF_PADDLE_PORTS_NUM + CONF_PADDLE_PORTS_NUM_SPARSE - 1` (7167). We use multiple ports for dense and sparse paramter updates to improve latency. - -Run following command to launch the job. -``` -kubectl --kubeconfig=kubeconfig create -f paddle-claster-job.yaml -``` - -Inspect individual pods - -``` -$ kubectl --kubeconfig=kubeconfig get pods -NAME READY STATUS RESTARTS AGE -paddle-cluster-job-cm469 1/1 Running 0 9m -paddle-cluster-job-fnt03 1/1 Running 0 9m -paddle-cluster-job-jx4xr 1/1 Running 0 9m -``` - -Inspect individual console output -``` -kubectl --kubeconfig=kubeconfig log -f POD_NAME -``` - -`POD_NAME`: name of any pod (e.g., `paddle-cluster-job-cm469`). - -Run `kubectl --kubeconfig=kubeconfig describe job paddle-cluster-job` to check training job status. It will complete in around 20 minutes. - -The details for start `pserver` and `trainer` are hidden inside docker image `paddledev/paddle-tutorial:k8s_train`, see [here](src/k8s_train/README.md) for how to build the docker image and source code. - -#### Inspect Training Output - -Training output (model snapshot and logs) will be saved in EFS. We can ssh into worker EC2 instance, mount EFS and check training output. - -1. ssh Into Worker EC2 instance -``` -chmod 400 key-name.pem -ssh -i key-name.pem core@INSTANCE_IP -``` - -`INSTANCE_IP`: public IP address of EC2 kubernetes worker node. Go to [EC2 console](https://us-west-2.console.aws.amazon.com/ec2/v2/home?region=us-west-2#Instances:sort=instanceId) and check `public IP` of any `paddle-cluster-kube-aws-worker` instance. - -2. Mount EFS -``` -mkdir efs -sudo mount -t nfs4 -o nfsvers=4.1,rsize=1048576,wsize=1048576,hard,timeo=600,retrans=2 EFS_DNS_NAME:/ efs -``` - -`EFS_DNS_NAME`: DNS name as shown in description of `paddle-efs` that we created. Look similar to `fs-2cbf7385.efs.us-west-2.amazonaws.com`. 
- -Now folder `efs` will have structure similar to: -``` --- paddle-cluster-job - |-- ... - |-- output - | |-- node_0 - | | |-- server.log - | | `-- train.log - | |-- node_1 - | | |-- server.log - | | `-- train.log - | |-- node_2 - | | |-- server.log - | | `-- train.log - | |-- pass-00000 - | | |-- ___fc_layer_0__.w0 - | | |-- ___fc_layer_0__.wbias - | | |-- done - | | |-- path.txt - | | `-- trainer_config.lr.py - | |-- pass-00001... -``` -`server.log` contains log for `pserver`. `train.log` contains log for `trainer`. Model description and snapshot is stored in `pass-0000*`. - -### Kubernetes Cluster Tear Down - -#### Delete EFS - -Go to [EFS Console](https://us-west-2.console.aws.amazon.com/efs/home?region=us-west-2) and delete the EFS volumn that we created. - -#### Delete security group - -Go to [Security Group Console](https://us-west-2.console.aws.amazon.com/ec2/v2/home?region=us-west-2#SecurityGroups:sort=groupId) and delete security group `paddle-efs`. - - -#### Delete S3 Bucket - -Go to [S3 Console](https://console.aws.amazon.com/s3/home?region=us-west-2#) and delete the S3 bucket that we created. - -#### Destroy Cluster - -``` -kube-aws destroy -``` - -The command will return immediately, but it might take 5 min to tear down the whole cluster. - -You can go to [CludFormation Console](https://us-west-2.console.aws.amazon.com/cloudformation/home?region=us-west-2#/stacks?filter=active) to check destroy process. diff --git a/dev/doc/_sources/howto/usage/k8s/k8s_en.md.txt b/dev/doc/_sources/howto/usage/k8s/k8s_en.md.txt deleted file mode 100644 index 0c3ab05b708e7a924577c26496b8c55126e76c62..0000000000000000000000000000000000000000 --- a/dev/doc/_sources/howto/usage/k8s/k8s_en.md.txt +++ /dev/null @@ -1,201 +0,0 @@ -# Paddle On Kubernetes - ->In this article, we will introduce how to run Paddle training job on single CPU machine using Kubernetes. In next article, we will introduce how to run Paddle training job on distributed cluster. - -## Build Docker Image - -In distributed Kubernetes cluster, we will use Ceph or other shared storage system for storing training related data so that all processes in Paddle training can retrieve data from Ceph. In this example, we will only demo training job on single machine. In order to simplify the requirement of the environment, we will directly put training data into Paddle's Docker Image, so we need to create a Paddle Docker image that already includes the training data. - -Paddle's [Quick Start Tutorial](http://www.paddlepaddle.org/doc/demo/quick_start/index_en.html) introduces how to download and train data by using script from Paddle's source code. -And `paddledev/paddle:cpu-demo-latest` image has the Paddle source code and demo. (Caution: Default Paddle image `paddledev/paddle:cpu-latest` doesn't include the source code, Paddle's different versions of image can be referred here: [Docker installation guide](http://www.paddlepaddle.org/doc/build/docker_install.html)), so we run this container and download the training data, and then commit the whole container to be a new Docker image. - -### Run Docker Container - -``` -$ docker run --name quick_start_data -it paddledev/paddle:cpu-demo-latest -``` - -### Download Training Data - -Getting into `/root/paddle/demo/quick_start/data` Directory,using `get_data.sh` to download training data. -Then getting into `/root/paddle/demo/quick_start` Directory, using `preprocess.sh` to pre-process training data. 
- -``` -$ root@fbd1f2bb71f4:~/paddle/demo/quick_start/data# ./get_data.sh - -Downloading Amazon Electronics reviews data... ---2016-10-31 01:33:43-- http://snap.stanford.edu/data/amazon/productGraph/categoryFiles/reviews_Electronics_5.json.gz -Resolving snap.stanford.edu (snap.stanford.edu)... 171.64.75.80 -Connecting to snap.stanford.edu (snap.stanford.edu)|171.64.75.80|:80... connected. -HTTP request sent, awaiting response... 200 OK -Length: 495854086 (473M) [application/x-gzip] -Saving to: 'reviews_Electronics_5.json.gz' - - 10% [=======> ] 874,279 64.7KB/s eta 2h 13m - -``` - -### Modify Startup Script - -After downloading the data,modify `/root/paddle/demo/quick_start/train.sh` file contents are as follows (one more cd cmd): -``` -set -e -cd /root/paddle/demo/quick_start -cfg=trainer_config.lr.py -#cfg=trainer_config.emb.py -#cfg=trainer_config.cnn.py -#cfg=trainer_config.lstm.py -#cfg=trainer_config.bidi-lstm.py -#cfg=trainer_config.db-lstm.py -paddle train \ - --config=$cfg \ - --save_dir=./output \ - --trainer_count=4 \ - --log_period=20 \ - --num_passes=15 \ - --use_gpu=false \ - --show_parameter_stats_period=100 \ - --test_all_data_in_one_period=1 \ - 2>&1 | tee 'train.log' -``` - -### Commit Docker Image - -``` -$ docker commit quick_start_data mypaddle/paddle:quickstart -``` - -## Use Kubernetes For Training - ->We will use Kubernetes job for training process, following steps shows how to do the training with Kubernetes. - -### Create Yaml Files - -The output result in container will be demolished when job finished (container stopped running), so we need to mount the volume out to the local disk when creating the container to store the training result. Using our previously created image, we can create a [Kubernetes Job](http://kubernetes.io/docs/user-guide/jobs/#what-is-a-job), the yaml contents are as follows: - -``` -apiVersion: batch/v1 -kind: Job -metadata: - name: quickstart -spec: - parallelism: 1 - completions: 1 - template: - metadata: - name: quickstart - spec: - volumes: - - name: output - hostPath: - path: /home/work/paddle_output - containers: - - name: pi - image: mypaddle/paddle:quickstart - command: ["bin/bash", "-c", "/root/paddle/demo/quick_start/train.sh"] - volumeMounts: - - name: output - mountPath: /root/paddle/demo/quick_start/output - restartPolicy: Never -``` - -### Start Paddle Job - -Using the above yaml file to start the Kubernetes job. - -``` -$ kubectl create -f paddle.yaml -``` - -Get the detailed status of the job: - -``` -$ kubectl get job -NAME DESIRED SUCCESSFUL AGE -quickstart 1 0 58s - -$ kubectl describe job quickstart -Name: quickstart -Namespace: default -Image(s): registry.baidu.com/public/paddle:cpu-demo-latest -Selector: controller-uid=f120da72-9f18-11e6-b363-448a5b355b84 -Parallelism: 1 -Completions: 1 -Start Time: Mon, 31 Oct 2016 11:20:16 +0800 -Labels: controller-uid=f120da72-9f18-11e6-b363-448a5b355b84,job-name=quickstart -Pods Statuses: 0 Running / 1 Succeeded / 0 Failed -Volumes: - output: - Type: HostPath (bare host directory volume) - Path: /home/work/paddle_output -Events: - FirstSeen LastSeen Count From SubobjectPath Type Reason Message - --------- -------- ----- ---- ------------- -------- ------ ------- - 1m 1m 1 {job-controller } Normal SuccessfulCreate Created pod: quickstart-fa0wx -``` - -### Get Training Result - -We can use kubectl command to take a look at the status of related pod. 
- -``` -$ kubectl describe pod quickstart-fa0wx -Name: quickstart-fa0wx -Namespace: default -Node: paddle-demo-let02/10.206.202.44 -Start Time: Mon, 31 Oct 2016 11:20:17 +0800 -Labels: controller-uid=f120da72-9f18-11e6-b363-448a5b355b84,job-name=quickstart -Status: Succeeded -IP: 10.0.0.9 -Controllers: Job/quickstart -Containers: - quickstart: - Container ID: docker://b8561f5c79193550d64fa47418a9e67ebdd71546186e840f88de5026b8097465 - Image: registry.baidu.com/public/paddle:cpu-demo-latest - Image ID: docker://18e457ce3d362ff5f3febf8e7f85ffec852f70f3b629add10aed84f930a68750 - Port: - Command: - bin/bash - -c - /root/paddle/demo/quick_start/train.sh - QoS Tier: - cpu: BestEffort - memory: BestEffort - State: Terminated - Reason: Completed - Exit Code: 0 - Started: Mon, 31 Oct 2016 11:20:20 +0800 - Finished: Mon, 31 Oct 2016 11:21:46 +0800 - Ready: False - Restart Count: 0 - Environment Variables: -Conditions: - Type Status - Ready False -Volumes: - output: - Type: HostPath (bare host directory volume) - Path: /home/work/paddle_output -``` - -We can also ssh to Kubernetes node to take a look at the training result. - -``` -[root@paddle-demo-let02 paddle_output]# ll -total 60 -drwxr-xr-x 2 root root 4096 Oct 31 11:20 pass-00000 -drwxr-xr-x 2 root root 4096 Oct 31 11:20 pass-00001 -drwxr-xr-x 2 root root 4096 Oct 31 11:21 pass-00002 -drwxr-xr-x 2 root root 4096 Oct 31 11:21 pass-00003 -drwxr-xr-x 2 root root 4096 Oct 31 11:21 pass-00004 -drwxr-xr-x 2 root root 4096 Oct 31 11:21 pass-00005 -drwxr-xr-x 2 root root 4096 Oct 31 11:21 pass-00006 -drwxr-xr-x 2 root root 4096 Oct 31 11:21 pass-00007 -drwxr-xr-x 2 root root 4096 Oct 31 11:21 pass-00008 -drwxr-xr-x 2 root root 4096 Oct 31 11:21 pass-00009 -drwxr-xr-x 2 root root 4096 Oct 31 11:21 pass-00010 -drwxr-xr-x 2 root root 4096 Oct 31 11:21 pass-00011 -drwxr-xr-x 2 root root 4096 Oct 31 11:21 pass-00012 -drwxr-xr-x 2 root root 4096 Oct 31 11:21 pass-00013 -drwxr-xr-x 2 root root 4096 Oct 31 11:21 pass-00014 -``` diff --git a/dev/doc/_sources/howto/usage/k8s/src/k8s_data/README.md.txt b/dev/doc/_sources/howto/usage/k8s/src/k8s_data/README.md.txt deleted file mode 100644 index 83cef7affd0ac4d3a1ca08ea5b046fa81e1bc630..0000000000000000000000000000000000000000 --- a/dev/doc/_sources/howto/usage/k8s/src/k8s_data/README.md.txt +++ /dev/null @@ -1,6 +0,0 @@ -To build PaddlePaddle data preparation image in tutorial [Distributed PaddlePaddle Training on AWS with Kubernetes](../../k8s_aws_en.md), run following commands: - -``` -cp -r ../../../../../../demo/quick_start . -docker build . -t prepare-data-image-name -``` diff --git a/dev/doc/_sources/howto/usage/k8s/src/k8s_train/README.md.txt b/dev/doc/_sources/howto/usage/k8s/src/k8s_train/README.md.txt deleted file mode 100644 index 96bf65497ffa23e90c4c9350504f86367b48daf2..0000000000000000000000000000000000000000 --- a/dev/doc/_sources/howto/usage/k8s/src/k8s_train/README.md.txt +++ /dev/null @@ -1,5 +0,0 @@ -To build PaddlePaddle training image in tutorial [Distributed PaddlePaddle Training on AWS with Kubernetes](../../k8s_aws_en.md), run following command: - -``` -docker build . -t train-image-name -``` diff --git a/dev/doc/_sources/index_en.rst.txt b/dev/doc/_sources/index_en.rst.txt deleted file mode 100644 index 1d9cca7de720ebc23fe816f32d158930d91c07e7..0000000000000000000000000000000000000000 --- a/dev/doc/_sources/index_en.rst.txt +++ /dev/null @@ -1,12 +0,0 @@ -PaddlePaddle Documentation -========================== - -.. 
toctree:: - :maxdepth: 1 - - getstarted/index_en.rst - tutorials/index_en.md - howto/index_en.rst - api/index_en.rst - about/index_en.rst - \ No newline at end of file diff --git a/dev/doc/_sources/tutorials/embedding_model/index_en.md.txt b/dev/doc/_sources/tutorials/embedding_model/index_en.md.txt deleted file mode 100644 index d793a50f488e464bcd90a2fb506a8dcc3c760433..0000000000000000000000000000000000000000 --- a/dev/doc/_sources/tutorials/embedding_model/index_en.md.txt +++ /dev/null @@ -1,139 +0,0 @@ -# Chinese Word Embedding Model Tutorial # ----------- -This tutorial is to guide you through the process of using a Pretrained Chinese Word Embedding Model in the PaddlePaddle standard format. - -We thank @lipeng for the pull request that defined the model schemas and pretrained the models. - -## Introduction ### -### Chinese Word Dictionary ### -Our Chinese-word dictionary is created on Baidu ZhiDao and Baidu Baike by using in-house word segmentor. For example, the participle of "《红楼梦》" is "《","红楼梦","》",and "《红楼梦》". Our dictionary (using UTF-8 format) has has two columns: word and its frequency. The total word count is 3206325, including 3 special token: - - ``: the start of a sequence - - ``: the end of a sequence - - ``: a word not included in dictionary - -### Pretrained Chinese Word Embedding Model ### -Inspired by paper [A Neural Probabilistic Language Model](http://www.jmlr.org/papers/volume3/bengio03a/bengio03a.pdf), our model architecture (**Embedding joint of six words->FullyConnect->SoftMax**) is as following graph. And for our dictionary, we pretrain four models with different word vector dimenstions, i.e 32, 64, 128, 256. -
![](./neural-n-gram-model.png)

Figure 1. neural-n-gram-model
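To make the architecture in Figure 1 concrete, here is a toy numpy sketch of the forward pass (embedding joint of six words, one fully connected layer, softmax). The sizes are illustrative stand-ins, not those of the released models.

```python
# Schematic forward pass of the n-gram model in Figure 1. The real dictionary
# has 3,206,325 words and the released models use embedding dimensions of
# 32, 64, 128 or 256; here we use a tiny vocabulary so the script runs quickly.
import numpy as np

vocab_size, emb_dim, context_len = 1000, 32, 6
rng = np.random.RandomState(0)
embedding = 0.01 * rng.randn(vocab_size, emb_dim).astype('float32')
fc_weight = 0.01 * rng.randn(context_len * emb_dim, vocab_size).astype('float32')
fc_bias = np.zeros(vocab_size, dtype='float32')

def next_word_probs(context_word_ids):
    joined = embedding[context_word_ids].reshape(-1)   # embedding joint of six words
    logits = joined.dot(fc_weight) + fc_bias           # fully connected layer
    exp = np.exp(logits - logits.max())
    return exp / exp.sum()                             # softmax over the vocabulary

probs = next_word_probs([3, 14, 159, 26, 535, 89])
print(probs.shape, float(probs.sum()))
```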
- -### Download and Extract ### -To download and extract our dictionary and pretrained model, run the following commands. - - cd $PADDLE_ROOT/demo/model_zoo/embedding - ./pre_DictAndModel.sh - -## Chinese Paraphrasing Example ## -We provide a paraphrasing task to show the usage of pretrained Chinese Word Dictionary and Embedding Model. - -### Data Preparation and Preprocess ### - -First, run the following commands to download and extract the in-house dataset. The dataset (using UTF-8 format) has 20 training samples, 5 testing samples and 2 generating samples. - - cd $PADDLE_ROOT/demo/seqToseq/data - ./paraphrase_data.sh - -Second, preprocess data and build dictionary on train data by running the following commands, and the preprocessed dataset is stored in `$PADDLE_SOURCE_ROOT/demo/seqToseq/data/pre-paraphrase`: - - cd $PADDLE_ROOT/demo/seqToseq/ - python preprocess.py -i data/paraphrase [--mergeDict] - -- `--mergeDict`: if using this option, the source and target dictionary are merged, i.e, two dictionaries have the same context. Here, as source and target data are all chinese words, this option can be used. - -### User Specified Embedding Model ### -The general command of extracting desired parameters from the pretrained embedding model based on user dictionary is: - - cd $PADDLE_ROOT/demo/model_zoo/embedding - python extract_para.py --preModel PREMODEL --preDict PREDICT --usrModel USRMODEL--usrDict USRDICT -d DIM - -- `--preModel PREMODEL`: the name of pretrained embedding model -- `--preDict PREDICT`: the name of pretrained dictionary -- `--usrModel USRMODEL`: the name of extracted embedding model -- `--usrDict USRDICT`: the name of user specified dictionary -- `-d DIM`: dimension of parameter - -Here, you can simply run the command: - - cd $PADDLE_ROOT/demo/seqToseq/data/ - ./paraphrase_model.sh - -And you will see following embedding model structure: - - paraphrase_model - |--- _source_language_embedding - |--- _target_language_embedding - -### Training Model in PaddlePaddle ### -First, create a model config file, see example `demo/seqToseq/paraphrase/train.conf`: - - from seqToseq_net import * - is_generating = False - - ################## Data Definition ##################### - train_conf = seq_to_seq_data(data_dir = "./data/pre-paraphrase", - job_mode = job_mode) - - ############## Algorithm Configuration ################## - settings( - learning_method = AdamOptimizer(), - batch_size = 50, - learning_rate = 5e-4) - - ################# Network configure ##################### - gru_encoder_decoder(train_conf, is_generating, word_vector_dim = 32) - -This config is almost the same as `demo/seqToseq/translation/train.conf`. - -Then, train the model by running the command: - - cd $PADDLE_SOURCE_ROOT/demo/seqToseq/paraphrase - ./train.sh - -where `train.sh` is almost the same as `demo/seqToseq/translation/train.sh`, the only difference is following two command arguments: - -- `--init_model_path`: path of the initialization model, here is `data/paraphrase_model` -- `--load_missing_parameter_strategy`: operations when model file is missing, here use a normal distibution to initialize the other parameters except for the embedding layer - -For users who want to understand the dataset format, model architecture and training procedure in detail, please refer to [Text generation Tutorial](../text_generation/index_en.md). 
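As a side note on the extraction step described above, the effect of `extract_para.py` can be pictured with the conceptual numpy sketch below. It is not the real script; the dictionary contents and the unknown-word token name are placeholders.

```python
# Conceptual sketch: for every word in the user dictionary, copy the matching
# row of the pretrained embedding matrix, falling back to the unknown-word row
# when a word is missing from the pretrained dictionary.
import numpy as np

def extract_user_embedding(pre_embedding, pre_words, usr_words, unk='<unk>'):
    index = {w: i for i, w in enumerate(pre_words)}
    rows = [pre_embedding[index.get(w, index[unk])] for w in usr_words]
    return np.stack(rows)

pre_words = ['<s>', '<e>', '<unk>', 'hello', 'world']            # placeholder dictionary
pre_embedding = np.arange(len(pre_words) * 4, dtype='float32').reshape(-1, 4)
usr_words = ['world', 'hello', 'never-seen-before']
print(extract_user_embedding(pre_embedding, pre_words, usr_words))
```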
- -## Optional Function ## -### Embedding Parameters Observation -For users who want to observe the embedding parameters, this function can convert a PaddlePaddle binary embedding model to a text model by running the command: - - cd $PADDLE_ROOT/demo/model_zoo/embedding - python paraconvert.py --b2t -i INPUT -o OUTPUT -d DIM - -- `-i INPUT`: the name of input binary embedding model -- `-o OUTPUT`: the name of output text embedding model -- `-d DIM`: the dimension of parameter - -You will see parameters like this in output text model: - - 0,4,32156096 - -0.7845433,1.1937413,-0.1704215,0.4154715,0.9566584,-0.5558153,-0.2503305, ...... - 0.0000909,0.0009465,-0.0008813,-0.0008428,0.0007879,0.0000183,0.0001984, ...... - ...... - -- 1st line is **PaddlePaddle format file head**, it has 3 attributes: - - version of PaddlePaddle, here is 0 - - sizeof(float), here is 4 - - total number of parameter, here is 32156096 -- Other lines print the paramters (assume `` = 32) - - each line print 32 paramters splitted by ',' - - there is 32156096/32 = 1004877 lines, meaning there is 1004877 embedding words - -### Embedding Parameters Revision -For users who want to revise the embedding parameters, this function can convert a revised text embedding model to a PaddlePaddle binary model by running the command: - - cd $PADDLE_ROOT/demo/model_zoo/embedding - python paraconvert.py --t2b -i INPUT -o OUTPUT - -- `-i INPUT`: the name of input text embedding model. -- `-o OUTPUT`: the name of output binary embedding model - -Note that the format of input text model is as follows: - - -0.7845433,1.1937413,-0.1704215,0.4154715,0.9566584,-0.5558153,-0.2503305, ...... - 0.0000909,0.0009465,-0.0008813,-0.0008428,0.0007879,0.0000183,0.0001984, ...... - ...... -- there is no file header in 1st line -- each line stores parameters for one word, the separator is commas ',' diff --git a/dev/doc/_sources/tutorials/gan/index_en.md.txt b/dev/doc/_sources/tutorials/gan/index_en.md.txt deleted file mode 100644 index ac9ed37b2264778869f92c0910b1cb946fb4427f..0000000000000000000000000000000000000000 --- a/dev/doc/_sources/tutorials/gan/index_en.md.txt +++ /dev/null @@ -1,137 +0,0 @@ -# Generative Adversarial Networks (GAN) - -This demo implements GAN training described in the original [GAN paper](https://arxiv.org/abs/1406.2661) and deep convolutional generative adversarial networks [DCGAN paper](https://arxiv.org/abs/1511.06434). - -The high-level structure of GAN is shown in Figure. 1 below. It is composed of two major parts: a generator and a discriminator, both of which are based on neural networks. The generator takes in some kind of noise with a known distribution and transforms it into an image. The discriminator takes in an image and determines whether it is artificially generated by the generator or a real image. So the generator and the discriminator are in a competitive game in which generator is trying to generate image to look as real as possible to fool the discriminator, while the discriminator is trying to distinguish between real and fake images. - -
![](./gan.png)

Figure 1. GAN-Model-Structure (figure credit)
- -The generator and discriminator take turn to be trained using SGD. The objective function of the generator is for its generated images being classified as real by the discriminator, and the objective function of the discriminator is to correctly classify real and fake images. When the GAN model is trained to converge to the equilibrium state, the generator will transform the given noise distribution to the distribution of real images, and the discriminator will not be able to distinguish between real and fake images at all. - -## Implementation of GAN Model Structure -Since GAN model involves multiple neural networks, it requires to use paddle python API. So the code walk-through below can also partially serve as an introduction to the usage of Paddle Python API. - -There are three networks defined in gan_conf.py, namely **generator_training**, **discriminator_training** and **generator**. The relationship to the model structure we defined above is that **discriminator_training** is the discriminator, **generator** is the generator, and the **generator_training** combined the generator and discriminator since training generator would require the discriminator to provide loss function. This relationship is described in the following code: -```python -if is_generator_training: - noise = data_layer(name="noise", size=noise_dim) - sample = generator(noise) - -if is_discriminator_training: - sample = data_layer(name="sample", size=sample_dim) - -if is_generator_training or is_discriminator_training: - label = data_layer(name="label", size=1) - prob = discriminator(sample) - cost = cross_entropy(input=prob, label=label) - classification_error_evaluator( - input=prob, label=label, name=mode + '_error') - outputs(cost) - -if is_generator: - noise = data_layer(name="noise", size=noise_dim) - outputs(generator(noise)) -``` - -In order to train the networks defined in gan_conf.py, one first needs to initialize a Paddle environment, parse the config, create GradientMachine from the config and create trainer from GradientMachine as done in the code chunk below: -```python -import py_paddle.swig_paddle as api -# init paddle environment -api.initPaddle('--use_gpu=' + use_gpu, '--dot_period=10', - '--log_period=100', '--gpu_id=' + args.gpu_id, - '--save_dir=' + "./%s_params/" % data_source) - -# Parse config -gen_conf = parse_config(conf, "mode=generator_training,data=" + data_source) -dis_conf = parse_config(conf, "mode=discriminator_training,data=" + data_source) -generator_conf = parse_config(conf, "mode=generator,data=" + data_source) - -# Create GradientMachine -dis_training_machine = api.GradientMachine.createFromConfigProto( -dis_conf.model_config) -gen_training_machine = api.GradientMachine.createFromConfigProto( -gen_conf.model_config) -generator_machine = api.GradientMachine.createFromConfigProto( -generator_conf.model_config) - -# Create trainer -dis_trainer = api.Trainer.create(dis_conf, dis_training_machine) -gen_trainer = api.Trainer.create(gen_conf, gen_training_machine) -``` - -In order to balance the strength between generator and discriminator, we schedule to train whichever one is performing worse by comparing their loss function value. The loss function value can be calculated by a forward pass through the GradientMachine. 
-```python -def get_training_loss(training_machine, inputs): - outputs = api.Arguments.createArguments(0) - training_machine.forward(inputs, outputs, api.PASS_TEST) - loss = outputs.getSlotValue(0).copyToNumpyMat() - return numpy.mean(loss) -``` - -After training one network, one needs to sync the new parameters to the other networks. The code below demonstrates one example of such use case: -```python -# Train the gen_training -gen_trainer.trainOneDataBatch(batch_size, data_batch_gen) - -# Copy the parameters from gen_training to dis_training and generator -copy_shared_parameters(gen_training_machine, -dis_training_machine) -copy_shared_parameters(gen_training_machine, generator_machine) -``` - - -## A Toy Example -With the infrastructure explained above, we can now walk you through a toy example of generating two dimensional uniform distribution using 10 dimensional Gaussian noise. - -The Gaussian noises are generated using the code below: -```python -def get_noise(batch_size, noise_dim): - return numpy.random.normal(size=(batch_size, noise_dim)).astype('float32') -``` - -The real samples (2-D uniform) are generated using the code below: -```python -# synthesize 2-D uniform data in gan_trainer.py:114 -def load_uniform_data(): - data = numpy.random.rand(1000000, 2).astype('float32') - return data -``` - -The generator and discriminator network are built using fully-connected layer and batch_norm layer, and are defined in gan_conf.py. - -To train the GAN model, one can use the command below. The flag -d specifies the training data (cifar, mnist or uniform) and flag --useGpu specifies whether to use gpu for training (0 is cpu, 1 is gpu). -```bash -$python gan_trainer.py -d uniform --useGpu 1 -``` -The generated samples can be found in ./uniform_samples/ and one example is shown below as Figure 2. One can see that it roughly recovers the 2D uniform distribution. - -
![](./uniform_sample.png)

Figure 2. Uniform Sample
- -## MNIST Example -### Data preparation -To download the MNIST data, one can use the following commands: -```bash -$cd data/ -$./get_mnist_data.sh -``` - -### Model description -Following the DC-Gan paper (https://arxiv.org/abs/1511.06434), we use convolution/convolution-transpose layer in the discriminator/generator network to better deal with images. The details of the network structures are defined in gan_conf_image.py. - -### Training the model -To train the GAN model on mnist data, one can use the following command: -```bash -$python gan_trainer.py -d mnist --useGpu 1 -``` -The generated sample images can be found at ./mnist_samples/ and one example is shown below as Figure 3. -
![](./mnist_sample.png)

Figure 3. MNIST Sample
diff --git a/dev/doc/_sources/tutorials/image_classification/index_en.md.txt b/dev/doc/_sources/tutorials/image_classification/index_en.md.txt deleted file mode 100644 index 60c81a6a539944634773f38ec4c9a59709dd4afc..0000000000000000000000000000000000000000 --- a/dev/doc/_sources/tutorials/image_classification/index_en.md.txt +++ /dev/null @@ -1,221 +0,0 @@ -Image Classification Tutorial -============================== - -This tutorial will guide you through training a convolutional neural network to classify objects using the CIFAR-10 image classification dataset. -As shown in the following figure, the convolutional neural network can recognize the main object in images, and output the classification result. - -
![Image Classification](./image_classification.png)
- -## Data Preparation -First, download CIFAR-10 dataset. CIFAR-10 dataset can be downloaded from its official website. - - - -We have prepared a script to download and process CIFAR-10 dataset. The script will download CIFAR-10 dataset from the official dataset. -It will convert it to jpeg images and organize them into a directory with the required structure for the tutorial. Make sure that you have installed pillow and its dependents. -Consider the following commands: - -1. install pillow dependents - -```bash -sudo apt-get install libjpeg-dev -pip install pillow -``` - -2. download data and preparation - -```bash -cd demo/image_classification/data/ -sh download_cifar.sh -``` - -The CIFAR-10 dataset consists of 60000 32x32 color images in 10 classes, with 6000 images per class. There are 50000 training images and 10000 test images. - -Here are the classes in the dataset, as well as 10 random images from each: -
![Image Classification](./cifar.png)
- - -After downloading and converting, we should find a directory (cifar-out) containing the dataset in the following format: - -``` -train ----airplane ----automobile ----bird ----cat ----deer ----dog ----frog ----horse ----ship ----truck -test ----airplane ----automobile ----bird ----cat ----deer ----dog ----frog ----horse ----ship ----truck -``` - -It has two directories:`train` and `test`. These two directories contain training data and testing data of CIFAR-10, respectively. Each of these two folders contains 10 sub-folders, ranging from `airplane` to `truck`. Each sub-folder contains images with the corresponding label. After the images are organized into this structure, we are ready to train an image classification model. - -## Preprocess -After the data has been downloaded, it needs to be pre-processed into the Paddle format. We can run the following command for preprocessing. - -``` -cd demo/image_classification/ -sh preprocess.sh -``` - -`preprocess.sh` calls `./demo/image_classification/preprocess.py` to preprocess image data. -```sh -export PYTHONPATH=$PYTHONPATH:../../ -data_dir=./data/cifar-out -python preprocess.py -i $data_dir -s 32 -c 1 -``` - -`./demo/image_classification/preprocess.py` has the following arguments - -- `-i` or `--input` specifes the input data directory. -- `-s` or `--size` specifies the processed size of images. -- `-c` or `--color` specifes whether images are color images or gray images. - - -## Model Training -We need to create a model config file before training the model. An example of the config file (vgg_16_cifar.py) is listed below. **Note**, it is slightly different from the `vgg_16_cifar.py` which also applies to the prediction. - -```python -from paddle.trainer_config_helpers import * -data_dir='data/cifar-out/batches/' -meta_path=data_dir+'batches.meta' -args = {'meta':meta_path, 'mean_img_size': 32, - 'img_size': 32, 'num_classes': 10, - 'use_jpeg': 1, 'color': "color"} -define_py_data_sources2(train_list=data_dir+"train.list", - test_list=data_dir+'test.list', - module='image_provider', - obj='processData', - args=args) -settings( - batch_size = 128, - learning_rate = 0.1 / 128.0, - learning_method = MomentumOptimizer(0.9), - regularization = L2Regularization(0.0005 * 128)) - -img = data_layer(name='image', size=3*32*32) -lbl = data_layer(name="label", size=10) -# small_vgg is predined in trainer_config_helpers.network -predict = small_vgg(input_image=img, num_channels=3) -outputs(classification_cost(input=predict, label=lbl)) -``` - -The first line imports python functions for defining networks. -```python -from paddle.trainer_config_helpers import * -``` - -Then define an `define_py_data_sources2` which use python data provider -interface. The arguments in `args` are used in `image_provider.py` which -yeilds image data and transform them to Paddle. - - `meta`: the mean value of training set. - - `mean_img_size`: the size of mean feature map. - - `img_size`: the height and width of input image. - - `num_classes`: the number of classes. - - `use_jpeg`: the data storage type when preprocessing. - - `color`: specify color image. - -`settings` specifies the training algorithm. In the following example, -it specifies learning rate as 0.1, but divided by batch size, and the weight decay -is 0.0005 and multiplied by batch size. -```python -settings( - batch_size = 128, - learning_rate = 0.1 / 128.0, - learning_method = MomentumOptimizer(0.9), - regularization = L2Regularization(0.0005 * 128) -) -``` - -The `small_vgg` specifies the network. 
We use a small version of VGG convolutional network as our network -for classification. A description of VGG network can be found here [http://www.robots.ox.ac.uk/~vgg/research/very_deep/](http://www.robots.ox.ac.uk/~vgg/research/very_deep/). -```python -# small_vgg is predined in trainer_config_helpers.network -predict = small_vgg(input_image=img, num_channels=3) -``` -After writing the config, we can train the model by running the script train.sh. - -```bash -config=vgg_16_cifar.py -output=./cifar_vgg_model -log=train.log - -paddle train \ ---config=$config \ ---dot_period=10 \ ---log_period=100 \ ---test_all_data_in_one_period=1 \ ---use_gpu=1 \ ---save_dir=$output \ -2>&1 | tee $log - -python -m paddle.utils.plotcurve -i $log > plot.png -``` - -- Here we use GPU mode to train. If you have no gpu environment, just set `use_gpu=0`. - -- `./demo/image_classification/vgg_16_cifar.py` is the network and data configuration file. The meaning of the other flags can be found in the documentation of the command line flags. - -- The script `plotcurve.py` requires the python module of `matplotlib`, so if it fails, maybe you need to install `matplotlib`. - - -After training finishes, the training and testing error curves will be saved to `plot.png` using `plotcurve.py` script. An example of the plot is shown below: - -
![Training and testing curves.](./plot.png)
- - -## Prediction -After we train the model, the model file as well as the model parameters are stored in path `./cifar_vgg_model/pass-%05d`. For example, the model of the 300-th pass is stored at `./cifar_vgg_model/pass-00299`. - -To make a prediction for an image, one can run `predict.sh` as follows. The script will output the label of the classfiication. - -``` -sh predict.sh -``` - -predict.sh: -``` -model=cifar_vgg_model/pass-00299/ -image=data/cifar-out/test/airplane/seaplane_s_000978.png -use_gpu=1 -python prediction.py $model $image $use_gpu -``` - -## Exercise -Train a image classification of birds using VGG model and CUB-200 dataset. The birds dataset can be downloaded here. It contains an image dataset with photos of 200 bird species (mostly North American). - - - - - - -## Delve into Details -### Convolutional Neural Network -A Convolutional Neural Network is a feedforward neural network that uses convolution layers. It is very suitable for building neural networks that process and understand images. A standard convolutional neural network is shown below: - -![Convolutional Neural Network](./lenet.png) - -Convolutional Neural Network contains the following layers: - -- Convolutional layer: It uses convolution operation to extract features from an image or a feature map. -- Pooling layer: It uses max-pooling to downsample feature maps. -- Fully Connected layer: It uses fully connected connections to transform features. - -Convolutional Neural Network achieves amazing performance for image classification because it exploits two important characteristics of images: *local correlation* and *spatial invariance*. By iteratively applying convolution and max-pooing operations, convolutional neural network can well represent these two characteristics of images. - - -For more details of how to define layers and their connections, please refer to the documentation of layers. diff --git a/dev/doc/_sources/tutorials/imagenet_model/resnet_model_en.md.txt b/dev/doc/_sources/tutorials/imagenet_model/resnet_model_en.md.txt deleted file mode 100644 index 478ad06193b14ba7fe02238df621db1f7b0804d4..0000000000000000000000000000000000000000 --- a/dev/doc/_sources/tutorials/imagenet_model/resnet_model_en.md.txt +++ /dev/null @@ -1,284 +0,0 @@ -# Model Zoo - ImageNet # - -[ImageNet](http://www.image-net.org/) is a popular dataset for generic object classification. This tutorial provides convolutional neural network(CNN) models for ImageNet. - -## ResNet Introduction - -ResNets from paper [Deep Residual Learning for Image Recognition](http://arxiv.org/abs/1512.03385) won the 1st place on the ILSVRC 2015 classification task. They present residual learning framework to ease the training of networks that are substantially deeper than those used previously. The residual connections are shown in following figure. The left building block is used in network of 34 layers and the right bottleneck building block is used in network of 50, 101, 152 layers . - -
![resnet_block](./resnet_block.jpg)

Figure 1. ResNet Block
We present three ResNet models, converted from the models provided by the authors. The classification errors below were measured in PaddlePaddle on the 50,000-image ILSVRC validation set, with input images in **BGR** channel order, single-scale evaluation with the shorter side resized to 256, and a single crop.
| ResNet     | Top-1 | Model Size |
| ---------- | ----- | ---------- |
| ResNet-50  | 24.9% | 99M        |
| ResNet-101 | 23.7% | 173M       |
| ResNet-152 | 23.2% | 234M       |
- -## ResNet Model - -See ```demo/model_zoo/resnet/resnet.py```. This config contains network of 50, 101 and 152 layers. You can specify layer number by adding argument like ```--config_args=layer_num=50``` in command line arguments. - -### Network Visualization - -You can get a diagram of ResNet network by running the following commands. The script generates dot file and then converts dot file to PNG file, which needs to install graphviz to convert. - -``` -cd demo/model_zoo/resnet -./net_diagram.sh -``` - -### Model Download - -``` -cd demo/model_zoo/resnet -./get_model.sh -``` -You can run above command to download all models and mean file and save them in ```demo/model_zoo/resnet/model``` if downloading successfully. - -``` -mean_meta_224 resnet_101 resnet_152 resnet_50 -``` - * resnet_50: model of 50 layers. - * resnet_101: model of 101 layers. - * resnet_152: model of 152 layers. - * mean\_meta\_224: mean file with 3 x 224 x 224 size in **BGR** order. You also can use three mean values: 103.939, 116.779, 123.68. - -### Parameter Info - -* **Convolution Layer Weight** - - As batch normalization layer is connected after each convolution layer, there is no parameter of bias and only one weight in this layer. - shape: `(Co, ky, kx, Ci)` - * Co: channle number of output feature map. - * ky: filter size in vertical direction. - * kx: filter size in horizontal direction. - * Ci: channle number of input feature map. - - 2-Dim matrix: (Co * ky * kx, Ci), saved in row-major order. - -* **Fully connected Layer Weight** - - 2-Dim matrix: (input layer size, this layer size), saved in row-major order. - -* **[Batch Normalization]() Layer Weight** - -There are four parameters in this layer. In fact, only .w0 and .wbias are the learned parameters. The other two are therunning mean and variance respectively. They will be loaded in testing. Following table shows parameters of a batch normzalization layer. -
| Parameter Name           | Number | Meaning                   |
| ------------------------ | ------ | ------------------------- |
| _res2_1_branch1_bn.w0    | 256    | gamma, scale parameter    |
| _res2_1_branch1_bn.w1    | 256    | mean value of feature map |
| _res2_1_branch1_bn.w2    | 256    | variance of feature map   |
| _res2_1_branch1_bn.wbias | 256    | beta, shift parameter     |
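As an illustration of the weight layout described above, the following numpy sketch loads one saved convolution weight and reshapes it back into the `(Co, ky, kx, Ci)` layout. The 16-byte header skip mirrors the loader shown in the next subsection; the file name and the concrete shape below are hypothetical and must match the actual layer.

```python
# A hedged sketch of recovering the (Co, ky, kx, Ci) layout of a saved
# convolution weight; the file name and shape values are hypothetical.
import numpy as np

def load_parameter(file_name):
    with open(file_name, 'rb') as f:
        f.read(16)                       # skip the header for float type
        return np.fromfile(f, dtype=np.float32)

Co, ky, kx, Ci = 256, 3, 3, 64                        # example 3x3 convolution
flat = load_parameter('_res2_1_branch1_conv.w0')      # hypothetical file name
weight_2d = flat.reshape(Co * ky * kx, Ci)            # row-major 2-D matrix
weight_4d = weight_2d.reshape(Co, ky, kx, Ci)         # back to the 4-D layout
print(weight_4d.shape)
```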
- -### Parameter Observation - -Users who want to observe the parameters can use Python to read: - -``` -import sys -import numpy as np - -def load(file_name): - with open(file_name, 'rb') as f: - f.read(16) # skip header for float type. - return np.fromfile(f, dtype=np.float32) - -if __name__=='__main__': - weight = load(sys.argv[1]) -``` - -or simply use following shell command: - -``` -od -j 16 -f _res2_1_branch1_bn.w0 -``` - -## Feature Extraction - -We provide both C++ and Python interfaces to extract features. The following examples use data in `demo/model_zoo/resnet/example` to show the extracting process in detail. - -### C++ Interface - -First, specify image data list in `define_py_data_sources2` in the config, see example `demo/model_zoo/resnet/resnet.py`. - -``` - train_list = 'train.list' if not is_test else None - # mean.meta is mean file of ImageNet dataset. - # mean.meta size : 3 x 224 x 224. - # If you use three mean value, set like: - # "mean_value:103.939,116.779,123.68;" - args={ - 'mean_meta': "model/mean_meta_224/mean.meta", - 'image_size': 224, 'crop_size': 224, - 'color': True,'swap_channel:': [2, 1, 0]} - define_py_data_sources2(train_list, - 'example/test.list', - module="example.image_list_provider", - obj="processData", - args=args) -``` - -Second, specify layers to extract features in `Outputs()` of `resnet.py`. For example, - -``` -Outputs("res5_3_branch2c_conv", "res5_3_branch2c_bn") -``` - -Third, specify model path and output directory in `extract_fea_c++.sh`, and then run the following commands. - -``` -cd demo/model_zoo/resnet -./extract_fea_c++.sh -``` - -If successful, features are saved in `fea_output/rank-00000` as follows. And you can use `load_feature_c` interface in `load_feature.py ` to load such a file. - -``` --0.115318 -0.108358 ... -0.087884;-1.27664 ... -1.11516 -2.59123; --0.126383 -0.116248 ... -0.00534909;-1.42593 ... -1.04501 -1.40769; -``` - -* Each line stores features of a sample. Here, the first line stores features of `example/dog.jpg` and second line stores features of `example/cat.jpg`. -* Features of different layers are splitted by `;`, and their order is consistent with the layer order in `Outputs()`. Here, the left features are `res5_3_branch2c_conv` layer and right features are `res5_3_branch2c_bn` layer. - -### Python Interface - -`demo/model_zoo/resnet/classify.py` is an example to show how to use Python to extract features. Following example still uses data of `./example/test.list`. Command is as follows: - -``` -cd demo/model_zoo/resnet -./extract_fea_py.sh -``` - -extract_fea_py.sh: - -``` -python classify.py \ - --job=extract \ - --conf=resnet.py\ - --use_gpu=1 \ - --mean=model/mean_meta_224/mean.meta \ - --model=model/resnet_50 \ - --data=./example/test.list \ - --output_layer="res5_3_branch2c_conv,res5_3_branch2c_bn" \ - --output_dir=features - -``` -* \--job=extract: specify job mode to extract feature. -* \--conf=resnet.py: network configure. -* \--use_gpu=1: speficy GPU mode. -* \--model=model/resnet_5: model path. -* \--data=./example/test.list: data list. -* \--output_layer="xxx,xxx": specify layers to extract features. -* \--output_dir=features: output diretcoty. - -If run successfully, you will see features saved in `features/batch_0`, this file is produced with cPickle. 
You can use `load_feature_py` interface in `load_feature.py` to open the file, and it returns a dictionary as follows: - -``` -{ -'cat.jpg': {'res5_3_branch2c_conv': array([[-0.12638293, -0.116248 , -0.11883899, ..., -0.00895038, 0.01994277, -0.00534909]], dtype=float32), 'res5_3_branch2c_bn': array([[-1.42593431, -1.28918779, -1.32414699, ..., -1.45933616, -1.04501402, -1.40769434]], dtype=float32)}, -'dog.jpg': {'res5_3_branch2c_conv': array([[-0.11531784, -0.10835785, -0.08809858, ...,0.0055237, 0.01505112, -0.08788397]], dtype=float32), 'res5_3_branch2c_bn': array([[-1.27663755, -1.18272924, -0.90937918, ..., -1.25178063, -1.11515927, -2.59122872]], dtype=float32)} -} -``` - -Observed carefully, these feature values are consistent with the above results extracted by C++ interface. - -## Prediction - -`classify.py` also can be used to predict. We provide an example script `predict.sh` to predict data in `example/test.list` using a ResNet model with 50 layers. - -``` -cd demo/model_zoo/resnet -./predict.sh -``` - -predict.sh calls the `classify.py`: - -``` -python classify.py \ - --job=predict \ - --conf=resnet.py\ - --multi_crop \ - --model=model/resnet_50 \ - --use_gpu=1 \ - --data=./example/test.list -``` -* \--job=extract: speficy job mode to predict. -* \--conf=resnet.py: network configure. -* \--multi_crop: use 10 crops and average predicting probability. -* \--use_gpu=1: speficy GPU mode. -* \--model=model/resnet_50: model path. -* \--data=./example/test.list: data list. - -If run successfully, you will see following results, where 156 and 285 are labels of the images. - -``` -Label of example/dog.jpg is: 156 -Label of example/cat.jpg is: 282 -``` diff --git a/dev/doc/_sources/tutorials/index_en.md.txt b/dev/doc/_sources/tutorials/index_en.md.txt deleted file mode 100644 index 77331a703b6f0fdf92921ebcc476325b7327e976..0000000000000000000000000000000000000000 --- a/dev/doc/_sources/tutorials/index_en.md.txt +++ /dev/null @@ -1,14 +0,0 @@ -# TUTORIALS -There are several examples and demos here. - -* [Quick Start](quick_start/index_en.md) -* [MovieLens Regression](rec/ml_regression_en.rst) -* [Image Classification](image_classification/index_en.md) -* [Sentiment Analysis](sentiment_analysis/index_en.md) -* [Semantic Role Labeling](semantic_role_labeling/index_en.md) -* [Text Generation](text_generation/index_en.md) -* [Image Auto-Generation](gan/index_en.md) - -## Model Zoo -* [ImageNet: ResNet](imagenet_model/resnet_model_en.md) -* [Embedding: Chinese Word](embedding_model/index_en.md) diff --git a/dev/doc/_sources/tutorials/quick_start/index_en.md.txt b/dev/doc/_sources/tutorials/quick_start/index_en.md.txt deleted file mode 100644 index ca110431cf921ae0480d3fb2b17c58f90a84cc0e..0000000000000000000000000000000000000000 --- a/dev/doc/_sources/tutorials/quick_start/index_en.md.txt +++ /dev/null @@ -1,562 +0,0 @@ -# Quick Start - -This tutorial will teach the basics of deep learning (DL), including how to implement many different models in PaddlePaddle. You will learn how to: - - Prepare data into the standardized format that PaddlePaddle accepts. - - Write data providers that read data into PaddlePaddle. - - Configure neural networks in PaddlePaddle layer by layer. - - Train models. - - Perform inference with trained models. - - -## Install - -To get started, please install PaddlePaddle on your computer. Throughout this tutorial, you will learn by implementing different DL models for text classification. 
- -To install PaddlePaddle, please follow the instructions here: Build and Install. - -## Overview -For the first step, you will use PaddlePaddle to build a **text classification** system. For example, suppose you run an e-commence website, and you want to analyze the sentiment of user reviews to evaluate product quality. - -For example, given the input - -``` -This monitor is fantastic. -``` - -Your classifier should output “positive”, since this text snippet shows that the user is satisfied with the product. Given this input: - -``` -The monitor breaks down two months after purchase. -``` - -the classifier should output “negative“. - -To build your text classification system, your code will need to perform five steps: -
![](./src/Pipeline_en.jpg)
- - - Preprocess data into a standardized format. - - Provide data to the learning model. - - Specify the neural network structure. - - Train the model. - - Inference (make prediction on test examples). - - -1. Preprocess data into standardized format - - In the text classification example, you will start with a text file with one training example per line. Each line contains category id (in machine learning, often denoted the target y), followed by the input text (often denoted x); these two elements are separated by a Tab. For example: ```positive [tab] This monitor is fantastic```. You will preprocess this raw data into a format that Paddle can use. - -2. Provide data to the learning model. - - You can write data providers in Python. For any required data preprocessing step, you can add the preprocessing code to the PyDataProvider Python file. - - In our text classification example, every word or character will be converted into an integer id, specified in a dictionary file. It perform a dictionary lookup in PyDataProvider to get the id. -3. Specify neural network structure. (From easy to hard, we provide 4 kinds of network configurations) - - A logistic regression model. - - A word embedding model. - - A convolutional neural network model. - - A sequential recurrent neural network model. - - You will also learn different learning algorithms. -4. Training model. -5. Inference. - -## Preprocess data into standardized format -In this example, you are going to use [Amazon electronic product review dataset](http://jmcauley.ucsd.edu/data/amazon/) to build a bunch of deep neural network models for text classification. Each text in this dataset is a product review. This dataset has two categories: “positive” and “negative”. Positive means the reviewer likes the product, while negative means the reviewer does not like the product. - -`demo/quick_start` in the [source code](https://github.com/PaddlePaddle/Paddle) provides script for downloading the preprocessed data as shown below. (If you want to process the raw data, you can use the script `demo/quick_start/data/proc_from_raw_data/get_data.sh`). - -```bash -cd demo/quick_start -./data/get_data.sh -``` - -## Transfer Data to Model -### Write Data Provider with Python -The following `dataprovider_bow.py` gives a complete example of writing data provider with Python. It includes the following parts: - -* initalizer: define the additional meta-data of the data provider and the types of the input data. -* process: Each `yield` returns a data sample. In this case, it return the text representation and category id. The order of features in the returned result needs to be consistent with the definition of the input types in `initalizer`. - -```python -from paddle.trainer.PyDataProvider2 import * - -# id of the word not in dictionary -UNK_IDX = 0 - -# initializer is called by the framework during initialization. -# It allows the user to describe the data types and setup the -# necessary data structure for later use. -# `settings` is an object. initializer need to properly fill settings.input_types. -# initializer can also store other data structures needed to be used at process(). -# In this example, dictionary is stored in settings. -# `dictionay` and `kwargs` are arguments passed from trainer_config.lr.py -def initializer(settings, dictionary, **kwargs): - # Put the word dictionary into settings - settings.word_dict = dictionary - - # setting.input_types specifies what the data types the data provider - # generates. 
- settings.input_types = [ - # The first input is a sparse_binary_vector, - # which means each dimension of the vector is either 0 or 1. It is the - # bag-of-words (BOW) representation of the texts. - sparse_binary_vector(len(dictionary)), - # The second input is an integer. It represents the category id of the - # sample. 2 means there are two labels in the dataset. - # (1 for positive and 0 for negative) - integer_value(2)] - -# Delaring a data provider. It has an initializer 'data_initialzer'. -# It will cache the generated data of the first pass in memory, so that -# during later pass, no on-the-fly data generation will be needed. -# `setting` is the same object used by initializer() -# `file_name` is the name of a file listed train_list or test_list file given -# to define_py_data_sources2(). See trainer_config.lr.py. -@provider(init_hook=initializer, cache=CacheType.CACHE_PASS_IN_MEM) -def process(settings, file_name): - # Open the input data file. - with open(file_name, 'r') as f: - # Read each line. - for line in f: - # Each line contains the label and text of the comment, separated by \t. - label, comment = line.strip().split('\t') - - # Split the words into a list. - words = comment.split() - - # convert the words into a list of ids by looking them up in word_dict. - word_vector = [settings.word_dict.get(w, UNK_IDX) for w in words] - - # Return the features for the current comment. The first is a list - # of ids representing a 0-1 binary sparse vector of the text, - # the second is the integer id of the label. - yield word_vector, int(label) -``` - -### Define Python Data Provider in Configuration files. -You need to add a data provider definition `define_py_data_sources2` in our network configuration. This definition specifies: - -- The path of the training and testing data (`data/train.list`, `data/test.list`). -- The location of the data provider file (`dataprovider_bow`). -- The function to call to get data. (`process`). -- Additional arguments or data. Here it passes the path of word dictionary. - -```python -from paddle.trainer_config_helpers import * - -file = "data/dict.txt" -word_dict = dict() -with open(dict_file, 'r') as f: - for i, line in enumerate(f): - w = line.strip().split()[0] - word_dict[w] = i -# define the data sources for the model. -# We need to use different process for training and prediction. -# For training, the input data includes both word IDs and labels. -# For prediction, the input data only includs word Ids. -define_py_data_sources2(train_list='data/train.list', - test_list='data/test.list', - module="dataprovider_bow", - obj="process", - args={"dictionary": word_dict}) -``` -You can refer to the following link for more detailed examples and data formats: PyDataProvider2. - -## Network Architecture -We will describe four kinds of network architectures in this section. -
![](./src/PipelineNetwork_en.jpg)
First, you will build a logistic regression model. Later, you will also get the chance to build other, more powerful network architectures.
For more detailed documentation, you can refer to the layer documentation. All configuration files are in the `demo/quick_start` directory.

### Logistic Regression
The architecture is illustrated in the following picture:
![](./src/NetLR_en.png)
- -- You need define the data for text features. The size of the data layer is the number of words in the dictionary. - -```python -word = data_layer(name="word", size=voc_dim) -``` - -- You also need to define the category id for each example. The size of the data layer is the number of labels. - -```python -label = data_layer(name="label", size=label_dim) -``` - -- It uses logistic regression model to classify the vector, and it will output the classification error during training. - - Each layer has an *input* argument that specifies its input layer. Some layers can have multiple input layers. You can use a list of the input layers as input in that case. - - *size* for each layer means the number of neurons of the layer. - - *act_type* means activation function applied to the output of each neuron independently. - - Some layers can have additional special inputs. For example, `classification_cost` needs ground truth label as input to compute classification loss and error. -```python -# Define a fully connected layer with logistic activation (also called softmax activation). -output = fc_layer(input=word, - size=label_dim, - act_type=SoftmaxActivation()) -# Define cross-entropy classification loss and error. -classification_cost(input=output, label=label) -``` - -Performance summary: You can refer to the training and testing scripts later. In order to compare different network architectures, the model complexity and test classification error are listed in the following table: - - -
| Network name        | Number of parameters | Test error |
| ------------------- | -------------------- | ---------- |
| Logistic regression | 252 KB               | 8.652%     |
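To see what this configuration computes, here is a toy numpy sketch of the forward pass of the bag-of-words logistic regression model above: a sparse 0-1 vector over the dictionary is multiplied by a weight matrix and passed through softmax. The dictionary size, word ids, and weights are illustrative only; this is not PaddlePaddle code.

```python
# A toy forward pass of the BOW logistic regression model described above.
import numpy as np

voc_dim, label_dim = 1000, 2
rng = np.random.RandomState(0)
W = rng.randn(voc_dim, label_dim) * 0.01
b = np.zeros(label_dim)

word_ids = [4, 250, 37, 998]          # the words of one review, as dictionary ids
bow = np.zeros(voc_dim)
bow[word_ids] = 1.0                   # the sparse_binary_vector representation

logits = bow.dot(W) + b
probs = np.exp(logits - logits.max())
probs /= probs.sum()                  # softmax over the two classes
print(probs)
```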
- -### Word Embedding Model -In order to use the word embedding model, you need to change the data provider a little bit to make the input words as a sequence of word IDs. The revised data provider `dataprovider_emb.py` is listed below. You only need to change initializer() for the type of the first input. It is changed from sparse_binary_vector to sequence of intergers. process() remains the same. This data provider can also be used for later sequence models. - -```python -def initializer(settings, dictionary, **kwargs): - # Put the word dictionary into settings - settings.word_dict = dictionary - settings.input_types = [ - # Define the type of the first input as a sequence of integers. - integer_value_sequence(len(dictionary)), - # Define the second input for label id - integer_value(2)] - -@provider(init_hook=initializer) -def process(settings, file_name): - ... - # omitted, it is same as the data provider for LR model -``` - -This model is very similar to the framework of logistic regression, but it uses word embedding vectors instead of a sparse vectors to represent words. -
![](./src/NetContinuous_en.png)
- The model looks up the dense word embedding vector of each word in the dictionary (each embedding vector has dimension `word_dim`). The input is a sequence of N words; the output is N word_dim-dimensional vectors.

```python
emb = embedding_layer(input=word, dim=word_dim)
```

- It averages all the word embeddings in a sentence to obtain the sentence representation (a toy numerical sketch of these two operations is given after the table below).

```python
avg = pooling_layer(input=emb, pooling_type=AvgPooling())
```

The other parts of the model are the same as in the logistic regression network.

The performance is summarized in the following table:
| Network name         | Number of parameters | Test error |
| -------------------- | -------------------- | ---------- |
| Word embedding model | 15 MB                | 8.484%     |
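For intuition, the following toy numpy sketch mirrors the two operations above: looking up an embedding vector for each word id, then averaging them into one sentence vector. The embedding table and word ids are illustrative only.

```python
# A toy sketch of embedding lookup followed by average pooling.
import numpy as np

rng = np.random.RandomState(0)
voc_dim, word_dim = 1000, 8
embedding_table = rng.randn(voc_dim, word_dim) * 0.1   # one row per word id

word_ids = [4, 250, 37, 998]                 # a sentence as word ids
emb = embedding_table[word_ids]              # look up each word's vector
sentence_vec = emb.mean(axis=0)              # average pooling over the sentence
print(sentence_vec.shape)                    # (word_dim,)
```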
- -### Convolutional Neural Network Model -Convolutional neural network converts a sequence of word embeddings into a sentence representation using temporal convolutions. You will transform the fully connected layer of the word embedding model to 3 new sub-steps. -
![](./src/NetConv_en.png)
Text convolution has 3 steps (a toy numerical sketch of these steps is given after the performance table below):
1. Get the K nearest-neighbor context of each word in a sentence and stack them into a 2D vector representation.
2. Apply temporal convolution to this representation to produce a new hidden_dim-dimensional vector.
3. Apply max-pooling to the new vectors at all the time steps in a sentence to get the sentence representation.

```python
# context_len is the convolution kernel size.
# context_start is the start of the convolution window. It can be negative; in that case, zero padding is applied.
text_conv = sequence_conv_pool(input=emb,
                               context_start=k,
                               context_len=2 * k + 1)
```

The performance is summarized in the following table:
| Network name        | Number of parameters | Test error |
| ------------------- | -------------------- | ---------- |
| Convolutional model | 16 MB                | 5.628%     |
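Here is the toy numerical sketch of the three text-convolution steps referred to above. It is for intuition only and is not PaddlePaddle's `sequence_conv_pool` implementation; all sizes are illustrative.

```python
# (1) stack the K-nearest-neighbour context of every word, (2) apply one shared
# linear map at every step (the temporal convolution), (3) max-pool over time.
import numpy as np

rng = np.random.RandomState(0)
word_dim, hidden_dim, k = 8, 16, 2          # k neighbours on each side
emb = rng.randn(10, word_dim)               # a sentence of 10 word embeddings

# Step 1: zero-pad and stack each word's (2k+1)-word context into one row.
padded = np.vstack([np.zeros((k, word_dim)), emb, np.zeros((k, word_dim))])
context = np.hstack([padded[i:i + len(emb)] for i in range(2 * k + 1)])

# Step 2: temporal convolution = one shared projection applied at every step.
W = rng.randn((2 * k + 1) * word_dim, hidden_dim) * 0.1
hidden = np.tanh(context.dot(W))            # shape: (sentence_len, hidden_dim)

# Step 3: max-pool over all time steps to get the sentence representation.
sentence_vec = hidden.max(axis=0)
print(sentence_vec.shape)                   # (hidden_dim,)
```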
- -### Recurrent Model -
![](./src/NetRNN_en.png)
You can use a recurrent neural network as the time sequence model, including a simple RNN, a GRU, or an LSTM.

- A GRU model can be specified via:

```python
gru = simple_gru(input=emb, size=gru_size)
```

- An LSTM model can be specified via:

```python
lstm = simple_lstm(input=emb, size=lstm_size)
```

You can use a single-layer LSTM model with dropout for this text classification problem. The performance is summarized in the following table:
| Network name    | Number of parameters | Test error |
| --------------- | -------------------- | ---------- |
| Recurrent model | 16 MB                | 4.812%     |
- -## Optimization Algorithm -Optimization algorithms include Momentum, RMSProp, AdaDelta, AdaGrad, Adam, and Adamax. You can use Adam optimization method here, with L2 regularization and gradient clipping, because Adam has been proved to work very well for training recurrent neural network. - -```python -settings(batch_size=128, - learning_rate=2e-3, - learning_method=AdamOptimizer(), - regularization=L2Regularization(8e-4), - gradient_clipping_threshold=25) -``` - -## Training Model -After completing data preparation and network architecture specification, you will run the training script. -
![](./src/PipelineTrain_en.png)
- -Training script: our training script is in `train.sh` file. The training arguments are listed below: - -```bash -paddle train \ ---config=trainer_config.py \ ---log_period=20 \ ---save_dir=./output \ ---num_passes=15 \ ---use_gpu=false -``` - -We do not provide examples on how to train on clusters here. If you want to train on clusters, please follow the distributed training documentation or other demos for more details. - -## Inference -You can use the trained model to perform prediction on the dataset with no labels. You can also evaluate the model on dataset with labels to obtain its test accuracy. -
![](./src/PipelineTest_en.png)
- -The test script is listed below. PaddlePaddle can evaluate a model on the data with labels specified in `test.list`. - -```bash -paddle train \ ---config=trainer_config.lstm.py \ ---use_gpu=false \ ---job=test \ ---init_model_path=./output/pass-0000x -``` - -We will give an example of performing prediction using Recurrent model on a dataset with no labels. You can refer to Python Prediction API tutorial,or other demo for the prediction process using Python. You can also use the following script for inference or evaluation. - -inference script (predict.sh): - -```bash -model="output/pass-00003" -paddle train \ - --config=trainer_config.lstm.py \ - --use_gpu=false \ - --job=test \ - --init_model_path=$model \ - --config_args=is_predict=1 \ - --predict_output_dir=. \ - -mv rank-00000 result.txt -``` -User can choose the best model base on the training log instead of model `output/pass-00003`. There are several differences between training and inference network configurations. -- You do not need labels during inference. -- Outputs need to be specified to the classification probability layer (the output of softmax layer), or the id of maximum probability (`max_id` layer). An example to output the id and probability is given in the code snippet. -- batch_size = 1. -- You need to specify the location of `test_list` in the test data. - -The results in `result.txt` is as follows, each line is one sample. - -``` -predicted_label_id;probability_of_label_0 probability_of_label_1 # the first sample -predicted_label_id;probability_of_label_0 probability_of_label_1 # the second sample -``` - - -```python -is_predict = get_config_arg('is_predict', bool, False) -trn = 'data/train.list' if not is_predict else None -tst = 'data/test.list' if not is_predict else 'data/pred.list' -obj = 'process' if not is_predict else 'process_pre' -batch_size = 128 if not is_predict else 1 -if is_predict: - maxid = maxid_layer(output) - outputs([maxid,output]) -else: - label = data_layer(name="label", size=2) - cls = classification_cost(input=output, label=label) outputs(cls) -``` - -## Summary -The scripts of data downloading, network configurations, and training scrips are in `/demo/quick_start`. The following table summarizes the performance of our network architecture on Amazon-Elec dataset(25k): - -
| Network name                    | Number of parameters | Error rate | Configuration file name |
| ------------------------------- | -------------------- | ---------- | ----------------------- |
| Logistic regression model (BOW) | 252 KB               | 8.652%     | trainer_config.lr.py    |
| Word embedding                  | 15 MB                | 8.484%     | trainer_config.emb.py   |
| Convolution model               | 16 MB                | 5.628%     | trainer_config.cnn.py   |
| Time sequence model             | 16 MB                | 4.812%     | trainer_config.lstm.py  |
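Returning to the inference output discussed above, a small helper for reading `result.txt` might look like the sketch below. It assumes the `predicted_label_id;probability_of_label_0 probability_of_label_1` line format described in the Inference section and is not part of the demo.

```python
# A hedged sketch of reading the result.txt produced by the inference script.
def read_results(path='result.txt'):
    predictions = []
    with open(path) as f:
        for line in f:
            label_part, prob_part = line.strip().split(';')
            probs = [float(p) for p in prob_part.split()]
            predictions.append((int(label_part), probs))
    return predictions

for label, probs in read_results():
    print(label, probs)
```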
- -## Appendix -### Command Line Argument - -* \--config:network architecture path. -* \--save_dir:model save directory. -* \--log_period:the logging period per batch. -* \--num_passes:number of training passes. One pass means the training would go over the whole training dataset once. -* \--config_args:Other configuration arguments. -* \--init_model_path:The path of the initial model parameter. - -By default, the trainer will save model every pass. You can also specify `saving_period_by_batches` to set the frequency of batch saving. You can use `show_parameter_stats_period` to print the statistics of the parameters, which are very useful for tuning parameters. Other command line arguments can be found in command line argument documentation。 - -### Log - -``` -TrainerInternal.cpp:160] Batch=20 samples=2560 AvgCost=0.628761 CurrentCost=0.628761 Eval: classification_error_evaluator=0.304297 CurrentEval: classification_error_evaluator=0.304297 -``` -During model training, you will see the log like the examples above: -
| Name                                        | Explanation                                                                  |
| ------------------------------------------- | ---------------------------------------------------------------------------- |
| Batch=20                                    | You have trained 20 batches.                                                 |
| samples=2560                                | You have trained 2560 examples.                                              |
| AvgCost                                     | The average cost from the first batch to the current batch.                 |
| CurrentCost                                 | The average cost of the last log_period batches.                            |
| Eval: classification_error_evaluator        | The average classification error from the first batch to the current batch. |
| CurrentEval: classification_error_evaluator | The average error rate of the last log_period batches.                      |
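For completeness, here is a minimal sketch of pulling the fields explained in the table above out of one log line; the sample line is copied from the log excerpt earlier in this section.

```python
# A minimal sketch of parsing one trainer log line into its fields.
import re

line = ("TrainerInternal.cpp:160] Batch=20 samples=2560 AvgCost=0.628761 "
        "CurrentCost=0.628761 Eval: classification_error_evaluator=0.304297 "
        "CurrentEval: classification_error_evaluator=0.304297")

batch = int(re.search(r'Batch=(\d+)', line).group(1))
samples = int(re.search(r'samples=(\d+)', line).group(1))
avg_cost = float(re.search(r'AvgCost=([\d.]+)', line).group(1))
current_cost = float(re.search(r'CurrentCost=([\d.]+)', line).group(1))
# The evaluator name appears twice: first the running average (Eval:),
# then the last-log_period average (CurrentEval:).
errors = re.findall(r'classification_error_evaluator=([\d.]+)', line)
avg_error, current_error = (float(e) for e in errors)
print(batch, samples, avg_cost, current_cost, avg_error, current_error)
```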
diff --git a/dev/doc/_sources/tutorials/rec/ml_dataset_en.md.txt b/dev/doc/_sources/tutorials/rec/ml_dataset_en.md.txt deleted file mode 100644 index 25dea5c4afbf1ce1c1ac6195cbd245b116459e2e..0000000000000000000000000000000000000000 --- a/dev/doc/_sources/tutorials/rec/ml_dataset_en.md.txt +++ /dev/null @@ -1,111 +0,0 @@ -```eval_rst -.. _demo_ml_dataset: -``` - -# MovieLens Dataset - -The [MovieLens Dataset](http://grouplens.org/datasets/movielens/) was collected by GroupLens Research. -The data set contains some user information, movie information, and many movie ratings from \[1-5\]. -The data sets have many version depending on the size of set. -We use [MovieLens 1M Dataset](http://files.grouplens.org/datasets/movielens/ml-1m.zip) as a demo dataset, which contains -1 million ratings from 6000 users on 4000 movies. Released 2/2003. - -## Dataset Features - -In [ml-1m Dataset](http://files.grouplens.org/datasets/movielens/ml-1m.zip), there are many features in these dataset. -The data files (which have ".dat" extension) in [ml-1m Dataset](http://files.grouplens.org/datasets/movielens/ml-1m.zip) -is basically CSV file that delimiter is "::". The description in README we quote here. - -### RATINGS FILE DESCRIPTION(ratings.dat) - - -All ratings are contained in the file "ratings.dat" and are in the -following format: - -UserID::MovieID::Rating::Timestamp - -- UserIDs range between 1 and 6040 -- MovieIDs range between 1 and 3952 -- Ratings are made on a 5-star scale (whole-star ratings only) -- Timestamp is represented in seconds since the epoch as returned by time(2) -- Each user has at least 20 ratings - -### USERS FILE DESCRIPTION(users.dat) - -User information is in the file "users.dat" and is in the following -format: - -UserID::Gender::Age::Occupation::Zip-code - -All demographic information is provided voluntarily by the users and is -not checked for accuracy. Only users who have provided some demographic -information are included in this data set. 
- -- Gender is denoted by a "M" for male and "F" for female -- Age is chosen from the following ranges: - - * 1: "Under 18" - * 18: "18-24" - * 25: "25-34" - * 35: "35-44" - * 45: "45-49" - * 50: "50-55" - * 56: "56+" - -- Occupation is chosen from the following choices: - - * 0: "other" or not specified - * 1: "academic/educator" - * 2: "artist" - * 3: "clerical/admin" - * 4: "college/grad student" - * 5: "customer service" - * 6: "doctor/health care" - * 7: "executive/managerial" - * 8: "farmer" - * 9: "homemaker" - * 10: "K-12 student" - * 11: "lawyer" - * 12: "programmer" - * 13: "retired" - * 14: "sales/marketing" - * 15: "scientist" - * 16: "self-employed" - * 17: "technician/engineer" - * 18: "tradesman/craftsman" - * 19: "unemployed" - * 20: "writer" - -### MOVIES FILE DESCRIPTION(movies.dat) - -Movie information is in the file "movies.dat" and is in the following -format: - -MovieID::Title::Genres - -- Titles are identical to titles provided by the IMDB (including -year of release) -- Genres are pipe-separated and are selected from the following genres: - - * Action - * Adventure - * Animation - * Children's - * Comedy - * Crime - * Documentary - * Drama - * Fantasy - * Film-Noir - * Horror - * Musical - * Mystery - * Romance - * Sci-Fi - * Thriller - * War - * Western - -- Some MovieIDs do not correspond to a movie due to accidental duplicate -entries and/or test entries -- Movies are mostly entered by hand, so errors and inconsistencies may exist diff --git a/dev/doc/_sources/tutorials/rec/ml_regression_en.rst.txt b/dev/doc/_sources/tutorials/rec/ml_regression_en.rst.txt deleted file mode 100644 index 993b9a516f134ff8b59e8755b721f76c8f32f0fd..0000000000000000000000000000000000000000 --- a/dev/doc/_sources/tutorials/rec/ml_regression_en.rst.txt +++ /dev/null @@ -1,348 +0,0 @@ -Regression MovieLens Ratting -============================ - -Here we demonstrate a **Cosine Similarity Regression** job in movie lens dataset. -This demo will show how paddle does (word) embedding job, -handles the similarity regression, -the character-level convolutional networks for text, and how does paddle handle -multiple types of inputs. -Note that the model structure is not fine-tuned and just a demo to show how paddle works. - - -YOU ARE WELCOME TO BUILD A BETTER DEMO -BY USING PADDLEPADDLE, AND LET US KNOW TO MAKE THIS DEMO BETTER. - -Data Preparation -```````````````` -Download and extract dataset -'''''''''''''''''''''''''''' -We use :ref:`demo_ml_dataset` here. -To download and unzip the dataset, simply run the following commands. - -.. code-block:: bash - - cd demo/recommendation/data - ./ml_data.sh - -And the directory structure of :code:`demo/recommendation/data/ml-1m` is: - -.. code-block:: text - - +--ml-1m - +--- movies.dat # movie features - +--- ratings.dat # ratings - +--- users.dat # user features - +--- README # dataset description - -Field config file -''''''''''''''''' -**Field config file** is used to specify the fields of the dataset and the file format, -i.e, specific **WHAT** type it is in each feature file. - -The field config file of ml-1m shows in :code:`demo/recommendation/data/config.json`. -It specifics the field types and file names: 1) there are four types of field for user file\: id, gender, age and occupation; -2) the filename is "users.dat", and the delimiter of file is "::". - -.. include:: ../../../demo/recommendation/data/config.json - :code: json - :literal: - -Preprocess Data -``````````````` -You need to install python 3rd party libraries. 
-IT IS HIGHLY RECOMMEND TO USE VIRTUALENV MAKE A CLEAN PYTHON ENVIRONMENT. - -.. code-block:: bash - - pip install -r requirements.txt - -The general command for preprocessing the dataset is: - -.. code-block:: bash - - cd demo/recommendation - ./preprocess.sh - -And the detail steps are introduced as follows. - -Extract Movie/User features to python object -''''''''''''''''''''''''''''''''''''''''''''' - -There are many features in movie or user in movielens 1m dataset. -Each line of rating file just provides a Movie/User id to refer each movie or user. -We process the movie/user feature file first, and pickle the feature (**Meta**) object as a file. - -Meta config file -................ - -**Meta config file** is used to specific **HOW** to parse each field in dataset. -It could be translated from field config file, or written by hand. -Its file format could be either json or yaml syntax file. Parser will automatically choose the file format by extension name. - -To convert Field config file to meta config file, just run: - -.. code-block:: bash - - cd demo/recommendation/data - python config_generator.py config.json > meta_config.json - -The meta config file shows below: - -.. include:: ../../../demo/recommendation/data/meta_config.json - :code: json - :literal: - -There are two kinds of features in meta\: movie and user. - -* in movie file, whose name is movies.dat - * we just split each line by "::" - * pos 0 is id. - * pos 1 feature: - * name is title. - * it uses regex to parse this feature. - * it is a char based word embedding feature. - * it is a sequence. - * pos 2 feature: - * name is genres. - * type is one hot dense vector. - * dictionary is auto generated by parsing, each key is split by '|' -* in user file, whose name is users.dat - * we just split each line by "::" - * pos 0 is id. - * pos 1 feature: - * name is gender - * just simple char based embedding. - * pos 2 feature: - * name is age - * just whole word embedding. - * embedding id will be sort by word. - * pos 3 feature: - * name is occupation. - * just simple whole word embedding. - - -Meta file -''''''''' - -After having meta config file, we can generate **Meta file**, a python pickle object which stores movie/user information. -The following commands could be run to generate it. - -.. code-block:: bash - - python meta_generator.py ml-1m meta.bin --config=meta_config.json - -And the structure of the meta file :code:`meta.bin` is: - -.. code-block:: text - - +--+ movie - | +--+ __meta__ - | | +--+ raw_meta # each feature meta config. list - | | | + - | | | | # ID Field, we use id as key - | | | +--+ {'count': 3883, 'max': 3952, 'is_key': True, 'type': 'id', 'min': 1} - | | | | - | | | | # Titile field, the dictionary list of embedding. - | | | +--+ {'dict': [ ... ], 'type': 'embedding', 'name': 'title', 'seq': 'sequence'} - | | | | - | | | | # Genres field, the genres dictionary - | | | +--+ {'dict': [ ... ], 'type': 'one_hot_dense', 'name': 'genres'} - | | | - | | +--+ feature_map [1, 2] # a list for raw_meta index for feature field. - | | # it means there are 2 features for each key. - | | # * 0 offset of feature is raw_meta[1], Title. - | | # * 1 offset of feature is raw_meta[2], Genres. - | | - | +--+ 1 # movie 1 features - | | + - | | +---+ [[...], [...]] # title ids, genres dense vector - | | - | +--+ 2 - | | - | +--+ ... 
- | - +--- user - +--+ __meta__ - | + - | +--+ raw_meta - | | + - | | +--+ id field as user - | | | - | | +--+ {'dict': ['F', 'M'], 'type': 'embedding', 'name': 'gender', 'seq': 'no_sequence'} - | | | - | | +--+ {'dict': ['1', '18', '25', '35', '45', '50', '56'], 'type': 'embedding', 'name': 'age', 'seq': 'no_sequence'} - | | | - | | +--+ {'dict': [...], 'type': 'embedding', 'name': 'occupation', 'seq': 'no_sequence'} - | | - | +--+ feature_map [1, 2, 3] - | - +--+ 1 # user 1 features - | - +--+ 2 - +--+ ... - - -Split Training/Testing files -'''''''''''''''''''''''''''' - -We split :code:`ml-1m/ratings.dat` into a training and testing file. The way to split file is for each user, we split the -rating by two parts. So each user in testing file will have some rating information in training file. - -Use :code:`separate.py` to separate the training and testing file. - -.. code-block:: bash - - python split.py ml-1m/ratings.dat --delimiter="::" --test_ratio=0.1 - -Then two files will be generated\: :code:`ml-1m/ratings.dat.train` and :code:`ml-1m/rating.data.test`. -Move them to workspace :code:`data`, shuffle the train file, and prepare the file list for paddle train. - -.. code-block:: bash - - shuf ml-1m/ratings.dat.train > ratings.dat.train - cp ml-1m/ratings.dat.test . - echo "./data/ratings.dat.train" > train.list - echo "./data/ratings.dat.test" > test.list - - -Neural Network Configuration -```````````````````````````` - -Trainer Config File -''''''''''''''''''' - -The network structure shows below. - -.. image:: rec_regression_network.png - :align: center - :alt: rec_regression_network - -The demo's neural network config file :code:`trainer_config.py` show as below. - -.. literalinclude:: ../../../demo/recommendation/trainer_config.py - :language: python - :lines: 15- - -In this :code:`trainer_config.py`, we just map each feature type to -a feature vector, following shows how to map each feature to a vector shows below. - -* :code:`id`\: Just simple embedding, and then add to fully connected layer. -* :code:`embedding`\: - - if is_sequence, get the embedding and do a text convolutional operation, - get the average pooling result. - - if not sequence, get the embedding and add to fully connected layer. -* :code:`one_host_dense`\: - - just two fully connected layer. - -Then we combine each features of movie into one movie feature by a -:code:`fc_layer` with multiple inputs, and do the same thing to user features, -get one user feature. Then we calculate the cosine similarity of these two -features. - -In these networks, we use several APIs in :ref:`api_trainer_config` . There are - -* Data Layer, :ref:`api_trainer_config_helpers_layers_data_layer` -* Fully Connected Layer, :ref:`api_trainer_config_helpers_layers_fc_layer` -* Embedding Layer, :ref:`api_trainer_config_helpers_layers_embedding_layer` -* Context Projection Layer, :ref:`api_trainer_config_helpers_layers_context_projection` -* Pooling Layer, :ref:`api_trainer_config_helpers_layers_pooling_layer` -* Cosine Similarity Layer, :ref:`api_trainer_config_helpers_layers_cos_sim` -* Text Convolution Pooling Layer, :ref:`api_trainer_config_helpers_network_text_conv_pool` -* Declare Python Data Sources :ref:`api_trainer_config_helpers_data_sources`. - -Data Provider -''''''''''''' - -.. literalinclude:: ../../../demo/recommendation/dataprovider.py - :language: python - :lines: 15- - -The data provider just read the meta.bin and rating file, yield each sample for training. 
-In this :code:`dataprovider.py`, we should set\: - -* obj.slots\: The feature types and dimension. -* use_seq\: Whether this :code:`dataprovider.py` in sequence mode or not. -* process\: Return each sample of data to :code:`paddle`. - -The data provider details document see :ref:`api_pydataprovider2`. - -Train -````` - -After prepare data, config network, writting data provider, now we can run paddle training. - -The :code:`run.sh` is shown as follow: - -.. literalinclude:: ../../../demo/recommendation/run.sh - :language: bash - :lines: 16- - -It just start a paddle training process, write the log to :code:`log.txt`, -then print it on screen. - -Each command line argument in :code:`run.sh`, please refer to the :ref:`cmd_line_index` page. The short description of these arguments is shown as follow. - -* config\: Tell paddle which file is neural network configuration. -* save_dir\: Tell paddle save model into :code:`./output`. -* use_gpu\: Use gpu or not. Default is false. -* trainer_count\: The compute thread in one machine. -* test_all_data_in_one_period\: Test All Data during one test period. Otherwise, - will test a :code:`batch_size` data in one test period. -* log_period\: Print log after train :code:`log_period` batches. -* dot_period\: Print a :code:`.` after train :code:`dot_period` batches. -* num_passes\: Train at most :code:`num_passes`. - -If training process starts successfully, the output likes follow: - -.. code-block:: text - - I0601 08:07:22.832059 10549 TrainerInternal.cpp:157] Batch=100 samples=160000 AvgCost=4.13494 CurrentCost=4.13494 Eval: CurrentEval: - - I0601 08:07:50.672627 10549 TrainerInternal.cpp:157] Batch=200 samples=320000 AvgCost=3.80957 CurrentCost=3.48421 Eval: CurrentEval: - - I0601 08:08:18.877369 10549 TrainerInternal.cpp:157] Batch=300 samples=480000 AvgCost=3.68145 CurrentCost=3.42519 Eval: CurrentEval: - - I0601 08:08:46.863963 10549 TrainerInternal.cpp:157] Batch=400 samples=640000 AvgCost=3.6007 CurrentCost=3.35847 Eval: CurrentEval: - - I0601 08:09:15.413025 10549 TrainerInternal.cpp:157] Batch=500 samples=800000 AvgCost=3.54811 CurrentCost=3.33773 Eval: CurrentEval: - I0601 08:09:36.058670 10549 TrainerInternal.cpp:181] Pass=0 Batch=565 samples=902826 AvgCost=3.52368 Eval: - I0601 08:09:46.215489 10549 Tester.cpp:101] Test samples=97383 cost=3.32155 Eval: - I0601 08:09:46.215966 10549 GradientMachine.cpp:132] Saving parameters to ./output/model/pass-00000 - I0601 08:09:46.233397 10549 ParamUtil.cpp:99] save dir ./output/model/pass-00000 - I0601 08:09:46.233438 10549 Util.cpp:209] copy trainer_config.py to ./output/model/pass-00000 - I0601 08:09:46.233541 10549 ParamUtil.cpp:147] fileName trainer_config.py - -The model is saved in :code:`output/` directory. You can use :code:`Ctrl-C` to stop training whenever you want. - -Evaluate and Predict -```````````````````` - -After training several passes, you can evaluate them and get the best pass. Just run - -.. code-block:: bash - - ./evaluate.sh - -You will see messages like this: - -.. code-block:: text - - Best pass is 00009, error is 3.06949, which means predict get error as 0.875998002281 - evaluating from pass output/pass-00009 - -Then, you can predict what any user will rate a movie. Just run - -.. code-block:: bash - - python prediction.py 'output/pass-00009/' - -Predictor will read user input, and predict scores. It has a command-line user interface as follows: - -.. 
code-block:: text - - Input movie_id: 9 - Input user_id: 4 - Prediction Score is 2.56 - Input movie_id: 8 - Input user_id: 2 - Prediction Score is 3.13 diff --git a/dev/doc/_sources/tutorials/semantic_role_labeling/index_en.md.txt b/dev/doc/_sources/tutorials/semantic_role_labeling/index_en.md.txt deleted file mode 100644 index 92d7c634832119c718711a57c16f69492d405f28..0000000000000000000000000000000000000000 --- a/dev/doc/_sources/tutorials/semantic_role_labeling/index_en.md.txt +++ /dev/null @@ -1,204 +0,0 @@ -```eval_rst -.. _semantic_role_labeling: -``` - -# Semantic Role labeling Tutorial # - -Semantic role labeling (SRL) is a form of shallow semantic parsing whose goal is to discover the predicate-argument structure of each predicate in a given input sentence. SRL is useful as an intermediate step in a wide range of natural language processing tasks, such as information extraction. automatic document categorization and question answering. An instance is as following [1]: - - [ A0 He ] [ AM-MOD would ][ AM-NEG n’t ] [ V accept] [ A1 anything of value ] from [A2 those he was writing about ]. - -- V: verb -- A0: acceptor -- A1: thing accepted -- A2: accepted-from -- A3: Attribute -- AM-MOD: modal -- AM-NEG: negation - -Given the verb "accept", the chunks in sentence would play certain semantic roles. Here, the label scheme is from Penn Proposition Bank. - -To this date, most of the successful SRL systems are built on top of some form of parsing results where pre-defined feature templates over the syntactic structure are used. This tutorial will present an end-to-end system using deep bidirectional long short-term memory (DB-LSTM)[2] for solving the SRL task, which largely outperforms the previous state-of-the-art systems. The system regards SRL task as the sequence labelling problem. - -## Data Description -The relevant paper[2] takes the data set in CoNLL-2005&2012 Shared Task for training and testing. Accordingto data license, the demo adopts the test data set of CoNLL-2005, which can be reached on website. - -To download and process the original data, user just need to execute the following command: - -```bash -cd data -./get_data.sh -``` -Several new files appear in the `data `directory as follows. -```bash -conll05st-release:the test data set of CoNll-2005 shared task -test.wsj.words:the Wall Street Journal data sentences -test.wsj.props: the propositional arguments -feature: the extracted features from data set -``` - -## Training -### DB-LSTM -Please refer to the Sentiment Analysis demo to learn more about the long short-term memory unit. - -Unlike Bidirectional-LSTM that used in Sentiment Analysis demo, the DB-LSTM adopts another way to stack LSTM layer. First a standard LSTM processes the sequence in forward direction. The input and output of this LSTM layer are taken by the next LSTM layer as input, processed in reversed direction. These two standard LSTM layers compose a pair of LSTM. Then we stack LSTM layers pair after pair to obtain the deep LSTM model. - -The following figure shows a temporal expanded 2-layer DB-LSTM network. -
![pic](./src/network_arch.png)
- -### Features -Two input features play an essential role in this pipeline: predicate (pred) and argument (argu). Two other features: predicate context (ctx-p) and region mark (mr) are also adopted. Because a single predicate word can not exactly describe the predicate information, especially when the same words appear more than one times in a sentence. With the predicate context, the ambiguity can be largely eliminated. Similarly, we use region mark mr = 1 to denote the argument position if it locates in the predicate context region, or mr = 0 if does not. These four simple features are all we need for our SRL system. Features of one sample with context size set to 1 is showed as following[2]: -
![pic](./src/feature.jpg)
- -In this sample, the coresponding labelled sentence is: - -[ A1 A record date ] has [ AM-NEG n't ] been [ V set ] . - -In the demo, we adopt the feature template as above, consists of : `argument`, `predicate`, `ctx-p (p=-1,0,1)`, `mark` and use `B/I/O` scheme to label each argument. These features and labels are stored in `feature` file, and separated by `\t`. - -### Data Provider - -`dataprovider.py` is the python file to wrap data. `hook()` function is to define the data slots for network. The Six features and label are all IndexSlots. -``` -def hook(settings, word_dict, label_dict, **kwargs): - settings.word_dict = word_dict - settings.label_dict = label_dict - #all inputs are integral and sequential type - settings.slots = [ - integer_value_sequence(len(word_dict)), - integer_value_sequence(len(predicate_dict)), - integer_value_sequence(len(word_dict)), - integer_value_sequence(len(word_dict)), - integer_value_sequence(len(word_dict)), - integer_value_sequence(len(word_dict)), - integer_value_sequence(len(word_dict)), - integer_value_sequence(2), - integer_value_sequence(len(label_dict))] -``` -The corresponding data iterator is as following: -``` -@provider(init_hook=hook, should_shuffle=True, calc_batch_size=get_batch_size, - can_over_batch_size=False, cache=CacheType.CACHE_PASS_IN_MEM) -def process(settings, file_name): - with open(file_name, 'r') as fdata: - for line in fdata: - sentence, predicate, ctx_n2, ctx_n1, ctx_0, ctx_p1, ctx_p2, mark, label = \ - line.strip().split('\t') - - words = sentence.split() - sen_len = len(words) - word_slot = [settings.word_dict.get(w, UNK_IDX) for w in words] - - predicate_slot = [settings.predicate_dict.get(predicate)] * sen_len - ctx_n2_slot = [settings.word_dict.get(ctx_n2, UNK_IDX)] * sen_len - ctx_n1_slot = [settings.word_dict.get(ctx_n1, UNK_IDX)] * sen_len - ctx_0_slot = [settings.word_dict.get(ctx_0, UNK_IDX)] * sen_len - ctx_p1_slot = [settings.word_dict.get(ctx_p1, UNK_IDX)] * sen_len - ctx_p2_slot = [settings.word_dict.get(ctx_p2, UNK_IDX)] * sen_len - - marks = mark.split() - mark_slot = [int(w) for w in marks] - - label_list = label.split() - label_slot = [settings.label_dict.get(w) for w in label_list] - yield word_slot, predicate_slot, ctx_n2_slot, ctx_n1_slot, \ - ctx_0_slot, ctx_p1_slot, ctx_p2_slot, mark_slot, label_slot -``` -The `process`function yield 9 lists which are 8 features and label. - -### Neural Network Config -`db_lstm.py` is the neural network config file to load the dictionaries and define the data provider module and network architecture during the training procedure. - -Nine `data_layer` load instances from data provider. Eight features are transformed into embedddings respectively, and mixed by `mixed_layer` . Deep bidirectional LSTM layers extract features for the softmax layer. The objective function is cross entropy of labels. - -### Run Training -The script for training is `train.sh`, user just need to execute: -```bash - ./train.sh -``` -The content in `train.sh`: -``` -paddle train \ - --config=./db_lstm.py \ - --use_gpu=0 \ - --log_period=5000 \ - --trainer_count=1 \ - --show_parameter_stats_period=5000 \ - --save_dir=./output \ - --num_passes=10000 \ - --average_test_period=10000000 \ - --init_model_path=./data \ - --load_missing_parameter_strategy=rand \ - --test_all_data_in_one_period=1 \ -2>&1 | tee 'train.log' -``` - -- \--config=./db_lstm.py : network config file. 
- \--use_gpu=false: use the CPU to train. Set it to true if you have installed the GPU version of PaddlePaddle and want to train on the GPU; note that, for now, crf_layer does not support the GPU.
- \--log_period=5000: print a log line every 5000 batches.
- \--trainer_count=1: set the thread number (or GPU count).
- \--show_parameter_stats_period=5000: show parameter statistics every 5000 batches.
- \--save_dir=./output: output path for saving models.
- \--num_passes=10000: set the number of passes; one pass in PaddlePaddle means training on all samples in the dataset once.
- \--average_test_period=10000000: test on the averaged parameters every average_test_period batches.
- \--init_model_path=./data: parameter initialization path.
- \--load_missing_parameter_strategy=rand: randomly initialize parameters that do not exist in the initial model.
- \--test_all_data_in_one_period=1: test all data in one period.

After training, the models will be saved in the directory `output`. Our training curve is as follows:
![pic](./src/curve.jpg)
- -### Run testing -The script for testing is `test.sh`, user just need to execute: -```bash - ./test.sh -``` -The main part in `tesh.sh` -``` -paddle train \ - --config=./db_lstm.py \ - --model_list=$model_list \ - --job=test \ - --config_args=is_test=1 \ -``` - - - \--config=./db_lstm.py: network config file - - \--model_list=$model_list.list: model list file - - \--job=test: indicate the test job - - \--config_args=is_test=1: flag to indicate test - - \--test_all_data_in_one_period=1: test all data in 1 period - - -### Run prediction -The script for prediction is `predict.sh`, user just need to execute: -```bash - ./predict.sh - -``` -In `predict.sh`, user should offer the network config file, model path, label file, word dictionary file, feature file -``` -python predict.py - -c $config_file \ - -w $best_model_path \ - -l $label_file \ - -p $predicate_dict_file \ - -d $dict_file \ - -i $input_file \ - -o $output_file -``` - -`predict.py` is the main executable python script, which includes functions: load model, load data, data prediction. The network model will output the probability distribution of labels. In the demo, we take the label with maximum probability as result. User can also implement the beam search or viterbi decoding upon the probability distribution matrix. - -After prediction, the result is saved in `predict.res`. - -## Reference -[1] Martha Palmer, Dan Gildea, and Paul Kingsbury. The Proposition Bank: An Annotated Corpus of Semantic Roles , Computational Linguistics, 31(1), 2005. - -[2] Zhou, Jie, and Wei Xu. "End-to-end learning of semantic role labeling using recurrent neural networks." Proceedings of the Annual Meeting of the Association for Computational Linguistics. 2015. diff --git a/dev/doc/_sources/tutorials/sentiment_analysis/index_en.md.txt b/dev/doc/_sources/tutorials/sentiment_analysis/index_en.md.txt deleted file mode 100644 index bb7681db44ca6f286ad6935ddfecb9becb429192..0000000000000000000000000000000000000000 --- a/dev/doc/_sources/tutorials/sentiment_analysis/index_en.md.txt +++ /dev/null @@ -1,328 +0,0 @@ -# Sentiment Analysis Tutorial - -Sentiment analysis has many applications. A basic task in sentiment analysis is classifying the polarity of a given text at the document, sentence or feature/aspect level. One simple example is to classify the customer reviews in a shopping website, a tourism website, and group buying websites like Amazon, TaoBao, Tmall etc. - -Sentiment analysis is also used to monitor social media based on large amount of reviews or blogs. For example, the researchers analyzed several surveys on consumer confidence and political opinion, found they correlate to sentiment word frequencies in contemporaneous Twitter messages [1]. Another example is to forecast stock movements through analyzing the text content of a daily Twitter blog [2]. - -On the other hand, grabbing the user comments of products and analyzing their sentiment are useful to understand user preferences for companies, products, even competing products. - -This tutorial will guide you through the process of training a Long Short Term Memory (LSTM) Network to classify the sentiment of sentences from [Large Movie Review Dataset](http://ai.stanford.edu/~amaas/data/sentiment/), sometimes known as the Internet Movie Database (IMDB). This dataset contains movie reviews along with their associated binary sentiment polarity labels, namely positive and negative. So randomly guessing yields 50% accuracy. 
- -## Data Preparation - -### IMDB Data Introduction - -Before training models, we need to preprocess the data and build a dictionary. First, you can use following script to download IMDB dataset and [Moses](http://www.statmt.org/moses/) tool, which is a statistical machine translation system. We provide a data preprocessing script, which is capable of handling not only IMDB data, but also other user-defined data. In order to use the pre-written script, it needs to move labeled train and test samples to another path, which has been done in `get_imdb.sh`. - -``` -cd demo/sentiment/data -./get_imdb.sh -``` -If the data is obtained successfuly, you will see the following files at ```./demo/sentiment/data```: - -``` -aclImdb get_imdb.sh imdb mosesdecoder-master -``` - -* aclImdb: raw dataset downloaded from website. -* imdb: only contains train and test data. -* mosesdecoder-master: Moses tool. - -IMDB dataset contains 25,000 highly polar movie reviews for training, and 25,000 for testing. A negative review has a score ≤ 4 out of 10, and a positive review has a score ≥ 7 out of 10. After running `./get_imdb.sh`, we can find the dataset has the following structure in `aclImdb`. - -``` -imdbEr.txt imdb.vocab README test train -``` -* train: train sets. -* test : test sets. -* imdb.vocab: dictionary. -* imdbEr.txt: expected rating for each token in imdb.vocab. -* README: data documentation. - -The file in train set directory is as follows. The test set also contains them except `unsup` and `urls_unsup.txt`. - -``` -labeledBow.feat neg pos unsup unsupBow.feat urls_neg.txt urls_pos.txt urls_unsup.txt -``` - -* pos: positive samples, contains 12,500 txt files, each file is one movie review. -* neg: negative samples, contains 12,500 txt files, each file is one movie review. -* unsup: unlabeled samples, contains 50,000 txt files. -* urls_xx.txt: urls of each reviews. -* xxBow.feat: already-tokenized bag of words (BoW) features. - -### IMDB Data Preparation - -In this demo, we only use labled train and test set and not use imdb.vocab as dictionary. By default, dictionary is builded on train set. Train set is shuffled and test set is not. `tokenizer.perl` in Moses tool is used to tokenize the words and punctuation. Simply execute the following command to preprcess data. - -``` -cd demo/sentiment/ -./preprocess.sh -``` -preprocess.sh: - -``` -data_dir="./data/imdb" -python preprocess.py -i data_dir -``` - -* data_dir: input data directory. -* preprocess.py: preprocess script. - -If running successfully, you will see `demo/sentiment/data/pre-imdb` directory as follows: - -``` -dict.txt labels.list test.list test_part_000 train.list train_part_000 -``` -* test\_part\_000 and train\_part\_000: all labeled test and train sets. Train sets have be shuffled. -* train.list and test.list: train and test file lists. -* dict.txt: dictionary generated on train sets by default. -* labels.txt: neg 0, pos 1, means label 0 is negative review, label 1 is positive review. - -### User-defined Data Preparation - -If you perform other sentiment classifcation task, you can prepare data as follows. We have provided the scripts to build dictionary and preprocess data. So just organize data as follows. - -``` -dataset -|----train -| |----class1 -| | |----text_files -| |----class2 -| | |----text_files -| | ... -|----test -| |----class1 -| | |----text_files -| |----class2 -| | |----text_files -| | ... -``` -* dataset: 1st directory. -* train, test: 2nd directory. -* class1,class2,...: 3rd directory. 
-* text_files: samples in plain-text format. - -All text files under the same folder belong to the same category. Each text file contains one or more samples, and each line is one sample. To shuffle the data fully, the preprocessing differs slightly for data with multiple lines in one text file: set `-m True` in `preprocess.sh` in that case. `tokenizer.perl` is used by default; if you do not need it, set `-t False` in `preprocess.sh`. - -## Training - -In this task, we use a Recurrent Neural Network (RNN) with the LSTM architecture to train the sentiment analysis model. The LSTM model was introduced primarily to overcome the problem of vanishing gradients. An LSTM network resembles a standard recurrent neural network with a hidden layer, but each ordinary node in the hidden layer is replaced by a memory cell. Each memory cell contains four main elements: an input gate, a neuron with a self-recurrent connection, a forget gate and an output gate. More details can be found in the literature [4]. The biggest advantage of the LSTM architecture is that it learns to memorize information over long time intervals without losing short-term memory. At each time step, as a new word arrives, the historical information stored in the memory block is updated to iteratively learn the sequence representation. - -
![LSTM](./lstm.png)
-
Figure 1. LSTM [3]
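To make the gate mechanism above concrete, here is a minimal NumPy sketch of a single LSTM step. It only illustrates the standard update equations, not PaddlePaddle's implementation, and the parameter names (`W`, `U`, `b`) and dimensions are hypothetical.

```python
import numpy as np

def sigmoid(x):
    return 1.0 / (1.0 + np.exp(-x))

def lstm_step(x_t, h_prev, c_prev, W, U, b):
    """One LSTM time step. W, U, b each hold four parameter blocks,
    for the input gate, forget gate, output gate and candidate cell."""
    (W_i, W_f, W_o, W_c), (U_i, U_f, U_o, U_c), (b_i, b_f, b_o, b_c) = W, U, b
    i = sigmoid(W_i @ x_t + U_i @ h_prev + b_i)        # input gate
    f = sigmoid(W_f @ x_t + U_f @ h_prev + b_f)        # forget gate
    o = sigmoid(W_o @ x_t + U_o @ h_prev + b_o)        # output gate
    c_cand = np.tanh(W_c @ x_t + U_c @ h_prev + b_c)   # candidate memory content
    c = f * c_prev + i * c_cand                        # update the memory cell
    h = o * np.tanh(c)                                 # new hidden state
    return h, c

# toy usage: input dim 4, hidden dim 3, random parameters
rng = np.random.default_rng(0)
W = [rng.standard_normal((3, 4)) for _ in range(4)]
U = [rng.standard_normal((3, 3)) for _ in range(4)]
b = [np.zeros(3) for _ in range(4)]
h, c = lstm_step(rng.standard_normal(4), np.zeros(3), np.zeros(3), W, U, b)
```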
- -Sentiment analysis is among the most typical problems in natural language understanding. It aims at predicting the attitude expressed in a sequence. Usually, only a few key words, such as adjectives and adverbs, play a major role in predicting the sentiment of a sequence or paragraph. However, some reviews or comments are very long, as in the IMDB dataset. We use an LSTM for this task because of its gate mechanism. First, it can summarize the representation from the word level to the context level over a variable context length that is adapted by the gate values. Second, it can exploit the expanded context at the sentence level, whereas most other methods only exploit n-gram level knowledge. Third, it learns the paragraph representation directly rather than by combining context-level information, which yields an end-to-end framework. - -In this demo we provide two networks, namely a bidirectional LSTM and a three-layer stacked LSTM. - -#### Bidirectional-LSTM - -The first is a bidirectional LSTM network, followed by a fully connected layer and softmax, as shown in Figure 2. - -
![BiLSTM](./bi_lstm.jpg)
-
Figure 2. Bidirectional-LSTM
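As a rough illustration only (not the actual `bidirectional_lstm_net` defined in `sentiment_net.py`), the classification head can be thought of as concatenating the forward and backward LSTM outputs and feeding them to a fully connected softmax layer. A NumPy sketch under that assumption:

```python
import numpy as np

def softmax(z):
    e = np.exp(z - z.max())
    return e / e.sum()

def bilstm_classify(h_forward, h_backward, W_fc, b_fc):
    """h_forward / h_backward: final hidden vectors of the forward and
    backward LSTMs; W_fc, b_fc: fully connected layer parameters."""
    features = np.concatenate([h_forward, h_backward])  # join both directions
    return softmax(W_fc @ features + b_fc)               # class probabilities

# toy usage: hidden size 3 per direction, 2 classes (negative / positive)
rng = np.random.default_rng(0)
probs = bilstm_classify(rng.standard_normal(3), rng.standard_normal(3),
                        rng.standard_normal((2, 6)), np.zeros(2))
```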
- -#### Stacked-LSTM -The other network is a three-layer stacked LSTM structure, shown in Figure 3. The bottom of the figure is the word embedding layer. Above it, three LSTM hidden layers are stacked, with the second LSTM running in reverse. The element-wise maximum of the hidden vectors over all time steps of the last hidden and LSTM layers is then extracted as the representation of the entire sequence. Finally, a fully connected feed-forward layer with softmax activation performs the classification task. This network follows paper [5]. - -
![StackedLSTM](./stacked_lstm.jpg)
-
Figure 3. Stacked-LSTM for sentiment analysis
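The max-over-time step described above is an element-wise max pooling across time steps. A minimal NumPy sketch (the shapes here are illustrative only, not the network's real dimensions):

```python
import numpy as np

# hidden states of one sequence from the top LSTM layer: (time_steps, hidden_size)
hidden_states = np.random.default_rng(0).standard_normal((7, 5))

# max-over-time pooling: element-wise maximum over all time steps,
# producing one fixed-size vector per sequence regardless of its length
sequence_repr = hidden_states.max(axis=0)   # shape: (5,)
```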
- -**Config** - -Switch into `demo/sentiment` directory, `trainer_config.py` file is an example of the config, containing algorithm and newtork configure. The first line imports predefined networks from `sentiment_net.py`. - -trainer_config.py: - -```python -from sentiment_net import * - -data_dir = "./data/pre-imdb" -# whether this config is used for test -is_test = get_config_arg('is_test', bool, False) -# whether this config is used for prediction -is_predict = get_config_arg('is_predict', bool, False) -dict_dim, class_dim = sentiment_data(data_dir, is_test, is_predict) - -################## Algorithm Config ##################### - -settings( - batch_size=128, - learning_rate=2e-3, - learning_method=AdamOptimizer(), - average_window=0.5, - regularization=L2Regularization(8e-4), - gradient_clipping_threshold=25 -) - -#################### Network Config ###################### -stacked_lstm_net(dict_dim, class_dim=class_dim, - stacked_num=3, is_predict=is_predict) -#bidirectional_lstm_net(dict_dim, class_dim=class_dim, is_predict=is_predict) -``` - -* **Data Definition**: - * get\_config\_arg(): get arguments setted by `--config_args=xx` in commandline argument. - * Define data provider, here using Python interface to load data. For details, you can refer to the document of PyDataProvider2. - -* **Algorithm Configuration**: - * set batch size of 128. - * set global learning rate. - * use adam optimization. - * set average sgd window. - * set L2 regularization. - * set gradient clipping threshold. -* **Network Configuration**: - * dict_dim: dictionary dimension. - * class_dim: category number, IMDB has two label, namely positive and negative label. - * `stacked_lstm_net`: predefined network as shown in Figure 3, use this network by default. - * `bidirectional_lstm_net`: predefined network as shown in Figure 2. - -**Training** - -Install PaddlePaddle first if necessary. Then you can use script `train.sh` as follows to launch local training. - -``` -cd demo/sentiment/ -./train.sh -``` - -train.sh: - -``` -config=trainer_config.py -output=./model_output -paddle train --config=$config \ - --save_dir=$output \ - --job=train \ - --use_gpu=false \ - --trainer_count=4 \ - --num_passes=10 \ - --log_period=20 \ - --dot_period=20 \ - --show_parameter_stats_period=100 \ - --test_all_data_in_one_period=1 \ - 2>&1 | tee 'train.log' -``` - -* \--config=$config: set network config. -* \--save\_dir=$output: set output path to save models. -* \--job=train: set job mode to train. -* \--use\_gpu=false: use CPU to train, set true, if you install GPU version of PaddlePaddle and want to use GPU to train. -* \--trainer\_count=4: set thread number (or GPU count). -* \--num\_passes=15: set pass number, one pass in PaddlePaddle means training all samples in dataset one time. -* \--log\_period=20: print log every 20 batches. -* \--show\_parameter\_stats\_period=100: show parameter statistic every 100 batches. -* \--test\_all_data\_in\_one\_period=1: test all data every testing. - -If the run succeeds, the output log is saved in path of `demo/sentiment/train.log` and model is saved in path of `demo/sentiment/model_output/`. The output log is explained as follows. - -``` -Batch=20 samples=2560 AvgCost=0.681644 CurrentCost=0.681644 Eval: classification_error_evaluator=0.36875 CurrentEval: classification_error_evaluator=0.36875 -... 
-Pass=0 Batch=196 samples=25000 AvgCost=0.418964 Eval: classification_error_evaluator=0.1922 -Test samples=24999 cost=0.39297 Eval: classification_error_evaluator=0.149406 -``` -- Batch=xx: means passing xx batches. -- samples=xx: means passing xx samples. -- AvgCost=xx: averaged cost from 0-th batch to current batch. -- CurrentCost=xx: current cost of latest log_period batches. -- Eval: classification\_error\_evaluator=xx: means classfication error from 0-th batch ro current batch. -- CurrentEval: classification\_error\_evaluator: current classfication error of the lates log_period batches. -- Pass=0: Going through all training set one time is called one pass. 0 means going through training set first time. - -By default, we use the `stacked_lstm_net` network, which converges at a faster rate than `bidirectional_lstm_net` when passing same sample number. If you want to use bidirectional LSTM, just remove comment in the last line and comment `stacked_lstm_net`. - -## Testing - -Testing means evaluating the labeled validation set using trained model. - -``` -cd demo/sentiment -./test.sh -``` - -test.sh: - -```bash -function get_best_pass() { - cat $1 | grep -Pzo 'Test .*\n.*pass-.*' | \ - sed -r 'N;s/Test.* error=([0-9]+\.[0-9]+).*\n.*pass-([0-9]+)/\1 \2/g' | \ - sort | head -n 1 -} - -log=train.log -LOG=`get_best_pass $log` -LOG=(${LOG}) -evaluate_pass="model_output/pass-${LOG[1]}" - -echo 'evaluating from pass '$evaluate_pass - -model_list=./model.list -touch $model_list | echo $evaluate_pass > $model_list -net_conf=trainer_config.py -paddle train --config=$net_conf \ - --model_list=$model_list \ - --job=test \ - --use_gpu=false \ - --trainer_count=4 \ - --config_args=is_test=1 \ - 2>&1 | tee 'test.log' -``` - -The function `get_best_pass` gets the best model by classification error rate for testing. In this example, We use test dataset of IMDB as validation by default. Unlike training, it needs to specify `--job=test` and model path, namely `--model_list=$model_list` here. If running successfully, the log is saved in path of `demo/sentiment/test.log`. For example, in our test, the best model is `model_output/pass-00002`, the classification error is 0.115645 as follows. - -``` -Pass=0 samples=24999 AvgCost=0.280471 Eval: classification_error_evaluator=0.115645 -``` - -## Prediction - -`predict.py` provides a predicting interface. You should install python api of PaddlePaddle before using it. One example to predict unlabeled review of IMDB is as follows. Simply running: - -``` -cd demo/sentiment -./predict.sh -``` -predict.sh: - -``` -#Note the default model is pass-00002, you shold make sure the model path -#exists or change the mode path. -model=model_output/pass-00002/ -config=trainer_config.py -label=data/pre-imdb/labels.list -cat ./data/aclImdb/test/pos/10007_10.txt | python predict.py \ - --tconf=$config\ - --model=$model \ - --label=$label \ - --dict=./data/pre-imdb/dict.txt \ - --batch_size=1 -``` - -* `cat ./data/aclImdb/test/pos/10007_10.txt` : the input sample. -* `predict.py` : predicting interface. -* `--tconf=$config` : set network configure. -* ` --model=$model` : set model path. -* `--label=$label` : set dictionary about corresponding relation between integer label and string label. -* `--dict=data/pre-imdb/dict.txt` : set dictionary. -* `--batch_size=1` : set batch size. - -Note you should make sure the default model path `model_output/pass-00002` -exists or change the model path. 
- -Predicting result of this example: - -``` -Loading parameters from model_output/pass-00002/ -./data/aclImdb/test/pos/10014_7.txt: predicting label is pos -``` -We sincerely appreciate your interest and welcome your contributions. - -## Reference -[1] Brendan O'Connor, Ramnath Balasubramanyan, Bryan R. Routledge, and Noah A. Smith. 2010. [From Tweets to Polls: Linking Text Sentiment to Public Opinion Time Series](http://homes.cs.washington.edu/~nasmith/papers/oconnor+balasubramanyan+routledge+smith.icwsm10.pdf). In ICWSM-2010.
-[2] Johan Bollen, Huina Mao, Xiaojun Zeng. 2011. [Twitter mood predicts the stock market](http://arxiv.org/abs/1010.3003), Journal of Computational Science.
-[3] Alex Graves, Marcus Liwicki, Santiago Fernandez, Roman Bertolami, Horst Bunke, and Jürgen Schmidhuber. 2009. [A novel connectionist system for unconstrained handwriting recognition](http://www.cs.toronto.edu/~graves/tpami_2009.pdf), IEEE Transactions on Pattern Analysis and Machine Intelligence, 31(5):855–868.
-[4] Zachary C. Lipton, [A Critical Review of Recurrent Neural Networks for Sequence Learning](http://arxiv.org/abs/1506.00019v1), arXiv:1506.00019.
-[5] Jie Zhou and Wei Xu; [End-to-end Learning of Semantic Role Labeling Using Recurrent Neural Networks](http://www.aclweb.org/anthology/P/P15/P15-1109.pdf); ACL-IJCNLP 2015.
diff --git a/dev/doc/_sources/tutorials/text_generation/index_en.md.txt b/dev/doc/_sources/tutorials/text_generation/index_en.md.txt deleted file mode 100644 index 5d8e667c20bd1fda64a6e11a88517d52112b72fa..0000000000000000000000000000000000000000 --- a/dev/doc/_sources/tutorials/text_generation/index_en.md.txt +++ /dev/null @@ -1,338 +0,0 @@ -# Text generation Tutorial # - -Sequence to sequence has been proven to be a powerful model for language generation. It can be used for machine translation, query rewriting, image captioning, etc. - -This tutorial guides you through training a sequence to sequence model for neural machine translation (NMT) network that translates French to English. - -We follow the paper [Neural Machine Translation by Jointly Learning to Align and Translate](http://arxiv.org/abs/1409.0473) , which details the model architecture and training procedure for good performance on WMT-14 dataset. This tutorial reproduces this result in PaddlePaddle. - -We thank @caoying for the pull request that defines the model architecture and solver configurations. - -## Data Preparation ## -### Download and Extract ### -Download the WMT-14 dataset from [http://www-lium.univ-lemans.fr/~schwenk/cslm\_joint\_paper/](http://www-lium.univ-lemans.fr/~schwenk/cslm_joint_paper/), extract it, and divide Develop and Test data into separate folder. - -- **Train data**: [bitexts (after selection)](http://www-lium.univ-lemans.fr/~schwenk/cslm_joint_paper/data/bitexts.tgz) -- **Develop and Test data**: [dev+test data](http://www-lium.univ-lemans.fr/~schwenk/cslm_joint_paper/data/dev+test.tgz) - -To do this, simply run the following commands in linux, otherwise, you need to download, extract, divide, and rename the file suffix respectively. - -```bash -cd demo/seqToseq/data -./wmt14_data.sh -``` - -We should find that the dataset `wmt14` has three folders as shown in the following table. - ------ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
| folder name | French-English parallel corpora file | number of total file | size |
| ----------- | ------------------------------------ | -------------------- | ---- |
| train_data | ccb2_pc30.src, ccb2_pc30.trg, etc | twelve | 3.55G |
| test_data | ntst1213.src, ntst1213.trg | two | 1636k |
| gen_data | ntst14.src, ntst14.trg | two | 864k |
- -- Each folder has French-English parallel corpora -- **XXX.src** are source French files; **XXX.trg** are target English files. -- The number of lines of **XXX.src** and **XXX.trg** should be the same. -- Each line is a French/English sentence. -- There is a one-to-one correspondence between the sentence at the i-th line of **XXX.src** and **XXX.trg**. - -### User Defined Dataset ### - -If you need to do other sequence-to-sequence tasks, such as Paraphrasing, you only need to organize the data as follows, and place them in `demo/seqToseq/data`: - - dataset - train - file1.src file1.trg - file2.src file2.trg - ...... - test - file1.src file1.trg - file2.src file2.trg - ...... - gen - file1.src file1.trg - file2.src file2.trg - ...... -- 1st directory: dataset folder name -- 2nd directory: folder of train, test, and gen. The names of these three folders are fixed. -- 3rd file: Source-Target parallel corpora files. - - **XXX.src** are source files, **XXX.trg** are target files. - - Each line of the file must be a sequence. - - There should be a one-to-one correspondence between the i-th sequence of **XXX.src** and **XXX.trg**. - -## Data Preprocess ## -### Preprocessing Workflow ### -- Concat each Source-Target parallel corpora to be one file: - - concat each **XXX.src** and **XXX.trg** to be **XXX**. - - the i-th line of **XXX** = the i-th line of **XXX.src** + '\t' + the i-th line of **XXX.trg** -- Build source and target dictionary of train data, each dictionary has DICTSIZE words: - - the most frequent (DICTSIZE-3) words - - 3 special token: - - ``: the start of a sequence - - ``: the end of a sequence - - ``: a word not included in dictionary - -### Preprocessing Command and Result -The general command for preprocessing the dataset is: - -```python -cd demo/seqToseq/ -python preprocess.py -i INPUT [-d DICTSIZE] [-m] -``` - -- `-i INPUT`: the path of input original dataset -- `-d DICTSIZE`: the specified word count of dictionary, if not set, dictionary will contain all the words in input dataset -- `-m --mergeDict`: merge source and target dictionary, thus, two dictionaries have the same context - -And you will see messages like this: - - concat parallel corpora for dataset - build source dictionary for train data - build target dictionary for train data - dictionary size is XXX - -Here, you can simply run the command: - -```python -python preprocess.py -i data/wmt14 -d 30000 -``` - -It will take several minutes, and store the preprocessed dataset in `demo/seqToseq/data/pre-wmt14`, the directory has following structure. - - train test gen train.list test.list gen.list src.dict trg.dict - -- **train, test, gen**: folder contains French-English parallel corpora of train data, test data and gen data respectively. Each line of file in folder contains two parts, the former is a French sequence, and the latter is a corresponding English sequence. -- **train.list, test.list, gen.list**: text contains a file list in train folder, test folder and gen folder respectively -- **src.dict, trg.dict**: source (French) / target (English) dictionary, each dictionary has 30000 words: the most frequent 29997 words and 3 special token - -## Model Training ## -### Introduction ### - -Neural machine translation (NMT) aims at building a single neural network that can be jointly tuned to maximize translation performance. Recently proposed NMT models often belong to a family of encoder–decoder models. 
Encoder-Decoder models encode a source sentence into a fixed-length vector from which a decoder generates a target sentence. - -In this task, we use an extension to the encoder–decoder model which learns to align and translate jointly. Each time the model generates a word in a translation, it searches for a set of positions in the source sentence for the most relevant information. The decoder predicts a target word based on the context vectors associated with these source positions and all the previous generated target words. For more detailed explanation, readers can refer to paper [Neural Machine Translation by Jointly Learning to Align and Translate](http://arxiv.org/abs/1409.0473). - -The most distinguishing feature of this model is that it doesn't encode an input sentence into a single fixed-length vector. Instead, it encodes the input sentence into a sequence of vectors, where one vector corresponds to an input element. A subset of these vectors is chosen adaptively while decoding the translated sentence. This frees a NMT model from having to squash all the information of a source sentence, regardless of its length, into a fixed-length vector. The improvement of this model is more apparent for longer sentences, but the improvement can be observed for sentences of any length. -
![](./encoder-decoder-attention-model.png)
-
Figure 1. Encoder-Decoder-Attention-Model
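As an illustration of the attention step described above, the context vector is a softmax-weighted sum of the encoder's annotation vectors. The sketch below uses a plain dot-product score as a stand-in for the learned alignment network the real model uses, so the names and shapes are assumptions:

```python
import numpy as np

def attention_context(decoder_state, encoder_states):
    """encoder_states: (src_len, dim) annotation vectors, one per source word;
    decoder_state: (dim,) current hidden state of the decoder."""
    scores = encoder_states @ decoder_state      # alignment score per source position
    weights = np.exp(scores - scores.max())
    weights /= weights.sum()                     # softmax over source positions
    return weights @ encoder_states              # weighted sum = context vector

# toy usage: source length 6, vector dimension 8
rng = np.random.default_rng(0)
context = attention_context(rng.standard_normal(8), rng.standard_normal((6, 8)))
```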
- -### Training Model in PaddlePaddle ### -We need to create a model config file before training. Here is an example `demo/seqToseq/translation/train.conf`. The first three lines import python function for defining network, and define the job_mode and attention_mode. - -```python -from seqToseq_net import * -is_generating = False - -### Data Definiation -train_conf = seq_to_seq_data(data_dir = "./data/pre-wmt14", - is_generating = is_generating) - -### Algorithm Configuration -settings( - learning_method = AdamOptimizer(), - batch_size = 50, - learning_rate = 5e-4) - -### Network Architecture -gru_encoder_decoder(train_conf, is_generating) -``` - -1. **Data Definiation**: We define a SeqToSeq train and test data in our example. It returns train_conf as the configuration, following is its input arguments: - - data_dir: directory of train data and test data - - is\_generating: whether this config is used for generating, here is false -2. **Algorithm Configuration**: We use the SGD training algorithm (default), ADAM learning method in our example, specify batch_size as 50, and learning rate as 5e-4. -3. **Network Architecture**: We use an attention version of GRU Encoder-Decoder network in our example. It consists a bidirectional GRU as an encoder and a decoder that emulates searching through a source sentence during decoding a translation. - -### Training Command and Result### -After writing the model config, we can train the model by running the command: - -```bash -cd demo/seqToseq/translation -./train.sh -``` - -The `train.sh` is shown as follows: - -```bash -paddle train \ ---config='translation/train.conf' \ ---save_dir='translation/model' \ ---use_gpu=false \ ---num_passes=16 \ ---show_parameter_stats_period=100 \ ---trainer_count=4 \ ---log_period=10 \ ---dot_period=5 \ -2>&1 | tee 'translation/train.log' -``` -- config: set config of neural network -- save_dir: set output path to save models -- use_gpu: whether to use GPU to train, here use CPU -- num_passes: set number of passes. One pass in paddle means training all samples in dataset one time -- show_parameter_stats_period: here show parameter statistic every 100 batches -- trainer_count: set number of CPU threads or GPU devices -- log_period: here print log every 10 batches -- dot_period: here print '.' every 5 batches - -The training loss function is printed every 10 batch by default, and you will see messages like this: - - I0719 19:16:45.952062 15563 TrainerInternal.cpp:160] Batch=10 samples=500 AvgCost=198.475 CurrentCost=198.475 Eval: classification_error_evaluator=0.737155 CurrentEval: classification_error_evaluator=0.737155 - I0719 19:17:56.707319 15563 TrainerInternal.cpp:160] Batch=20 samples=1000 AvgCost=157.479 CurrentCost=116.483 Eval: classification_error_evaluator=0.698392 CurrentEval: classification_error_evaluator=0.659065 - ..... -- AvgCost: Average Cost from 0th batch to current batch -- CurrentCost: Cost in current batch -- classification\_error\_evaluator(Eval): False prediction rate for each word from 0th evaluation to current evaluation -- classification\_error\_evaluator(CurrentEval): False prediction rate for each word in current evaluation - -And when the classification\_error\_evaluator is less than 0.35, the model is trained sucessfully. - -## Text Generation ## -### Introduction ### - -Generally speaking, the NMT model is conditioned on the encodings of the source sentence, and then to predict the next target word by given the current target word. 
In the training process, the current word is always knowns as the ground truth, by contrast. In the generating process, the current word is the output of the decoder in last time step, which is accessed to from a memory in PaddlePaddle. - -Besides, we use Beam Search to generate sequences. Beam search uses breadth-first search to build its search tree. At each level of the tree, it generates all successors of the states at the current level, sorting them in increasing order of heuristic cost. However, it only stores a predetermined number of best states at each level (called the beam size). - -### Pretrained model ### -We trained the model on a cluster with 50 nodes, each node has two 6-core CPUs. We trained 16 passes in 5 days, where each pass takes 7 hours. The model_dir has 16 sub-folder, each of which contains the whole model parameters with 202MB size. And we find pass-00012 model has the highest BLEU 27.77 (see paper [BLEU: a Method for Automatic Evaluation of Machine Translation](http://www.aclweb.org/anthology/P02-1040.pdf)). To download and extract this model, simply run the following commands in linux. - -```bash -cd demo/seqToseq/data -./wmt14_model.sh -``` - -### Generating Model in PaddlePaddle ### -We need to create a model config file before translating French sequence. Here is an example `demo/seqToseq/translation/gen.conf`, the first three lines import python function for defining network, and define the job\_mode and attention\_mode. - -```python -from seqToseq_net import * -is_generating = True - -################## Data Definiation ##################### -gen_conf = seq_to_seq_data(data_dir = "./data/pre-wmt14", - is_generating = is_generating, - gen_result = "./translation/gen_result") - -############## Algorithm Configuration ################## -settings( - learning_method = AdamOptimizer(), - batch_size = 1, - learning_rate = 0) - -################# Network configure ##################### -gru_encoder_decoder(gen_conf, is_generating) -``` - -1. **Data Definiation**: We defines an SeqToSeq gen data in our example. It returns gen_conf as the configuration, following is its input arguments: - - data\_dir: directory of gen data -   - is\_generating: whether this config is used for generating, here is true -   - gen\_result: file to store the generation result -2. **Algorithm Configuration**: We use SGD traing algorithm in generation, and specify batch_size as 1 (each time generate one sequence), and learning rate as 0. -3. **Network Architecture**: Essentially the same as the training model. 
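Before running generation, it may help to see what the beam search described earlier does. The sketch below is a minimal Python illustration, not PaddlePaddle's decoder; it assumes a hypothetical `next_word_log_probs(prefix)` scoring function and a `<e>` end token, and shows how only the `beam_size` best partial translations are kept at each step:

```python
import math

def beam_search(next_word_log_probs, beam_size=3, max_len=20, eos="<e>"):
    """next_word_log_probs(prefix) -> {word: log probability of word given prefix}."""
    beams = [([], 0.0)]                            # (prefix, accumulated log prob)
    for _ in range(max_len):
        candidates = []
        for prefix, score in beams:
            if prefix and prefix[-1] == eos:       # finished hypotheses stay as-is
                candidates.append((prefix, score))
                continue
            for word, logp in next_word_log_probs(prefix).items():
                candidates.append((prefix + [word], score + logp))
        # keep only the beam_size best-scoring hypotheses
        beams = sorted(candidates, key=lambda c: c[1], reverse=True)[:beam_size]
        if all(p and p[-1] == eos for p, _ in beams):
            break
    return beams

# toy scoring function: always proposes the same two continuations
toy = lambda prefix: {"<e>": math.log(0.6), "word": math.log(0.4)}
print(beam_search(toy, beam_size=2, max_len=3))
```

The scores it returns are accumulated log probabilities, which is why the generation results below are listed from the largest (least negative) score downwards.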
- -### Generating Command and Result ### -After writing the model config, we can do text translation from French to English by running the command: - -```bash -cd demo/seqToseq/translation -./gen.sh -``` - -The `gen.sh` is shown as follows, unlike training, there are some different arguments to specify: - -```bash -paddle train \ ---job=test \ ---config='translation/gen.conf' \ ---save_dir='data/wmt14_model' \ ---use_gpu=true \ ---num_passes=13 \ ---test_pass=12 \ ---trainer_count=1 \ -2>&1 | tee 'translation/gen.log' -``` -- job: set job mode to test -- save_dir: the path of saved models -- num_passes and test_pass: loading model parameters from test_pass to (num_passes - 1), here only loads `data/wmt14_model/pass-00012` - -You will see messages like this: - - I0706 14:48:31.178915 31441 GradientMachine.cpp:143] Loading parameters from data/wmt14_model/pass-00012 - I0706 14:48:40.012039 31441 Tester.cpp:125] Batch=100 samples=100 AvgCost=0 - I0706 14:48:48.898632 31441 Tester.cpp:125] Batch=200 samples=200 AvgCost=0 - ... - -And the generating result in `demo/seqToseq/translation/gen_result` likes: - - 0 - 0 -11.1314 The about the width of the seats while large controls are at stake - 1 -11.1519 The on the width of the seats while large controls are at stake - 2 -11.5988 The about the width of the seats while large controls are at stake . - - 1 - 0 -24.4149 The dispute is between the major aircraft manufacturers about the width of the tourist seats on the flights , paving the way for a confrontation during the month of the Dubai . - 1 -26.9524 The dispute is between the major aircraft manufacturers about the width of the tourist seats on the flights , paving the way for a confrontation during the month of Dubai ' s . - 2 -27.9574 The dispute is between the major aircraft manufacturers about the width of the tourist seats on the flights , paving the way for a confrontation during the month of Dubai ' s Dubai . - ... - -- This is the beam search result, where beam size is 3 -- '0' in 1st-line and '1' in 6th-line mean the sequence-id in gen data -- Other six lines list the beam search results - - The 2nd-column is the score of beam search (from large to small) - - The 3rd-colunm is the generating English sequence -- There is 2 special tokens: - - ``: the end of a sequence - - ``: a word not included in dictionary - -### Bleu Evalutaion ### -Human evaluations of machine translation are extensive but expensive. Paper [BLEU: a Method for Automatic Evaluation of Machine Translation](http://www.aclweb.org/anthology/P02-1040.pdf) presents a method as an automated understudy to skilled human judges which substitutes for them when there is need for quick or frequent evaluations. [Moses](http://www.statmt.org/moses/) is a statistical machine translation system, and we use [multi-bleu.perl](https://github.com/moses-smt/mosesdecoder/blob/master/scripts/generic/multi-bleu.perl) of it to do Bleu Evalution. 
To download this script, simply run the following command: - -```bash -cd demo/seqToseq/translation -./moses_bleu.sh -``` - -Since the standard translation is alrealy downloaded as `data/wmt14/gen/ntst14.trg`, we can do Bleu Evalution by running the command: - -```bash -cd demo/seqToseq/translation -./eval_bleu.sh FILE BEAMSIZE -``` - -- FILE: the generation result file -- BEAMSIZE: expand width in beam search diff --git a/dev/doc/_static/ajax-loader.gif b/dev/doc/_static/ajax-loader.gif deleted file mode 100644 index 61faf8cab23993bd3e1560bff0668bd628642330..0000000000000000000000000000000000000000 Binary files a/dev/doc/_static/ajax-loader.gif and /dev/null differ diff --git a/dev/doc/_static/basic.css b/dev/doc/_static/basic.css deleted file mode 100644 index 7ed0e58edb31c235c086d98790de21cabd63f3ec..0000000000000000000000000000000000000000 --- a/dev/doc/_static/basic.css +++ /dev/null @@ -1,632 +0,0 @@ -/* - * basic.css - * ~~~~~~~~~ - * - * Sphinx stylesheet -- basic theme. - * - * :copyright: Copyright 2007-2016 by the Sphinx team, see AUTHORS. - * :license: BSD, see LICENSE for details. - * - */ - -/* -- main layout ----------------------------------------------------------- */ - -div.clearer { - clear: both; -} - -/* -- relbar ---------------------------------------------------------------- */ - -div.related { - width: 100%; - font-size: 90%; -} - -div.related h3 { - display: none; -} - -div.related ul { - margin: 0; - padding: 0 0 0 10px; - list-style: none; -} - -div.related li { - display: inline; -} - -div.related li.right { - float: right; - margin-right: 5px; -} - -/* -- sidebar --------------------------------------------------------------- */ - -div.sphinxsidebarwrapper { - padding: 10px 5px 0 10px; -} - -div.sphinxsidebar { - float: left; - width: 230px; - margin-left: -100%; - font-size: 90%; - word-wrap: break-word; - overflow-wrap : break-word; -} - -div.sphinxsidebar ul { - list-style: none; -} - -div.sphinxsidebar ul ul, -div.sphinxsidebar ul.want-points { - margin-left: 20px; - list-style: square; -} - -div.sphinxsidebar ul ul { - margin-top: 0; - margin-bottom: 0; -} - -div.sphinxsidebar form { - margin-top: 10px; -} - -div.sphinxsidebar input { - border: 1px solid #98dbcc; - font-family: sans-serif; - font-size: 1em; -} - -div.sphinxsidebar #searchbox input[type="text"] { - width: 170px; -} - -img { - border: 0; - max-width: 100%; -} - -/* -- search page ----------------------------------------------------------- */ - -ul.search { - margin: 10px 0 0 20px; - padding: 0; -} - -ul.search li { - padding: 5px 0 5px 20px; - background-image: url(file.png); - background-repeat: no-repeat; - background-position: 0 7px; -} - -ul.search li a { - font-weight: bold; -} - -ul.search li div.context { - color: #888; - margin: 2px 0 0 30px; - text-align: left; -} - -ul.keywordmatches li.goodmatch a { - font-weight: bold; -} - -/* -- index page ------------------------------------------------------------ */ - -table.contentstable { - width: 90%; - margin-left: auto; - margin-right: auto; -} - -table.contentstable p.biglink { - line-height: 150%; -} - -a.biglink { - font-size: 1.3em; -} - -span.linkdescr { - font-style: italic; - padding-top: 5px; - font-size: 90%; -} - -/* -- general index --------------------------------------------------------- */ - -table.indextable { - width: 100%; -} - -table.indextable td { - text-align: left; - vertical-align: top; -} - -table.indextable ul { - margin-top: 0; - margin-bottom: 0; - list-style-type: none; -} - -table.indextable > tbody > tr > 
td > ul { - padding-left: 0em; -} - -table.indextable tr.pcap { - height: 10px; -} - -table.indextable tr.cap { - margin-top: 10px; - background-color: #f2f2f2; -} - -img.toggler { - margin-right: 3px; - margin-top: 3px; - cursor: pointer; -} - -div.modindex-jumpbox { - border-top: 1px solid #ddd; - border-bottom: 1px solid #ddd; - margin: 1em 0 1em 0; - padding: 0.4em; -} - -div.genindex-jumpbox { - border-top: 1px solid #ddd; - border-bottom: 1px solid #ddd; - margin: 1em 0 1em 0; - padding: 0.4em; -} - -/* -- domain module index --------------------------------------------------- */ - -table.modindextable td { - padding: 2px; - border-collapse: collapse; -} - -/* -- general body styles --------------------------------------------------- */ - -div.body p, div.body dd, div.body li, div.body blockquote { - -moz-hyphens: auto; - -ms-hyphens: auto; - -webkit-hyphens: auto; - hyphens: auto; -} - -a.headerlink { - visibility: hidden; -} - -h1:hover > a.headerlink, -h2:hover > a.headerlink, -h3:hover > a.headerlink, -h4:hover > a.headerlink, -h5:hover > a.headerlink, -h6:hover > a.headerlink, -dt:hover > a.headerlink, -caption:hover > a.headerlink, -p.caption:hover > a.headerlink, -div.code-block-caption:hover > a.headerlink { - visibility: visible; -} - -div.body p.caption { - text-align: inherit; -} - -div.body td { - text-align: left; -} - -.first { - margin-top: 0 !important; -} - -p.rubric { - margin-top: 30px; - font-weight: bold; -} - -img.align-left, .figure.align-left, object.align-left { - clear: left; - float: left; - margin-right: 1em; -} - -img.align-right, .figure.align-right, object.align-right { - clear: right; - float: right; - margin-left: 1em; -} - -img.align-center, .figure.align-center, object.align-center { - display: block; - margin-left: auto; - margin-right: auto; -} - -.align-left { - text-align: left; -} - -.align-center { - text-align: center; -} - -.align-right { - text-align: right; -} - -/* -- sidebars -------------------------------------------------------------- */ - -div.sidebar { - margin: 0 0 0.5em 1em; - border: 1px solid #ddb; - padding: 7px 7px 0 7px; - background-color: #ffe; - width: 40%; - float: right; -} - -p.sidebar-title { - font-weight: bold; -} - -/* -- topics ---------------------------------------------------------------- */ - -div.topic { - border: 1px solid #ccc; - padding: 7px 7px 0 7px; - margin: 10px 0 10px 0; -} - -p.topic-title { - font-size: 1.1em; - font-weight: bold; - margin-top: 10px; -} - -/* -- admonitions ----------------------------------------------------------- */ - -div.admonition { - margin-top: 10px; - margin-bottom: 10px; - padding: 7px; -} - -div.admonition dt { - font-weight: bold; -} - -div.admonition dl { - margin-bottom: 0; -} - -p.admonition-title { - margin: 0px 10px 5px 0px; - font-weight: bold; -} - -div.body p.centered { - text-align: center; - margin-top: 25px; -} - -/* -- tables ---------------------------------------------------------------- */ - -table.docutils { - border: 0; - border-collapse: collapse; -} - -table caption span.caption-number { - font-style: italic; -} - -table caption span.caption-text { -} - -table.docutils td, table.docutils th { - padding: 1px 8px 1px 5px; - border-top: 0; - border-left: 0; - border-right: 0; - border-bottom: 1px solid #aaa; -} - -table.footnote td, table.footnote th { - border: 0 !important; -} - -th { - text-align: left; - padding-right: 5px; -} - -table.citation { - border-left: solid 1px gray; - margin-left: 1px; -} - -table.citation td { - border-bottom: none; -} - 
-/* -- figures --------------------------------------------------------------- */ - -div.figure { - margin: 0.5em; - padding: 0.5em; -} - -div.figure p.caption { - padding: 0.3em; -} - -div.figure p.caption span.caption-number { - font-style: italic; -} - -div.figure p.caption span.caption-text { -} - -/* -- field list styles ----------------------------------------------------- */ - -table.field-list td, table.field-list th { - border: 0 !important; -} - -.field-list ul { - margin: 0; - padding-left: 1em; -} - -.field-list p { - margin: 0; -} - -/* -- other body styles ----------------------------------------------------- */ - -ol.arabic { - list-style: decimal; -} - -ol.loweralpha { - list-style: lower-alpha; -} - -ol.upperalpha { - list-style: upper-alpha; -} - -ol.lowerroman { - list-style: lower-roman; -} - -ol.upperroman { - list-style: upper-roman; -} - -dl { - margin-bottom: 15px; -} - -dd p { - margin-top: 0px; -} - -dd ul, dd table { - margin-bottom: 10px; -} - -dd { - margin-top: 3px; - margin-bottom: 10px; - margin-left: 30px; -} - -dt:target, .highlighted { - background-color: #fbe54e; -} - -dl.glossary dt { - font-weight: bold; - font-size: 1.1em; -} - -.optional { - font-size: 1.3em; -} - -.sig-paren { - font-size: larger; -} - -.versionmodified { - font-style: italic; -} - -.system-message { - background-color: #fda; - padding: 5px; - border: 3px solid red; -} - -.footnote:target { - background-color: #ffa; -} - -.line-block { - display: block; - margin-top: 1em; - margin-bottom: 1em; -} - -.line-block .line-block { - margin-top: 0; - margin-bottom: 0; - margin-left: 1.5em; -} - -.guilabel, .menuselection { - font-family: sans-serif; -} - -.accelerator { - text-decoration: underline; -} - -.classifier { - font-style: oblique; -} - -abbr, acronym { - border-bottom: dotted 1px; - cursor: help; -} - -/* -- code displays --------------------------------------------------------- */ - -pre { - overflow: auto; - overflow-y: hidden; /* fixes display issues on Chrome browsers */ -} - -span.pre { - -moz-hyphens: none; - -ms-hyphens: none; - -webkit-hyphens: none; - hyphens: none; -} - -td.linenos pre { - padding: 5px 0px; - border: 0; - background-color: transparent; - color: #aaa; -} - -table.highlighttable { - margin-left: 0.5em; -} - -table.highlighttable td { - padding: 0 0.5em 0 0.5em; -} - -div.code-block-caption { - padding: 2px 5px; - font-size: small; -} - -div.code-block-caption code { - background-color: transparent; -} - -div.code-block-caption + div > div.highlight > pre { - margin-top: 0; -} - -div.code-block-caption span.caption-number { - padding: 0.1em 0.3em; - font-style: italic; -} - -div.code-block-caption span.caption-text { -} - -div.literal-block-wrapper { - padding: 1em 1em 0; -} - -div.literal-block-wrapper div.highlight { - margin: 0; -} - -code.descname { - background-color: transparent; - font-weight: bold; - font-size: 1.2em; -} - -code.descclassname { - background-color: transparent; -} - -code.xref, a code { - background-color: transparent; - font-weight: bold; -} - -h1 code, h2 code, h3 code, h4 code, h5 code, h6 code { - background-color: transparent; -} - -.viewcode-link { - float: right; -} - -.viewcode-back { - float: right; - font-family: sans-serif; -} - -div.viewcode-block:target { - margin: -1px -10px; - padding: 0 10px; -} - -/* -- math display ---------------------------------------------------------- */ - -img.math { - vertical-align: middle; -} - -div.body div.math p { - text-align: center; -} - -span.eqno { - float: right; -} - -span.eqno 
a.headerlink { - position: relative; - left: 0px; - z-index: 1; -} - -div.math:hover a.headerlink { - visibility: visible; -} - -/* -- printout stylesheet --------------------------------------------------- */ - -@media print { - div.document, - div.documentwrapper, - div.bodywrapper { - margin: 0 !important; - width: 100%; - } - - div.sphinxsidebar, - div.related, - div.footer, - #top-link { - display: none; - } -} \ No newline at end of file diff --git a/dev/doc/_static/comment-bright.png b/dev/doc/_static/comment-bright.png deleted file mode 100644 index 15e27edb12ac25701ac0ac21b97b52bb4e45415e..0000000000000000000000000000000000000000 Binary files a/dev/doc/_static/comment-bright.png and /dev/null differ diff --git a/dev/doc/_static/comment-close.png b/dev/doc/_static/comment-close.png deleted file mode 100644 index 4d91bcf57de866a901a89a2a68c0f36af1114841..0000000000000000000000000000000000000000 Binary files a/dev/doc/_static/comment-close.png and /dev/null differ diff --git a/dev/doc/_static/comment.png b/dev/doc/_static/comment.png deleted file mode 100644 index dfbc0cbd512bdeefcb1984c99d8e577efb77f006..0000000000000000000000000000000000000000 Binary files a/dev/doc/_static/comment.png and /dev/null differ diff --git a/dev/doc/_static/css/badge_only.css b/dev/doc/_static/css/badge_only.css deleted file mode 100644 index 7e17fb148c63fa9780c3dd65cef5b7593927ef62..0000000000000000000000000000000000000000 --- a/dev/doc/_static/css/badge_only.css +++ /dev/null @@ -1,2 +0,0 @@ -.fa:before{-webkit-font-smoothing:antialiased}.clearfix{*zoom:1}.clearfix:before,.clearfix:after{display:table;content:""}.clearfix:after{clear:both}@font-face{font-family:FontAwesome;font-weight:normal;font-style:normal;src:url("../font/fontawesome_webfont.eot");src:url("../font/fontawesome_webfont.eot?#iefix") format("embedded-opentype"),url("../font/fontawesome_webfont.woff") format("woff"),url("../font/fontawesome_webfont.ttf") format("truetype"),url("../font/fontawesome_webfont.svg#FontAwesome") format("svg")}.fa:before{display:inline-block;font-family:FontAwesome;font-style:normal;font-weight:normal;line-height:1;text-decoration:inherit}a .fa{display:inline-block;text-decoration:inherit}li .fa{display:inline-block}li .fa-large:before,li .fa-large:before{width:1.875em}ul.fas{list-style-type:none;margin-left:2em;text-indent:-0.8em}ul.fas li .fa{width:0.8em}ul.fas li .fa-large:before,ul.fas li .fa-large:before{vertical-align:baseline}.fa-book:before{content:""}.icon-book:before{content:""}.fa-caret-down:before{content:""}.icon-caret-down:before{content:""}.fa-caret-up:before{content:""}.icon-caret-up:before{content:""}.fa-caret-left:before{content:""}.icon-caret-left:before{content:""}.fa-caret-right:before{content:""}.icon-caret-right:before{content:""}.rst-versions{position:fixed;bottom:0;left:0;width:300px;color:#fcfcfc;background:#1f1d1d;border-top:solid 10px #343131;font-family:"Lato","proxima-nova","Helvetica Neue",Arial,sans-serif;z-index:400}.rst-versions a{color:#2980B9;text-decoration:none}.rst-versions .rst-badge-small{display:none}.rst-versions .rst-current-version{padding:12px;background-color:#272525;display:block;text-align:right;font-size:90%;cursor:pointer;color:#27AE60;*zoom:1}.rst-versions .rst-current-version:before,.rst-versions .rst-current-version:after{display:table;content:""}.rst-versions .rst-current-version:after{clear:both}.rst-versions .rst-current-version .fa{color:#fcfcfc}.rst-versions .rst-current-version .fa-book{float:left}.rst-versions .rst-current-version 
.icon-book{float:left}.rst-versions .rst-current-version.rst-out-of-date{background-color:#E74C3C;color:#fff}.rst-versions .rst-current-version.rst-active-old-version{background-color:#F1C40F;color:#000}.rst-versions.shift-up .rst-other-versions{display:block}.rst-versions .rst-other-versions{font-size:90%;padding:12px;color:gray;display:none}.rst-versions .rst-other-versions hr{display:block;height:1px;border:0;margin:20px 0;padding:0;border-top:solid 1px #413d3d}.rst-versions .rst-other-versions dd{display:inline-block;margin:0}.rst-versions .rst-other-versions dd a{display:inline-block;padding:6px;color:#fcfcfc}.rst-versions.rst-badge{width:auto;bottom:20px;right:20px;left:auto;border:none;max-width:300px}.rst-versions.rst-badge .icon-book{float:none}.rst-versions.rst-badge .fa-book{float:none}.rst-versions.rst-badge.shift-up .rst-current-version{text-align:right}.rst-versions.rst-badge.shift-up .rst-current-version .fa-book{float:left}.rst-versions.rst-badge.shift-up .rst-current-version .icon-book{float:left}.rst-versions.rst-badge .rst-current-version{width:auto;height:30px;line-height:30px;padding:0 6px;display:block;text-align:center}@media screen and (max-width: 768px){.rst-versions{width:85%;display:none}.rst-versions.shift{display:block}img{width:100%;height:auto}} -/*# sourceMappingURL=badge_only.css.map */ diff --git a/dev/doc/_static/css/override.css b/dev/doc/_static/css/override.css deleted file mode 100644 index 438a87848a0176a7857177aeb672c59f35bd8d4b..0000000000000000000000000000000000000000 --- a/dev/doc/_static/css/override.css +++ /dev/null @@ -1,506 +0,0 @@ -body { - padding-top: 80px; - background-image: none !important; - font-family: Roboto; -} -a, a:focus, a:hover, a:visited { - color: #597cf1; -} -.site-header { - position: fixed; - top: 0; - width: 100%; - left: 0; - z-index: 99; - background: #333; - height: 80px; - display: -webkit-flex; - display: -ms-flex; - display: -o-flex; - display: flex; - flex-flow: row nowrap; - justify-content: space-between; - box-shadow: #ccc 0 3px 3px; -} -.site-header > div { - height: 80px; - display: inline-block; - background-color: #2f323a; - padding: 0 30px; -} -.site-header .site-logo { - line-height: 80px; - width: 290px; - flex: 0 1 290px; -} -.site-header .site-logo > a { - display: inline-block; - width: 230px; -} -.site-header .site-nav-links { - flex: 0 1 100%; -} -.site-header .site-nav-links .site-menu { - height: 30px; - line-height: 30px; - font-size: 12px; - background: -webkit-linear-gradient(#282b33, #2f323a); - background: -o-linear-gradient(#282b33, #2f323a); - background: -moz-linear-gradient(#282b33, #2f323a); - background: linear-gradient(to left, #282b33, #2f323a); - margin-right: -30px; - padding-right: 30px; -} -.site-header .site-nav-links .site-menu .site-page-links { - display: inline-block; - float: right; - margin-right: 20px; -} -.site-header .site-nav-links .site-menu .site-page-links> li { - display: inline-block; - float: left; -} -.site-header .site-nav-links .site-menu .site-page-links > li > a { - color: #a7adbd; - display: inline-block; - height: 30px; - padding: 0 20px; - font-size: 12px; -} -.site-header .site-nav-links .site-menu .site-page-links > li:hover > a, -.site-header .site-nav-links .site-menu .site-page-links > li.active > a { - background-color: #2f323a; - color: #bcc1d0; -} -.site-header .site-nav-links .site-menu .site-page-links > li.active > a { - font-weight: bold; -} -.site-header .site-nav-links .site-menu .fork-on-github { - color: #597cf1; - line-height: 30px; - 
display: inline-block; - padding: 0 0 0 20px; - float: right; - position: relative; -} -.site-header .site-nav-links .site-menu .fork-on-github .fa { - margin-right: 5px; - font-size: 16px; - vertical-align: middle; -} -.site-header .site-nav-links .site-menu .language-switcher { - height: 30px; - display: inline-block; - float: right; - line-height: 30px; - padding: 0 20px; - position: relative; -} -.site-header .site-nav-links .site-menu .language-switcher > a { - color: #a7adbd; -} -.site-header .site-nav-links .site-menu .language-switcher.open > a { - background-color: #24272f; - color: #bcc1d0; -} -.site-header .site-nav-links .site-menu .language-switcher .fa { - margin-left: 5px; -} -.site-header .site-nav-links .site-menu .language-switcher .fa-angle-down { - display: inline; -} -.site-header .site-nav-links .site-menu .language-switcher.open .fa-angle-down { - display: none; -} -.site-header .site-nav-links .site-menu .language-switcher .fa-angle-up { - display: none; -} -.site-header .site-nav-links .site-menu .language-switcher.open .fa-angle-up { - display: inline; -} -.site-header .site-nav-links .site-menu .fork-on-github:before, -.site-header .site-nav-links .site-menu .language-switcher:before { - width: 1px; - height: 12px; - top: 9px; - background-color: #3a3d47; - left: 0; - display: inline-block; - position: absolute; - content: ""; -} -.site-header .site-nav-links .site-menu .language-switcher .dropdown-menu { - display: none; - position: absolute; - box-shadow: #ccc 0 0 5px; - background-color: #fff; - width: 100%; - left: 0; - top: 30px; -} -.site-header .site-nav-links .site-menu .language-switcher .dropdown-menu > li { - line-height: 30px; - padding: 0 20px; -} -.site-header .site-nav-links .site-menu .language-switcher .dropdown-menu > li:hover { - background-color: #f7f8fe; -} -.site-header .site-nav-links .site-menu .language-switcher .dropdown-menu > li + li { - border-top: 1px solid #dedfe5; -} -.site-header .site-nav-links .site-menu .language-switcher .dropdown-menu > li > a { - color: #2f323a; -} -.site-header .site-nav-links .site-menu .language-switcher.open .dropdown-menu { - display: inline-block; -} -.site-header .site-nav-links .doc-module { - display: block; - height: 50px; - line-height: 50px; -} -.site-header .site-nav-links .doc-module > ul > li { - display: inline-block; - float: left; -} -.site-header .site-nav-links .doc-module > ul > li > a { - color: #c9cbd0; - font-size: 14px; - display: inline-block; - height: 50px; - line-height: 50px; - border-bottom: 2px solid transparent; - padding: 0 20px; -} -.site-header .site-nav-links .doc-module > ul > li:hover > a { - color: #fff; -} -.site-header .site-nav-links .doc-module > ul > li.current > a { - border-bottom-color: #fff; - color: #fff; -} -.site-header .site-nav-links .doc-module [role="search"]{ - float: right; -} -.site-header .site-nav-links .doc-module [role="search"] input { - background-color: #3a3d47; - border-radius: 15px; - color: #a7adbd; - border: 1px solid transparent; - padding: 6px 15px; - width: 180px; - box-shadow: none; - transition: all .2s; - -webkit-transition: all .2s; - -moz-transition: all .2s; - -o-transition: all .2s; - background-repeat: no-repeat; - background-position: 145px center; - background-image: 
url("data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAACXBIWXMAAA7EAAAOxAGVKw4bAAAKTWlDQ1BQaG90b3Nob3AgSUNDIHByb2ZpbGUAAHjanVN3WJP3Fj7f92UPVkLY8LGXbIEAIiOsCMgQWaIQkgBhhBASQMWFiApWFBURnEhVxILVCkidiOKgKLhnQYqIWotVXDjuH9yntX167+3t+9f7vOec5/zOec8PgBESJpHmomoAOVKFPDrYH49PSMTJvYACFUjgBCAQ5svCZwXFAADwA3l4fnSwP/wBr28AAgBw1S4kEsfh/4O6UCZXACCRAOAiEucLAZBSAMguVMgUAMgYALBTs2QKAJQAAGx5fEIiAKoNAOz0ST4FANipk9wXANiiHKkIAI0BAJkoRyQCQLsAYFWBUiwCwMIAoKxAIi4EwK4BgFm2MkcCgL0FAHaOWJAPQGAAgJlCLMwAIDgCAEMeE80DIEwDoDDSv+CpX3CFuEgBAMDLlc2XS9IzFLiV0Bp38vDg4iHiwmyxQmEXKRBmCeQinJebIxNI5wNMzgwAABr50cH+OD+Q5+bk4eZm52zv9MWi/mvwbyI+IfHf/ryMAgQAEE7P79pf5eXWA3DHAbB1v2upWwDaVgBo3/ldM9sJoFoK0Hr5i3k4/EAenqFQyDwdHAoLC+0lYqG9MOOLPv8z4W/gi372/EAe/tt68ABxmkCZrcCjg/1xYW52rlKO58sEQjFu9+cj/seFf/2OKdHiNLFcLBWK8ViJuFAiTcd5uVKRRCHJleIS6X8y8R+W/QmTdw0ArIZPwE62B7XLbMB+7gECiw5Y0nYAQH7zLYwaC5EAEGc0Mnn3AACTv/mPQCsBAM2XpOMAALzoGFyolBdMxggAAESggSqwQQcMwRSswA6cwR28wBcCYQZEQAwkwDwQQgbkgBwKoRiWQRlUwDrYBLWwAxqgEZrhELTBMTgN5+ASXIHrcBcGYBiewhi8hgkEQcgIE2EhOogRYo7YIs4IF5mOBCJhSDSSgKQg6YgUUSLFyHKkAqlCapFdSCPyLXIUOY1cQPqQ28ggMor8irxHMZSBslED1AJ1QLmoHxqKxqBz0XQ0D12AlqJr0Rq0Hj2AtqKn0UvodXQAfYqOY4DRMQ5mjNlhXIyHRWCJWBomxxZj5Vg1Vo81Yx1YN3YVG8CeYe8IJAKLgBPsCF6EEMJsgpCQR1hMWEOoJewjtBK6CFcJg4Qxwicik6hPtCV6EvnEeGI6sZBYRqwm7iEeIZ4lXicOE1+TSCQOyZLkTgohJZAySQtJa0jbSC2kU6Q+0hBpnEwm65Btyd7kCLKArCCXkbeQD5BPkvvJw+S3FDrFiOJMCaIkUqSUEko1ZT/lBKWfMkKZoKpRzame1AiqiDqfWkltoHZQL1OHqRM0dZolzZsWQ8ukLaPV0JppZ2n3aC/pdLoJ3YMeRZfQl9Jr6Afp5+mD9HcMDYYNg8dIYigZaxl7GacYtxkvmUymBdOXmchUMNcyG5lnmA+Yb1VYKvYqfBWRyhKVOpVWlX6V56pUVXNVP9V5qgtUq1UPq15WfaZGVbNQ46kJ1Bar1akdVbupNq7OUndSj1DPUV+jvl/9gvpjDbKGhUaghkijVGO3xhmNIRbGMmXxWELWclYD6yxrmE1iW7L57Ex2Bfsbdi97TFNDc6pmrGaRZp3mcc0BDsax4PA52ZxKziHODc57LQMtPy2x1mqtZq1+rTfaetq+2mLtcu0W7eva73VwnUCdLJ31Om0693UJuja6UbqFutt1z+o+02PreekJ9cr1Dund0Uf1bfSj9Rfq79bv0R83MDQINpAZbDE4Y/DMkGPoa5hpuNHwhOGoEctoupHEaKPRSaMnuCbuh2fjNXgXPmasbxxirDTeZdxrPGFiaTLbpMSkxeS+Kc2Ua5pmutG003TMzMgs3KzYrMnsjjnVnGueYb7ZvNv8jYWlRZzFSos2i8eW2pZ8ywWWTZb3rJhWPlZ5VvVW16xJ1lzrLOtt1ldsUBtXmwybOpvLtqitm63Edptt3xTiFI8p0in1U27aMez87ArsmuwG7Tn2YfYl9m32zx3MHBId1jt0O3xydHXMdmxwvOuk4TTDqcSpw+lXZxtnoXOd8zUXpkuQyxKXdpcXU22niqdun3rLleUa7rrStdP1o5u7m9yt2W3U3cw9xX2r+00umxvJXcM970H08PdY4nHM452nm6fC85DnL152Xlle+70eT7OcJp7WMG3I28Rb4L3Le2A6Pj1l+s7pAz7GPgKfep+Hvqa+It89viN+1n6Zfgf8nvs7+sv9j/i/4XnyFvFOBWABwQHlAb2BGoGzA2sDHwSZBKUHNQWNBbsGLww+FUIMCQ1ZH3KTb8AX8hv5YzPcZyya0RXKCJ0VWhv6MMwmTB7WEY6GzwjfEH5vpvlM6cy2CIjgR2yIuB9pGZkX+X0UKSoyqi7qUbRTdHF09yzWrORZ+2e9jvGPqYy5O9tqtnJ2Z6xqbFJsY+ybuIC4qriBeIf4RfGXEnQTJAntieTE2MQ9ieNzAudsmjOc5JpUlnRjruXcorkX5unOy553PFk1WZB8OIWYEpeyP+WDIEJQLxhP5aduTR0T8oSbhU9FvqKNolGxt7hKPJLmnVaV9jjdO31D+miGT0Z1xjMJT1IreZEZkrkj801WRNberM/ZcdktOZSclJyjUg1plrQr1zC3KLdPZisrkw3keeZtyhuTh8r35CP5c/PbFWyFTNGjtFKuUA4WTC+oK3hbGFt4uEi9SFrUM99m/ur5IwuCFny9kLBQuLCz2Lh4WfHgIr9FuxYji1MXdy4xXVK6ZHhp8NJ9y2jLspb9UOJYUlXyannc8o5Sg9KlpUMrglc0lamUycturvRauWMVYZVkVe9ql9VbVn8qF5VfrHCsqK74sEa45uJXTl/VfPV5bdra3kq3yu3rSOuk626s91m/r0q9akHV0IbwDa0b8Y3lG19tSt50oXpq9Y7NtM3KzQM1YTXtW8y2rNvyoTaj9nqdf13LVv2tq7e+2Sba1r/dd3vzDoMdFTve75TsvLUreFdrvUV99W7S7oLdjxpiG7q/5n7duEd3T8Wej3ulewf2Re/ranRvbNyvv7+yCW1SNo0eSDpw5ZuAb9qb7Zp3tXBaKg7CQeXBJ9+mfHvjUOihzsPcw83fmX+39QjrSHkr0jq/dawto22gPaG97+iMo50dXh1Hvrf/fu8x42N1xzWPV56gnSg98fnkgpPjp2Snnp1OPz3Umdx590z8mWtdUV29Z0PPnj8XdO5Mt1/3yfPe549d8Lxw9CL3Ytslt0utPa49R35w/eFIr1tv62X3y+1XPK509E3rO9Hv03/6asDVc9f41y5dn3m978bsG7duJt0cuCW69fh29u0XdwruTNxdeo94r/y+2v3qB/oP6n+0/rFlwG3g+GDAYM/DWQ/vDgmHnv6U/9OH4dJHzEfVI0YjjY+dHx8bDRq98mTOk+GnsqcTz8p+Vv9563Or59/94vtLz1j82PAL+YvPv655qfNy
76uprzrHI8cfvM55PfGm/K3O233vuO+638e9H5ko/ED+UPPR+mPHp9BP9z7nfP78L/eE8/sl0p8zAAAAIGNIUk0AAHolAACAgwAA+f8AAIDpAAB1MAAA6mAAADqYAAAXb5JfxUYAAAEpSURBVHjanNO7K8dhFMfx1w8LBqVM5DLxF7hMTGSQpAwmJSkDizAZLSb5Ayi3clsMFgwWISGXkoSyGYRSym15fvr27duvH5/leTqd8+6c83ye1NLatohqMIgWVOEV+5jDAr7ElBO5j+IIH+hBJRqwjDHsoTQOyAvnCPpRi4tYziVmMY2dkPMc7aAG42hPKE7rAwMBNhEfYQgzOJNZ3xhGL4qigGasyk43OEdjFFCGe9nrNtT8Al5Q8AdAMd6jgFPU/QFwiN0oYD4sJzdLwBiuo4A5vGEKqQyF1ahPcuInOsJrrKMiwWx9OMAWWpOc+BD2MImr4Ik7FIb4AzqRH6zdhU1IxT4TlKAJ5XjCMU6CkaANi2lIXsKsj1jJsIsNdKc7yfE/pSGTPwMABBFCGflm+rsAAAAASUVORK5CYII="); -} -.site-header .site-nav-links .doc-module [role="search"] input:focus { - width: 300px; -} -.site-header .site-nav-links .doc-module [role="search"] input:focus { - background-position: 265px center; -} -.site-header .site-nav-links .doc-module [role="search"] input:hover, -.site-header .site-nav-links .doc-module [role="search"] input:focus { - color: #fff; - border-color: #597cf1; - background-image: url("data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAACXBIWXMAAA7EAAAOxAGVKw4bAAAKTWlDQ1BQaG90b3Nob3AgSUNDIHByb2ZpbGUAAHjanVN3WJP3Fj7f92UPVkLY8LGXbIEAIiOsCMgQWaIQkgBhhBASQMWFiApWFBURnEhVxILVCkidiOKgKLhnQYqIWotVXDjuH9yntX167+3t+9f7vOec5/zOec8PgBESJpHmomoAOVKFPDrYH49PSMTJvYACFUjgBCAQ5svCZwXFAADwA3l4fnSwP/wBr28AAgBw1S4kEsfh/4O6UCZXACCRAOAiEucLAZBSAMguVMgUAMgYALBTs2QKAJQAAGx5fEIiAKoNAOz0ST4FANipk9wXANiiHKkIAI0BAJkoRyQCQLsAYFWBUiwCwMIAoKxAIi4EwK4BgFm2MkcCgL0FAHaOWJAPQGAAgJlCLMwAIDgCAEMeE80DIEwDoDDSv+CpX3CFuEgBAMDLlc2XS9IzFLiV0Bp38vDg4iHiwmyxQmEXKRBmCeQinJebIxNI5wNMzgwAABr50cH+OD+Q5+bk4eZm52zv9MWi/mvwbyI+IfHf/ryMAgQAEE7P79pf5eXWA3DHAbB1v2upWwDaVgBo3/ldM9sJoFoK0Hr5i3k4/EAenqFQyDwdHAoLC+0lYqG9MOOLPv8z4W/gi372/EAe/tt68ABxmkCZrcCjg/1xYW52rlKO58sEQjFu9+cj/seFf/2OKdHiNLFcLBWK8ViJuFAiTcd5uVKRRCHJleIS6X8y8R+W/QmTdw0ArIZPwE62B7XLbMB+7gECiw5Y0nYAQH7zLYwaC5EAEGc0Mnn3AACTv/mPQCsBAM2XpOMAALzoGFyolBdMxggAAESggSqwQQcMwRSswA6cwR28wBcCYQZEQAwkwDwQQgbkgBwKoRiWQRlUwDrYBLWwAxqgEZrhELTBMTgN5+ASXIHrcBcGYBiewhi8hgkEQcgIE2EhOogRYo7YIs4IF5mOBCJhSDSSgKQg6YgUUSLFyHKkAqlCapFdSCPyLXIUOY1cQPqQ28ggMor8irxHMZSBslED1AJ1QLmoHxqKxqBz0XQ0D12AlqJr0Rq0Hj2AtqKn0UvodXQAfYqOY4DRMQ5mjNlhXIyHRWCJWBomxxZj5Vg1Vo81Yx1YN3YVG8CeYe8IJAKLgBPsCF6EEMJsgpCQR1hMWEOoJewjtBK6CFcJg4Qxwicik6hPtCV6EvnEeGI6sZBYRqwm7iEeIZ4lXicOE1+TSCQOyZLkTgohJZAySQtJa0jbSC2kU6Q+0hBpnEwm65Btyd7kCLKArCCXkbeQD5BPkvvJw+S3FDrFiOJMCaIkUqSUEko1ZT/lBKWfMkKZoKpRzame1AiqiDqfWkltoHZQL1OHqRM0dZolzZsWQ8ukLaPV0JppZ2n3aC/pdLoJ3YMeRZfQl9Jr6Afp5+mD9HcMDYYNg8dIYigZaxl7GacYtxkvmUymBdOXmchUMNcyG5lnmA+Yb1VYKvYqfBWRyhKVOpVWlX6V56pUVXNVP9V5qgtUq1UPq15WfaZGVbNQ46kJ1Bar1akdVbupNq7OUndSj1DPUV+jvl/9gvpjDbKGhUaghkijVGO3xhmNIRbGMmXxWELWclYD6yxrmE1iW7L57Ex2Bfsbdi97TFNDc6pmrGaRZp3mcc0BDsax4PA52ZxKziHODc57LQMtPy2x1mqtZq1+rTfaetq+2mLtcu0W7eva73VwnUCdLJ31Om0693UJuja6UbqFutt1z+o+02PreekJ9cr1Dund0Uf1bfSj9Rfq79bv0R83MDQINpAZbDE4Y/DMkGPoa5hpuNHwhOGoEctoupHEaKPRSaMnuCbuh2fjNXgXPmasbxxirDTeZdxrPGFiaTLbpMSkxeS+Kc2Ua5pmutG003TMzMgs3KzYrMnsjjnVnGueYb7ZvNv8jYWlRZzFSos2i8eW2pZ8ywWWTZb3rJhWPlZ5VvVW16xJ1lzrLOtt1ldsUBtXmwybOpvLtqitm63Edptt3xTiFI8p0in1U27aMez87ArsmuwG7Tn2YfYl9m32zx3MHBId1jt0O3xydHXMdmxwvOuk4TTDqcSpw+lXZxtnoXOd8zUXpkuQyxKXdpcXU22niqdun3rLleUa7rrStdP1o5u7m9yt2W3U3cw9xX2r+00umxvJXcM970H08PdY4nHM452nm6fC85DnL152Xlle+70eT7OcJp7WMG3I28Rb4L3Le2A6Pj1l+s7pAz7GPgKfep+Hvqa+It89viN+1n6Zfgf8nvs7+sv9j/i/4XnyFvFOBWABwQHlAb2BGoGzA2sDHwSZBKUHNQWNBbsGLww+FUIMCQ1ZH3KTb8AX8hv5YzPcZyya0RXKCJ0VWhv6MMwmTB7WEY6GzwjfEH5vpvlM6cy2CIjgR2yIuB9pGZkX+X0UKSoyqi7qUbRTdHF09yzWrORZ+2e9jvGPqYy5O9tqtnJ2Z6xqbFJsY+ybuIC4qriBeIf4RfGXEnQTJAntieTE2MQ9ieNzAudsmjOc5Jp
UlnRjruXcorkX5unOy553PFk1WZB8OIWYEpeyP+WDIEJQLxhP5aduTR0T8oSbhU9FvqKNolGxt7hKPJLmnVaV9jjdO31D+miGT0Z1xjMJT1IreZEZkrkj801WRNberM/ZcdktOZSclJyjUg1plrQr1zC3KLdPZisrkw3keeZtyhuTh8r35CP5c/PbFWyFTNGjtFKuUA4WTC+oK3hbGFt4uEi9SFrUM99m/ur5IwuCFny9kLBQuLCz2Lh4WfHgIr9FuxYji1MXdy4xXVK6ZHhp8NJ9y2jLspb9UOJYUlXyannc8o5Sg9KlpUMrglc0lamUycturvRauWMVYZVkVe9ql9VbVn8qF5VfrHCsqK74sEa45uJXTl/VfPV5bdra3kq3yu3rSOuk626s91m/r0q9akHV0IbwDa0b8Y3lG19tSt50oXpq9Y7NtM3KzQM1YTXtW8y2rNvyoTaj9nqdf13LVv2tq7e+2Sba1r/dd3vzDoMdFTve75TsvLUreFdrvUV99W7S7oLdjxpiG7q/5n7duEd3T8Wej3ulewf2Re/ranRvbNyvv7+yCW1SNo0eSDpw5ZuAb9qb7Zp3tXBaKg7CQeXBJ9+mfHvjUOihzsPcw83fmX+39QjrSHkr0jq/dawto22gPaG97+iMo50dXh1Hvrf/fu8x42N1xzWPV56gnSg98fnkgpPjp2Snnp1OPz3Umdx590z8mWtdUV29Z0PPnj8XdO5Mt1/3yfPe549d8Lxw9CL3Ytslt0utPa49R35w/eFIr1tv62X3y+1XPK509E3rO9Hv03/6asDVc9f41y5dn3m978bsG7duJt0cuCW69fh29u0XdwruTNxdeo94r/y+2v3qB/oP6n+0/rFlwG3g+GDAYM/DWQ/vDgmHnv6U/9OH4dJHzEfVI0YjjY+dHx8bDRq98mTOk+GnsqcTz8p+Vv9563Or59/94vtLz1j82PAL+YvPv655qfNy76uprzrHI8cfvM55PfGm/K3O233vuO+638e9H5ko/ED+UPPR+mPHp9BP9z7nfP78L/eE8/sl0p8zAAAAIGNIUk0AAHolAACAgwAA+f8AAIDpAAB1MAAA6mAAADqYAAAXb5JfxUYAAAEpSURBVHjanNO9K4ZhFMfxz4MFg1Im8jJ5/gIvExMZJCnFpCRlYBEGGS0m+QMoLwOyGCwyWISEvJQklM0glFLeluvR3d3d08Nvua5O53w751y/K9Uz+SyiNIbRihq8Yh+LWMaXmPIi93Ec4QN9qEYjVjGBPZTHAQXhHMMg6nARy7nEAuawE3Keox2kMYWOhOKMPjAUYNPxEUYwjzPZ9Y1R9KMkCmjButx0g3M0RQEVuJe7bkPNL+AFRX8AlOI9CjhF/R8Ah9iNApbCcvJzBEzgOgpYxBtmkcpSWIuGJCd+ojO8xgaqEsw2gANsoy3JiQ9hDzO4Cp64Q3GIP6ALhcHa3diCVOwzQRmaUYknHOMkGAnasZKBFCTM+oi1LLvYRG+mkzz/UwYy8zMAmkpBg3fGpFUAAAAASUVORK5CYII="); -} -.doc-menu-vertical { - display: inline-block; - float: left; - width: 240px; - height: 100%; - background-color: #ecedee; - position: absolute; - left: 0; - top: 0; - overflow: hidden; - padding: 0; - border-right: 1px solid #dddfe3; -} -.doc-menu-vertical > ul { - display: none; -} -.doc-menu-vertical > ul.current{ - display: block; -} -.doc-menu-vertical > ul.current > li.toctree-l1 { - display: none; -} -.doc-menu-vertical > ul.current > li.toctree-l1.current { - display: block; -} -.doc-menu-vertical > ul.current > li.toctree-l1.current > a { - display: none; -} -.doc-menu-vertical .toctree-l2 a { - width: 100%; - overflow: hidden; - text-overflow: ellipsis; - white-space: nowrap; - padding-right: 30px; -} -.doc-menu-vertical .toctree-l2 > a { - font-size: 14px; - color: #2f323a; - padding-left: 30px; - line-height: 50px; - display: block; - font-weight: bold; - border-bottom: 1px solid #dddfe3; -} -.doc-menu-vertical .toctree-l2.has-child > a:after { - font-family: "FontAwesome"; - display: inline-block; - font-style: normal; - font-weight: normal; - text-decoration: inherit; - content: ""; - float: right; - line-height: 50px; - color: #a7adbd; - position: absolute; - right: 15px; -} -.doc-menu-vertical .toctree-l2.has-child.current > a:after { - content: ""; -} -.doc-menu-vertical .toctree-l2 > a + ul{ - background-color: #e4e6e9; - height: 0; - overflow: hidden; -} -.doc-menu-vertical .toctree-l2.current > a + ul { - border-bottom: 1px solid #dddfe3; - height: auto; -} -.doc-menu-vertical .toctree-l2 li.active > a { - background-color: #597cf1; - color: #fff; -} -.doc-menu-vertical .toctree-l3 > a { - font-size: 12px; - color: #2f323a; - padding-left: 30px; - line-height: 40px; - display: block; -} -.doc-menu-vertical .toctree-l4 > a { - font-size: 12px; - color: #64697b; - padding-left: 50px; - line-height: 30px; - display: block; -} -.doc-menu-vertical .toctree-l5 > a { - font-size: 14px; - color: #ccc; - padding-left: 40px; - display: block; -} -.local-toc { 
- position: absolute; - height: 100%; - background-color: #f6f7f8; - top: 0; - left: 240px; - padding: 0; - z-index: 9; -} -.local-toc:after { - content: ""; - position: absolute; - height: 100%; - width: 1px; - display: inline-block; - right: 0; - background-color: #dddfe3; - top: 0; - z-index: -1; -} -.local-toc:hover a { - width: auto; -} -.local-toc > ul > li a { - position: relative; - font-size: 12px; - overflow: hidden; - display: none; -} -.local-toc > ul > li > ul > li a { - display: block; - border-top: 1px solid transparent; - border-bottom: 1px solid transparent; - padding-right: 20px; - width: 50px; -} -.local-toc > ul > li > ul > li > ul > li > ul a { - display: none; -} -.local-toc > ul > li > ul li > a:after { - content: ""; - display: inline-block; - width: 1px; - height: 100%; - background-color: transparent; - position: absolute; - right: 0; - top: 0; -} -.local-toc > ul > li > ul li a:hover{ - background-color: #e6eaf7 !important; -} -.local-toc > ul > li > ul li a:hover:after { - background-color: #e6eaf7 !important; -} -.local-toc > ul > li > ul li.active > a { - color: #ff9711; - background-color: #fff; - border-top: 1px solid #dddfe3; - border-bottom: 1px solid #dddfe3; -} -.local-toc > ul > li > ul li.active > a:before { - background-color: #ff9711; - width: 10px; - height: 10px; - margin: 15px 20px; - border-radius: 5px; -} -.local-toc > ul > li > ul li.active > a:after { - background-color: #fff; -} -.local-toc > ul > li > ul > li { - position: relative; - line-height: 40px; - white-space: nowrap; -} -.local-toc > ul > li > ul > li > a { - color: #64697b; -} -.local-toc > ul > li > ul > li > a + ul { - display: none; -} -.local-toc > ul > li > ul > li > a:before { - display: inline-block; - content: ""; - width: 6px; - height: 6px; - background-color: #ccc; - border-radius: 3px; - margin: 17px 22px; - float: left; -} -.local-toc > ul > li > ul > li > ul > li > a { - color: #a7adbd; -} -.local-toc > ul > li > ul > li > ul > li > a:before { - display: inline-block; - content: ""; - width: 6px; - height: 6px; - background-color: #ccc; - border-radius: 3px; - margin: 17px 22px; - float: left; -} -.main-content-wrap { - position: absolute; - width: 100%; - top: 80px; - bottom: 0; - overflow: auto; - background-color: #f6f7f8; -} -.doc-content-wrap { - margin-left: 290px; - height: 100%; - position: relative; - padding-top: 60px; - background-color: #fff; -} -.doc-content-wrap > div[role='navigation'] { - position: absolute; - top: 0; - width: 100%; - left: 0; - padding: 0 30px; - height: 60px; -} -.wy-breadcrumbs { - line-height: 50px; - height: 60px; - background-image: 
url("data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAEAAAAUCAYAAABMDlehAAAAGXRFWHRTb2Z0d2FyZQBBZG9iZSBJbWFnZVJlYWR5ccllPAAAA4ZpVFh0WE1MOmNvbS5hZG9iZS54bXAAAAAAADw/eHBhY2tldCBiZWdpbj0i77u/IiBpZD0iVzVNME1wQ2VoaUh6cmVTek5UY3prYzlkIj8+IDx4OnhtcG1ldGEgeG1sbnM6eD0iYWRvYmU6bnM6bWV0YS8iIHg6eG1wdGs9IkFkb2JlIFhNUCBDb3JlIDUuNS1jMDIxIDc5LjE1NTc3MiwgMjAxNC8wMS8xMy0xOTo0NDowMCAgICAgICAgIj4gPHJkZjpSREYgeG1sbnM6cmRmPSJodHRwOi8vd3d3LnczLm9yZy8xOTk5LzAyLzIyLXJkZi1zeW50YXgtbnMjIj4gPHJkZjpEZXNjcmlwdGlvbiByZGY6YWJvdXQ9IiIgeG1sbnM6eG1wTU09Imh0dHA6Ly9ucy5hZG9iZS5jb20veGFwLzEuMC9tbS8iIHhtbG5zOnN0UmVmPSJodHRwOi8vbnMuYWRvYmUuY29tL3hhcC8xLjAvc1R5cGUvUmVzb3VyY2VSZWYjIiB4bWxuczp4bXA9Imh0dHA6Ly9ucy5hZG9iZS5jb20veGFwLzEuMC8iIHhtcE1NOk9yaWdpbmFsRG9jdW1lbnRJRD0ieG1wLmRpZDpjMjhmMGQ3ZC0wODU3LTQ0ZTctOGRhZi00NGU3OTc1ZmM2MzkiIHhtcE1NOkRvY3VtZW50SUQ9InhtcC5kaWQ6NzRBN0NEODRBRTM2MTFFNjlGMDI4RUM3M0VDQzY4NTkiIHhtcE1NOkluc3RhbmNlSUQ9InhtcC5paWQ6NzRBN0NEODNBRTM2MTFFNjlGMDI4RUM3M0VDQzY4NTkiIHhtcDpDcmVhdG9yVG9vbD0iQWRvYmUgUGhvdG9zaG9wIENDIDIwMTQgKE1hY2ludG9zaCkiPiA8eG1wTU06RGVyaXZlZEZyb20gc3RSZWY6aW5zdGFuY2VJRD0ieG1wLmlpZDozNWQwMzI1ZC01ZDAyLTQ1YTYtODUxOS1lNWUzNjU5NGFhMzAiIHN0UmVmOmRvY3VtZW50SUQ9ImFkb2JlOmRvY2lkOnBob3Rvc2hvcDozZGVmZmY0OS1mNjA4LTExNzktYTRlZC1kZjJiNGY3N2YwNzMiLz4gPC9yZGY6RGVzY3JpcHRpb24+IDwvcmRmOlJERj4gPC94OnhtcG1ldGE+IDw/eHBhY2tldCBlbmQ9InIiPz7FGmP1AAAAKUlEQVR42mK4/+DpfwY9Q0tBJgYGhv8g4h8uFoKLEGOAc9FYSARAgAEAUgMQYBNmQ7sAAAAASUVORK5CYII="); - background-repeat: repeat no-repeat; - background-position: center 50px; -} -.wy-breadcrumbs > li { - color: #ccc; -} -.wy-breadcrumbs > li a { - color: #ff9711; - padding: 0; -} -.wy-breadcrumbs > li:first-child a { - color: #597cf1; -} -.wy-nav-content{ - max-width: none; - overflow: auto; - position: relative; - padding: 30px; - background-color: #fff; -} -.wy-nav-content h1 { - font-size: 24px; - color: #2f323a; - margin-bottom: 30px; -} -.wy-nav-content h2 { - font-size: 20px; - color: #2f323a; - margin-bottom: 30px; -} -.wy-nav-content h3 { - font-size: 18px; - color: #2f323a; - margin-bottom: 30px; -} -.wy-nav-content h4 { - font-size: 16px; - color: #2f323a; - margin-bottom: 30px; -} -.wy-nav-content p + h1, -.wy-nav-content p + h2, -.wy-nav-content p + h3, -.wy-nav-content p + h4 { - margin-top: 20px; -} -.wy-nav-content p{ - color: #2f323a; - margin-bottom: 20px; - font-size: 14px; -} -#search-results h2 { - font-size: 24px; - margin: 20px 0 10px 0; -} -#search-results p { - color: #a7adbd; -} -#search-results ul.search > li { - border-bottom: none; -} -#search-results ul.search > li > a { - color: #597cf1; -} -.rst-content .highlighted{ - background-color: transparent; - color: #ff9711; - padding: 0; -} diff --git a/dev/doc/_static/css/theme.css b/dev/doc/_static/css/theme.css deleted file mode 100644 index 7be93399a4f530da7eb43e2c214ec42cea89a6c3..0000000000000000000000000000000000000000 --- a/dev/doc/_static/css/theme.css +++ /dev/null @@ -1,5 +0,0 @@ -*{-webkit-box-sizing:border-box;-moz-box-sizing:border-box;box-sizing:border-box}article,aside,details,figcaption,figure,footer,header,hgroup,nav,section{display:block}audio,canvas,video{display:inline-block;*display:inline;*zoom:1}audio:not([controls]){display:none}[hidden]{display:none}*{-webkit-box-sizing:border-box;-moz-box-sizing:border-box;box-sizing:border-box}html{font-size:100%;-webkit-text-size-adjust:100%;-ms-text-size-adjust:100%}body{margin:0}a:hover,a:active{outline:0}abbr[title]{border-bottom:1px 
dotted}b,strong{font-weight:bold}blockquote{margin:0}dfn{font-style:italic}ins{background:#ff9;color:#000;text-decoration:none}mark{background:#ff0;color:#000;font-style:italic;font-weight:bold}pre,code,.rst-content tt,.rst-content code,kbd,samp{font-family:monospace,serif;_font-family:"courier new",monospace;font-size:1em}pre{white-space:pre}q{quotes:none}q:before,q:after{content:"";content:none}small{font-size:85%}sub,sup{font-size:75%;line-height:0;position:relative;vertical-align:baseline}sup{top:-0.5em}sub{bottom:-0.25em}ul,ol,dl{margin:0;padding:0;list-style:none;list-style-image:none}li{list-style:none}dd{margin:0}img{border:0;-ms-interpolation-mode:bicubic;vertical-align:middle;max-width:100%}svg:not(:root){overflow:hidden}figure{margin:0}form{margin:0}fieldset{border:0;margin:0;padding:0}label{cursor:pointer}legend{border:0;*margin-left:-7px;padding:0;white-space:normal}button,input,select,textarea{font-size:100%;margin:0;vertical-align:baseline;*vertical-align:middle}button,input{line-height:normal}button,input[type="button"],input[type="reset"],input[type="submit"]{cursor:pointer;-webkit-appearance:button;*overflow:visible}button[disabled],input[disabled]{cursor:default}input[type="checkbox"],input[type="radio"]{box-sizing:border-box;padding:0;*width:13px;*height:13px}input[type="search"]{-webkit-appearance:textfield;-moz-box-sizing:content-box;-webkit-box-sizing:content-box;box-sizing:content-box}input[type="search"]::-webkit-search-decoration,input[type="search"]::-webkit-search-cancel-button{-webkit-appearance:none}button::-moz-focus-inner,input::-moz-focus-inner{border:0;padding:0}textarea{overflow:auto;vertical-align:top;resize:vertical}table{border-collapse:collapse;border-spacing:0}td{vertical-align:top}.chromeframe{margin:0.2em 0;background:#ccc;color:#000;padding:0.2em 0}.ir{display:block;border:0;text-indent:-999em;overflow:hidden;background-color:transparent;background-repeat:no-repeat;text-align:left;direction:ltr;*line-height:0}.ir br{display:none}.hidden{display:none !important;visibility:hidden}.visuallyhidden{border:0;clip:rect(0 0 0 0);height:1px;margin:-1px;overflow:hidden;padding:0;position:absolute;width:1px}.visuallyhidden.focusable:active,.visuallyhidden.focusable:focus{clip:auto;height:auto;margin:0;overflow:visible;position:static;width:auto}.invisible{visibility:hidden}.relative{position:relative}big,small{font-size:100%}@media print{html,body,section{background:none !important}*{box-shadow:none !important;text-shadow:none !important;filter:none !important;-ms-filter:none !important}a,a:visited{text-decoration:underline}.ir a:after,a[href^="javascript:"]:after,a[href^="#"]:after{content:""}pre,blockquote{page-break-inside:avoid}thead{display:table-header-group}tr,img{page-break-inside:avoid}img{max-width:100% !important}@page{margin:0.5cm}p,h2,.rst-content .toctree-wrapper p.caption,h3{orphans:3;widows:3}h2,.rst-content .toctree-wrapper p.caption,h3{page-break-after:avoid}}.fa:before,.wy-menu-vertical li span.toctree-expand:before,.wy-menu-vertical li.on a span.toctree-expand:before,.wy-menu-vertical li.current>a span.toctree-expand:before,.rst-content .admonition-title:before,.rst-content h1 .headerlink:before,.rst-content h2 .headerlink:before,.rst-content h3 .headerlink:before,.rst-content h4 .headerlink:before,.rst-content h5 .headerlink:before,.rst-content h6 .headerlink:before,.rst-content dl dt .headerlink:before,.rst-content p.caption .headerlink:before,.rst-content tt.download span:first-child:before,.rst-content code.download 
span:first-child:before,.icon:before,.wy-dropdown .caret:before,.wy-inline-validate.wy-inline-validate-success .wy-input-context:before,.wy-inline-validate.wy-inline-validate-danger .wy-input-context:before,.wy-inline-validate.wy-inline-validate-warning .wy-input-context:before,.wy-inline-validate.wy-inline-validate-info .wy-input-context:before,.wy-alert,.rst-content .note,.rst-content .attention,.rst-content .caution,.rst-content .danger,.rst-content .error,.rst-content .hint,.rst-content .important,.rst-content .tip,.rst-content .warning,.rst-content .seealso,.rst-content .admonition-todo,.btn,input[type="text"],input[type="password"],input[type="email"],input[type="url"],input[type="date"],input[type="month"],input[type="time"],input[type="datetime"],input[type="datetime-local"],input[type="week"],input[type="number"],input[type="search"],input[type="tel"],input[type="color"],select,textarea,.wy-menu-vertical li.on a,.wy-menu-vertical li.current>a,.wy-side-nav-search>a,.wy-side-nav-search .wy-dropdown>a,.wy-nav-top a{-webkit-font-smoothing:antialiased}.clearfix{*zoom:1}.clearfix:before,.clearfix:after{display:table;content:""}.clearfix:after{clear:both}/*! - * Font Awesome 4.2.0 by @davegandy - http://fontawesome.io - @fontawesome - * License - http://fontawesome.io/license (Font: SIL OFL 1.1, CSS: MIT License) - */@font-face{font-family:'FontAwesome';src:url("../fonts/fontawesome-webfont.eot?v=4.2.0");src:url("../fonts/fontawesome-webfont.eot?#iefix&v=4.2.0") format("embedded-opentype"),url("../fonts/fontawesome-webfont.woff?v=4.2.0") format("woff"),url("../fonts/fontawesome-webfont.ttf?v=4.2.0") format("truetype"),url("../fonts/fontawesome-webfont.svg?v=4.2.0#fontawesomeregular") format("svg");font-weight:normal;font-style:normal}.fa,.wy-menu-vertical li span.toctree-expand,.wy-menu-vertical li.on a span.toctree-expand,.wy-menu-vertical li.current>a span.toctree-expand,.rst-content .admonition-title,.rst-content h1 .headerlink,.rst-content h2 .headerlink,.rst-content h3 .headerlink,.rst-content h4 .headerlink,.rst-content h5 .headerlink,.rst-content h6 .headerlink,.rst-content dl dt .headerlink,.rst-content p.caption .headerlink,.rst-content tt.download span:first-child,.rst-content code.download span:first-child,.icon{display:inline-block;font:normal normal normal 14px/1 FontAwesome;font-size:inherit;text-rendering:auto;-webkit-font-smoothing:antialiased;-moz-osx-font-smoothing:grayscale}.fa-lg{font-size:1.33333em;line-height:0.75em;vertical-align:-15%}.fa-2x{font-size:2em}.fa-3x{font-size:3em}.fa-4x{font-size:4em}.fa-5x{font-size:5em}.fa-fw{width:1.28571em;text-align:center}.fa-ul{padding-left:0;margin-left:2.14286em;list-style-type:none}.fa-ul>li{position:relative}.fa-li{position:absolute;left:-2.14286em;width:2.14286em;top:0.14286em;text-align:center}.fa-li.fa-lg{left:-1.85714em}.fa-border{padding:.2em .25em .15em;border:solid 0.08em #eee;border-radius:.1em}.pull-right{float:right}.pull-left{float:left}.fa.pull-left,.wy-menu-vertical li span.pull-left.toctree-expand,.wy-menu-vertical li.on a span.pull-left.toctree-expand,.wy-menu-vertical li.current>a span.pull-left.toctree-expand,.rst-content .pull-left.admonition-title,.rst-content h1 .pull-left.headerlink,.rst-content h2 .pull-left.headerlink,.rst-content h3 .pull-left.headerlink,.rst-content h4 .pull-left.headerlink,.rst-content h5 .pull-left.headerlink,.rst-content h6 .pull-left.headerlink,.rst-content dl dt .pull-left.headerlink,.rst-content p.caption .pull-left.headerlink,.rst-content tt.download 
span.pull-left:first-child,.rst-content code.download span.pull-left:first-child,.pull-left.icon{margin-right:.3em}.fa.pull-right,.wy-menu-vertical li span.pull-right.toctree-expand,.wy-menu-vertical li.on a span.pull-right.toctree-expand,.wy-menu-vertical li.current>a span.pull-right.toctree-expand,.rst-content .pull-right.admonition-title,.rst-content h1 .pull-right.headerlink,.rst-content h2 .pull-right.headerlink,.rst-content h3 .pull-right.headerlink,.rst-content h4 .pull-right.headerlink,.rst-content h5 .pull-right.headerlink,.rst-content h6 .pull-right.headerlink,.rst-content dl dt .pull-right.headerlink,.rst-content p.caption .pull-right.headerlink,.rst-content tt.download span.pull-right:first-child,.rst-content code.download span.pull-right:first-child,.pull-right.icon{margin-left:.3em}.fa-spin{-webkit-animation:fa-spin 2s infinite linear;animation:fa-spin 2s infinite linear}@-webkit-keyframes fa-spin{0%{-webkit-transform:rotate(0deg);transform:rotate(0deg)}100%{-webkit-transform:rotate(359deg);transform:rotate(359deg)}}@keyframes fa-spin{0%{-webkit-transform:rotate(0deg);transform:rotate(0deg)}100%{-webkit-transform:rotate(359deg);transform:rotate(359deg)}}.fa-rotate-90{filter:progid:DXImageTransform.Microsoft.BasicImage(rotation=1);-webkit-transform:rotate(90deg);-ms-transform:rotate(90deg);transform:rotate(90deg)}.fa-rotate-180{filter:progid:DXImageTransform.Microsoft.BasicImage(rotation=2);-webkit-transform:rotate(180deg);-ms-transform:rotate(180deg);transform:rotate(180deg)}.fa-rotate-270{filter:progid:DXImageTransform.Microsoft.BasicImage(rotation=3);-webkit-transform:rotate(270deg);-ms-transform:rotate(270deg);transform:rotate(270deg)}.fa-flip-horizontal{filter:progid:DXImageTransform.Microsoft.BasicImage(rotation=0);-webkit-transform:scale(-1, 1);-ms-transform:scale(-1, 1);transform:scale(-1, 1)}.fa-flip-vertical{filter:progid:DXImageTransform.Microsoft.BasicImage(rotation=2);-webkit-transform:scale(1, -1);-ms-transform:scale(1, -1);transform:scale(1, -1)}:root .fa-rotate-90,:root .fa-rotate-180,:root .fa-rotate-270,:root .fa-flip-horizontal,:root .fa-flip-vertical{filter:none}.fa-stack{position:relative;display:inline-block;width:2em;height:2em;line-height:2em;vertical-align:middle}.fa-stack-1x,.fa-stack-2x{position:absolute;left:0;width:100%;text-align:center}.fa-stack-1x{line-height:inherit}.fa-stack-2x{font-size:2em}.fa-inverse{color:#fff}.fa-glass:before{content:""}.fa-music:before{content:""}.fa-search:before,.icon-search:before{content:""}.fa-envelope-o:before{content:""}.fa-heart:before{content:""}.fa-star:before{content:""}.fa-star-o:before{content:""}.fa-user:before{content:""}.fa-film:before{content:""}.fa-th-large:before{content:""}.fa-th:before{content:""}.fa-th-list:before{content:""}.fa-check:before{content:""}.fa-remove:before,.fa-close:before,.fa-times:before{content:""}.fa-search-plus:before{content:""}.fa-search-minus:before{content:""}.fa-power-off:before{content:""}.fa-signal:before{content:""}.fa-gear:before,.fa-cog:before{content:""}.fa-trash-o:before{content:""}.fa-home:before,.icon-home:before{content:""}.fa-file-o:before{content:""}.fa-clock-o:before{content:""}.fa-road:before{content:""}.fa-download:before,.rst-content tt.download span:first-child:before,.rst-content code.download 
span:first-child:before{content:""}.fa-arrow-circle-o-down:before{content:""}.fa-arrow-circle-o-up:before{content:""}.fa-inbox:before{content:""}.fa-play-circle-o:before{content:""}.fa-rotate-right:before,.fa-repeat:before{content:""}.fa-refresh:before{content:""}.fa-list-alt:before{content:""}.fa-lock:before{content:""}.fa-flag:before{content:""}.fa-headphones:before{content:""}.fa-volume-off:before{content:""}.fa-volume-down:before{content:""}.fa-volume-up:before{content:""}.fa-qrcode:before{content:""}.fa-barcode:before{content:""}.fa-tag:before{content:""}.fa-tags:before{content:""}.fa-book:before,.icon-book:before{content:""}.fa-bookmark:before{content:""}.fa-print:before{content:""}.fa-camera:before{content:""}.fa-font:before{content:""}.fa-bold:before{content:""}.fa-italic:before{content:""}.fa-text-height:before{content:""}.fa-text-width:before{content:""}.fa-align-left:before{content:""}.fa-align-center:before{content:""}.fa-align-right:before{content:""}.fa-align-justify:before{content:""}.fa-list:before{content:""}.fa-dedent:before,.fa-outdent:before{content:""}.fa-indent:before{content:""}.fa-video-camera:before{content:""}.fa-photo:before,.fa-image:before,.fa-picture-o:before{content:""}.fa-pencil:before{content:""}.fa-map-marker:before{content:""}.fa-adjust:before{content:""}.fa-tint:before{content:""}.fa-edit:before,.fa-pencil-square-o:before{content:""}.fa-share-square-o:before{content:""}.fa-check-square-o:before{content:""}.fa-arrows:before{content:""}.fa-step-backward:before{content:""}.fa-fast-backward:before{content:""}.fa-backward:before{content:""}.fa-play:before{content:""}.fa-pause:before{content:""}.fa-stop:before{content:""}.fa-forward:before{content:""}.fa-fast-forward:before{content:""}.fa-step-forward:before{content:""}.fa-eject:before{content:""}.fa-chevron-left:before{content:""}.fa-chevron-right:before{content:""}.fa-plus-circle:before{content:""}.fa-minus-circle:before{content:""}.fa-times-circle:before,.wy-inline-validate.wy-inline-validate-danger .wy-input-context:before{content:""}.fa-check-circle:before,.wy-inline-validate.wy-inline-validate-success .wy-input-context:before{content:""}.fa-question-circle:before{content:""}.fa-info-circle:before{content:""}.fa-crosshairs:before{content:""}.fa-times-circle-o:before{content:""}.fa-check-circle-o:before{content:""}.fa-ban:before{content:""}.fa-arrow-left:before{content:""}.fa-arrow-right:before{content:""}.fa-arrow-up:before{content:""}.fa-arrow-down:before{content:""}.fa-mail-forward:before,.fa-share:before{content:""}.fa-expand:before{content:""}.fa-compress:before{content:""}.fa-plus:before{content:""}.fa-minus:before{content:""}.fa-asterisk:before{content:""}.fa-exclamation-circle:before,.wy-inline-validate.wy-inline-validate-warning .wy-input-context:before,.wy-inline-validate.wy-inline-validate-info .wy-input-context:before,.rst-content 
.admonition-title:before{content:""}.fa-gift:before{content:""}.fa-leaf:before{content:""}.fa-fire:before,.icon-fire:before{content:""}.fa-eye:before{content:""}.fa-eye-slash:before{content:""}.fa-warning:before,.fa-exclamation-triangle:before{content:""}.fa-plane:before{content:""}.fa-calendar:before{content:""}.fa-random:before{content:""}.fa-comment:before{content:""}.fa-magnet:before{content:""}.fa-chevron-up:before{content:""}.fa-chevron-down:before{content:""}.fa-retweet:before{content:""}.fa-shopping-cart:before{content:""}.fa-folder:before{content:""}.fa-folder-open:before{content:""}.fa-arrows-v:before{content:""}.fa-arrows-h:before{content:""}.fa-bar-chart-o:before,.fa-bar-chart:before{content:""}.fa-twitter-square:before{content:""}.fa-facebook-square:before{content:""}.fa-camera-retro:before{content:""}.fa-key:before{content:""}.fa-gears:before,.fa-cogs:before{content:""}.fa-comments:before{content:""}.fa-thumbs-o-up:before{content:""}.fa-thumbs-o-down:before{content:""}.fa-star-half:before{content:""}.fa-heart-o:before{content:""}.fa-sign-out:before{content:""}.fa-linkedin-square:before{content:""}.fa-thumb-tack:before{content:""}.fa-external-link:before{content:""}.fa-sign-in:before{content:""}.fa-trophy:before{content:""}.fa-github-square:before{content:""}.fa-upload:before{content:""}.fa-lemon-o:before{content:""}.fa-phone:before{content:""}.fa-square-o:before{content:""}.fa-bookmark-o:before{content:""}.fa-phone-square:before{content:""}.fa-twitter:before{content:""}.fa-facebook:before{content:""}.fa-github:before,.icon-github:before{content:""}.fa-unlock:before{content:""}.fa-credit-card:before{content:""}.fa-rss:before{content:""}.fa-hdd-o:before{content:""}.fa-bullhorn:before{content:""}.fa-bell:before{content:""}.fa-certificate:before{content:""}.fa-hand-o-right:before{content:""}.fa-hand-o-left:before{content:""}.fa-hand-o-up:before{content:""}.fa-hand-o-down:before{content:""}.fa-arrow-circle-left:before,.icon-circle-arrow-left:before{content:""}.fa-arrow-circle-right:before,.icon-circle-arrow-right:before{content:""}.fa-arrow-circle-up:before{content:""}.fa-arrow-circle-down:before{content:""}.fa-globe:before{content:""}.fa-wrench:before{content:""}.fa-tasks:before{content:""}.fa-filter:before{content:""}.fa-briefcase:before{content:""}.fa-arrows-alt:before{content:""}.fa-group:before,.fa-users:before{content:""}.fa-chain:before,.fa-link:before,.icon-link:before{content:""}.fa-cloud:before{content:""}.fa-flask:before{content:""}.fa-cut:before,.fa-scissors:before{content:""}.fa-copy:before,.fa-files-o:before{content:""}.fa-paperclip:before{content:""}.fa-save:before,.fa-floppy-o:before{content:""}.fa-square:before{content:""}.fa-navicon:before,.fa-reorder:before,.fa-bars:before{content:""}.fa-list-ul:before{content:""}.fa-list-ol:before{content:""}.fa-strikethrough:before{content:""}.fa-underline:before{content:""}.fa-table:before{content:""}.fa-magic:before{content:""}.fa-truck:before{content:""}.fa-pinterest:before{content:""}.fa-pinterest-square:before{content:""}.fa-google-plus-square:before{content:""}.fa-google-plus:before{content:""}.fa-money:before{content:""}.fa-caret-down:before,.wy-dropdown 
.caret:before,.icon-caret-down:before{content:""}.fa-caret-up:before{content:""}.fa-caret-left:before{content:""}.fa-caret-right:before{content:""}.fa-columns:before{content:""}.fa-unsorted:before,.fa-sort:before{content:""}.fa-sort-down:before,.fa-sort-desc:before{content:""}.fa-sort-up:before,.fa-sort-asc:before{content:""}.fa-envelope:before{content:""}.fa-linkedin:before{content:""}.fa-rotate-left:before,.fa-undo:before{content:""}.fa-legal:before,.fa-gavel:before{content:""}.fa-dashboard:before,.fa-tachometer:before{content:""}.fa-comment-o:before{content:""}.fa-comments-o:before{content:""}.fa-flash:before,.fa-bolt:before{content:""}.fa-sitemap:before{content:""}.fa-umbrella:before{content:""}.fa-paste:before,.fa-clipboard:before{content:""}.fa-lightbulb-o:before{content:""}.fa-exchange:before{content:""}.fa-cloud-download:before{content:""}.fa-cloud-upload:before{content:""}.fa-user-md:before{content:""}.fa-stethoscope:before{content:""}.fa-suitcase:before{content:""}.fa-bell-o:before{content:""}.fa-coffee:before{content:""}.fa-cutlery:before{content:""}.fa-file-text-o:before{content:""}.fa-building-o:before{content:""}.fa-hospital-o:before{content:""}.fa-ambulance:before{content:""}.fa-medkit:before{content:""}.fa-fighter-jet:before{content:""}.fa-beer:before{content:""}.fa-h-square:before{content:""}.fa-plus-square:before{content:""}.fa-angle-double-left:before{content:""}.fa-angle-double-right:before{content:""}.fa-angle-double-up:before{content:""}.fa-angle-double-down:before{content:""}.fa-angle-left:before{content:""}.fa-angle-right:before{content:""}.fa-angle-up:before{content:""}.fa-angle-down:before{content:""}.fa-desktop:before{content:""}.fa-laptop:before{content:""}.fa-tablet:before{content:""}.fa-mobile-phone:before,.fa-mobile:before{content:""}.fa-circle-o:before{content:""}.fa-quote-left:before{content:""}.fa-quote-right:before{content:""}.fa-spinner:before{content:""}.fa-circle:before{content:""}.fa-mail-reply:before,.fa-reply:before{content:""}.fa-github-alt:before{content:""}.fa-folder-o:before{content:""}.fa-folder-open-o:before{content:""}.fa-smile-o:before{content:""}.fa-frown-o:before{content:""}.fa-meh-o:before{content:""}.fa-gamepad:before{content:""}.fa-keyboard-o:before{content:""}.fa-flag-o:before{content:""}.fa-flag-checkered:before{content:""}.fa-terminal:before{content:""}.fa-code:before{content:""}.fa-mail-reply-all:before,.fa-reply-all:before{content:""}.fa-star-half-empty:before,.fa-star-half-full:before,.fa-star-half-o:before{content:""}.fa-location-arrow:before{content:""}.fa-crop:before{content:""}.fa-code-fork:before{content:""}.fa-unlink:before,.fa-chain-broken:before{content:""}.fa-question:before{content:""}.fa-info:before{content:""}.fa-exclamation:before{content:""}.fa-superscript:before{content:""}.fa-subscript:before{content:""}.fa-eraser:before{content:""}.fa-puzzle-piece:before{content:""}.fa-microphone:before{content:""}.fa-microphone-slash:before{content:""}.fa-shield:before{content:""}.fa-calendar-o:before{content:""}.fa-fire-extinguisher:before{content:""}.fa-rocket:before{content:""}.fa-maxcdn:before{content:""}.fa-chevron-circle-left:before{content:""}.fa-chevron-circle-right:before{content:""}.fa-chevron-circle-up:before{content:""}.fa-chevron-circle-down:before{content:""}.fa-html5:before{content:""}.fa-css3:before{content:""}.fa-anchor:before{content:""}.fa-unlock-alt:before{content:""}.fa-bullseye:before{content:""}.fa-ellipsis-h:bef
ore{content:""}.fa-ellipsis-v:before{content:""}.fa-rss-square:before{content:""}.fa-play-circle:before{content:""}.fa-ticket:before{content:""}.fa-minus-square:before{content:""}.fa-minus-square-o:before,.wy-menu-vertical li.on a span.toctree-expand:before,.wy-menu-vertical li.current>a span.toctree-expand:before{content:""}.fa-level-up:before{content:""}.fa-level-down:before{content:""}.fa-check-square:before{content:""}.fa-pencil-square:before{content:""}.fa-external-link-square:before{content:""}.fa-share-square:before{content:""}.fa-compass:before{content:""}.fa-toggle-down:before,.fa-caret-square-o-down:before{content:""}.fa-toggle-up:before,.fa-caret-square-o-up:before{content:""}.fa-toggle-right:before,.fa-caret-square-o-right:before{content:""}.fa-euro:before,.fa-eur:before{content:""}.fa-gbp:before{content:""}.fa-dollar:before,.fa-usd:before{content:""}.fa-rupee:before,.fa-inr:before{content:""}.fa-cny:before,.fa-rmb:before,.fa-yen:before,.fa-jpy:before{content:""}.fa-ruble:before,.fa-rouble:before,.fa-rub:before{content:""}.fa-won:before,.fa-krw:before{content:""}.fa-bitcoin:before,.fa-btc:before{content:""}.fa-file:before{content:""}.fa-file-text:before{content:""}.fa-sort-alpha-asc:before{content:""}.fa-sort-alpha-desc:before{content:""}.fa-sort-amount-asc:before{content:""}.fa-sort-amount-desc:before{content:""}.fa-sort-numeric-asc:before{content:""}.fa-sort-numeric-desc:before{content:""}.fa-thumbs-up:before{content:""}.fa-thumbs-down:before{content:""}.fa-youtube-square:before{content:""}.fa-youtube:before{content:""}.fa-xing:before{content:""}.fa-xing-square:before{content:""}.fa-youtube-play:before{content:""}.fa-dropbox:before{content:""}.fa-stack-overflow:before{content:""}.fa-instagram:before{content:""}.fa-flickr:before{content:""}.fa-adn:before{content:""}.fa-bitbucket:before,.icon-bitbucket:before{content:""}.fa-bitbucket-square:before{content:""}.fa-tumblr:before{content:""}.fa-tumblr-square:before{content:""}.fa-long-arrow-down:before{content:""}.fa-long-arrow-up:before{content:""}.fa-long-arrow-left:before{content:""}.fa-long-arrow-right:before{content:""}.fa-apple:before{content:""}.fa-windows:before{content:""}.fa-android:before{content:""}.fa-linux:before{content:""}.fa-dribbble:before{content:""}.fa-skype:before{content:""}.fa-foursquare:before{content:""}.fa-trello:before{content:""}.fa-female:before{content:""}.fa-male:before{content:""}.fa-gittip:before{content:""}.fa-sun-o:before{content:""}.fa-moon-o:before{content:""}.fa-archive:before{content:""}.fa-bug:before{content:""}.fa-vk:before{content:""}.fa-weibo:before{content:""}.fa-renren:before{content:""}.fa-pagelines:before{content:""}.fa-stack-exchange:before{content:""}.fa-arrow-circle-o-right:before{content:""}.fa-arrow-circle-o-left:before{content:""}.fa-toggle-left:before,.fa-caret-square-o-left:before{content:""}.fa-dot-circle-o:before{content:""}.fa-wheelchair:before{content:""}.fa-vimeo-square:before{content:""}.fa-turkish-lira:before,.fa-try:before{content:""}.fa-plus-square-o:before,.wy-menu-vertical li 
span.toctree-expand:before{content:""}.fa-space-shuttle:before{content:""}.fa-slack:before{content:""}.fa-envelope-square:before{content:""}.fa-wordpress:before{content:""}.fa-openid:before{content:""}.fa-institution:before,.fa-bank:before,.fa-university:before{content:""}.fa-mortar-board:before,.fa-graduation-cap:before{content:""}.fa-yahoo:before{content:""}.fa-google:before{content:""}.fa-reddit:before{content:""}.fa-reddit-square:before{content:""}.fa-stumbleupon-circle:before{content:""}.fa-stumbleupon:before{content:""}.fa-delicious:before{content:""}.fa-digg:before{content:""}.fa-pied-piper:before{content:""}.fa-pied-piper-alt:before{content:""}.fa-drupal:before{content:""}.fa-joomla:before{content:""}.fa-language:before{content:""}.fa-fax:before{content:""}.fa-building:before{content:""}.fa-child:before{content:""}.fa-paw:before{content:""}.fa-spoon:before{content:""}.fa-cube:before{content:""}.fa-cubes:before{content:""}.fa-behance:before{content:""}.fa-behance-square:before{content:""}.fa-steam:before{content:""}.fa-steam-square:before{content:""}.fa-recycle:before{content:""}.fa-automobile:before,.fa-car:before{content:""}.fa-cab:before,.fa-taxi:before{content:""}.fa-tree:before{content:""}.fa-spotify:before{content:""}.fa-deviantart:before{content:""}.fa-soundcloud:before{content:""}.fa-database:before{content:""}.fa-file-pdf-o:before{content:""}.fa-file-word-o:before{content:""}.fa-file-excel-o:before{content:""}.fa-file-powerpoint-o:before{content:""}.fa-file-photo-o:before,.fa-file-picture-o:before,.fa-file-image-o:before{content:""}.fa-file-zip-o:before,.fa-file-archive-o:before{content:""}.fa-file-sound-o:before,.fa-file-audio-o:before{content:""}.fa-file-movie-o:before,.fa-file-video-o:before{content:""}.fa-file-code-o:before{content:""}.fa-vine:before{content:""}.fa-codepen:before{content:""}.fa-jsfiddle:before{content:""}.fa-life-bouy:before,.fa-life-buoy:before,.fa-life-saver:before,.fa-support:before,.fa-life-ring:before{content:""}.fa-circle-o-notch:before{content:""}.fa-ra:before,.fa-rebel:before{content:""}.fa-ge:before,.fa-empire:before{content:""}.fa-git-square:before{content:""}.fa-git:before{content:""}.fa-hacker-news:before{content:""}.fa-tencent-weibo:before{content:""}.fa-qq:before{content:""}.fa-wechat:before,.fa-weixin:before{content:""}.fa-send:before,.fa-paper-plane:before{content:""}.fa-send-o:before,.fa-paper-plane-o:before{content:""}.fa-history:before{content:""}.fa-circle-thin:before{content:""}.fa-header:before{content:""}.fa-paragraph:before{content:""}.fa-sliders:before{content:""}.fa-share-alt:before{content:""}.fa-share-alt-square:before{content:""}.fa-bomb:before{content:""}.fa-soccer-ball-o:before,.fa-futbol-o:before{content:""}.fa-tty:before{content:""}.fa-binoculars:before{content:""}.fa-plug:before{content:""}.fa-slideshare:before{content:""}.fa-twitch:before{content:""}.fa-yelp:before{content:""}.fa-newspaper-o:before{content:""}.fa-wifi:before{content:""}.fa-calculator:before{content:""}.fa-paypal:before{content:""}.fa-google-wallet:before{content:""}.fa-cc-visa:before{content:""}.fa-cc-mastercard:before{content:""}.fa-cc-discover:before{content:""}.fa-cc-amex:before{content:""}.fa-cc-paypal:before{content:""}.fa-cc-stripe:before{content:""}.fa-bell-slash:before{content:""}.fa-bell-slash-o:before{content:""}.fa-trash:before{content:""}.fa-copyright:before{content:""}.fa-at:before{content:""}.fa-eyedropper:before{content:""}.fa-paint-brush:befor
e{content:""}.fa-birthday-cake:before{content:""}.fa-area-chart:before{content:""}.fa-pie-chart:before{content:""}.fa-line-chart:before{content:""}.fa-lastfm:before{content:""}.fa-lastfm-square:before{content:""}.fa-toggle-off:before{content:""}.fa-toggle-on:before{content:""}.fa-bicycle:before{content:""}.fa-bus:before{content:""}.fa-ioxhost:before{content:""}.fa-angellist:before{content:""}.fa-cc:before{content:""}.fa-shekel:before,.fa-sheqel:before,.fa-ils:before{content:""}.fa-meanpath:before{content:""}.fa,.wy-menu-vertical li span.toctree-expand,.wy-menu-vertical li.on a span.toctree-expand,.wy-menu-vertical li.current>a span.toctree-expand,.rst-content .admonition-title,.rst-content h1 .headerlink,.rst-content h2 .headerlink,.rst-content h3 .headerlink,.rst-content h4 .headerlink,.rst-content h5 .headerlink,.rst-content h6 .headerlink,.rst-content dl dt .headerlink,.rst-content p.caption .headerlink,.rst-content tt.download span:first-child,.rst-content code.download span:first-child,.icon,.wy-dropdown .caret,.wy-inline-validate.wy-inline-validate-success .wy-input-context,.wy-inline-validate.wy-inline-validate-danger .wy-input-context,.wy-inline-validate.wy-inline-validate-warning .wy-input-context,.wy-inline-validate.wy-inline-validate-info .wy-input-context{font-family:inherit}.fa:before,.wy-menu-vertical li span.toctree-expand:before,.wy-menu-vertical li.on a span.toctree-expand:before,.wy-menu-vertical li.current>a span.toctree-expand:before,.rst-content .admonition-title:before,.rst-content h1 .headerlink:before,.rst-content h2 .headerlink:before,.rst-content h3 .headerlink:before,.rst-content h4 .headerlink:before,.rst-content h5 .headerlink:before,.rst-content h6 .headerlink:before,.rst-content dl dt .headerlink:before,.rst-content p.caption .headerlink:before,.rst-content tt.download span:first-child:before,.rst-content code.download span:first-child:before,.icon:before,.wy-dropdown .caret:before,.wy-inline-validate.wy-inline-validate-success .wy-input-context:before,.wy-inline-validate.wy-inline-validate-danger .wy-input-context:before,.wy-inline-validate.wy-inline-validate-warning .wy-input-context:before,.wy-inline-validate.wy-inline-validate-info .wy-input-context:before{font-family:"FontAwesome";display:inline-block;font-style:normal;font-weight:normal;line-height:1;text-decoration:inherit}a .fa,a .wy-menu-vertical li span.toctree-expand,.wy-menu-vertical li a span.toctree-expand,.wy-menu-vertical li.on a span.toctree-expand,.wy-menu-vertical li.current>a span.toctree-expand,a .rst-content .admonition-title,.rst-content a .admonition-title,a .rst-content h1 .headerlink,.rst-content h1 a .headerlink,a .rst-content h2 .headerlink,.rst-content h2 a .headerlink,a .rst-content h3 .headerlink,.rst-content h3 a .headerlink,a .rst-content h4 .headerlink,.rst-content h4 a .headerlink,a .rst-content h5 .headerlink,.rst-content h5 a .headerlink,a .rst-content h6 .headerlink,.rst-content h6 a .headerlink,a .rst-content dl dt .headerlink,.rst-content dl dt a .headerlink,a .rst-content p.caption .headerlink,.rst-content p.caption a .headerlink,a .rst-content tt.download span:first-child,.rst-content tt.download a span:first-child,a .rst-content code.download span:first-child,.rst-content code.download a span:first-child,a .icon{display:inline-block;text-decoration:inherit}.btn .fa,.btn .wy-menu-vertical li span.toctree-expand,.wy-menu-vertical li .btn span.toctree-expand,.btn .wy-menu-vertical li.on a span.toctree-expand,.wy-menu-vertical li.on a .btn 
span.toctree-expand,.btn .wy-menu-vertical li.current>a span.toctree-expand,.wy-menu-vertical li.current>a .btn span.toctree-expand,.btn .rst-content .admonition-title,.rst-content .btn .admonition-title,.btn .rst-content h1 .headerlink,.rst-content h1 .btn .headerlink,.btn .rst-content h2 .headerlink,.rst-content h2 .btn .headerlink,.btn .rst-content h3 .headerlink,.rst-content h3 .btn .headerlink,.btn .rst-content h4 .headerlink,.rst-content h4 .btn .headerlink,.btn .rst-content h5 .headerlink,.rst-content h5 .btn .headerlink,.btn .rst-content h6 .headerlink,.rst-content h6 .btn .headerlink,.btn .rst-content dl dt .headerlink,.rst-content dl dt .btn .headerlink,.btn .rst-content p.caption .headerlink,.rst-content p.caption .btn .headerlink,.btn .rst-content tt.download span:first-child,.rst-content tt.download .btn span:first-child,.btn .rst-content code.download span:first-child,.rst-content code.download .btn span:first-child,.btn .icon,.nav .fa,.nav .wy-menu-vertical li span.toctree-expand,.wy-menu-vertical li .nav span.toctree-expand,.nav .wy-menu-vertical li.on a span.toctree-expand,.wy-menu-vertical li.on a .nav span.toctree-expand,.nav .wy-menu-vertical li.current>a span.toctree-expand,.wy-menu-vertical li.current>a .nav span.toctree-expand,.nav .rst-content .admonition-title,.rst-content .nav .admonition-title,.nav .rst-content h1 .headerlink,.rst-content h1 .nav .headerlink,.nav .rst-content h2 .headerlink,.rst-content h2 .nav .headerlink,.nav .rst-content h3 .headerlink,.rst-content h3 .nav .headerlink,.nav .rst-content h4 .headerlink,.rst-content h4 .nav .headerlink,.nav .rst-content h5 .headerlink,.rst-content h5 .nav .headerlink,.nav .rst-content h6 .headerlink,.rst-content h6 .nav .headerlink,.nav .rst-content dl dt .headerlink,.rst-content dl dt .nav .headerlink,.nav .rst-content p.caption .headerlink,.rst-content p.caption .nav .headerlink,.nav .rst-content tt.download span:first-child,.rst-content tt.download .nav span:first-child,.nav .rst-content code.download span:first-child,.rst-content code.download .nav span:first-child,.nav .icon{display:inline}.btn .fa.fa-large,.btn .wy-menu-vertical li span.fa-large.toctree-expand,.wy-menu-vertical li .btn span.fa-large.toctree-expand,.btn .rst-content .fa-large.admonition-title,.rst-content .btn .fa-large.admonition-title,.btn .rst-content h1 .fa-large.headerlink,.rst-content h1 .btn .fa-large.headerlink,.btn .rst-content h2 .fa-large.headerlink,.rst-content h2 .btn .fa-large.headerlink,.btn .rst-content h3 .fa-large.headerlink,.rst-content h3 .btn .fa-large.headerlink,.btn .rst-content h4 .fa-large.headerlink,.rst-content h4 .btn .fa-large.headerlink,.btn .rst-content h5 .fa-large.headerlink,.rst-content h5 .btn .fa-large.headerlink,.btn .rst-content h6 .fa-large.headerlink,.rst-content h6 .btn .fa-large.headerlink,.btn .rst-content dl dt .fa-large.headerlink,.rst-content dl dt .btn .fa-large.headerlink,.btn .rst-content p.caption .fa-large.headerlink,.rst-content p.caption .btn .fa-large.headerlink,.btn .rst-content tt.download span.fa-large:first-child,.rst-content tt.download .btn span.fa-large:first-child,.btn .rst-content code.download span.fa-large:first-child,.rst-content code.download .btn span.fa-large:first-child,.btn .fa-large.icon,.nav .fa.fa-large,.nav .wy-menu-vertical li span.fa-large.toctree-expand,.wy-menu-vertical li .nav span.fa-large.toctree-expand,.nav .rst-content .fa-large.admonition-title,.rst-content .nav .fa-large.admonition-title,.nav .rst-content h1 .fa-large.headerlink,.rst-content h1 .nav 
.fa-large.headerlink,.nav .rst-content h2 .fa-large.headerlink,.rst-content h2 .nav .fa-large.headerlink,.nav .rst-content h3 .fa-large.headerlink,.rst-content h3 .nav .fa-large.headerlink,.nav .rst-content h4 .fa-large.headerlink,.rst-content h4 .nav .fa-large.headerlink,.nav .rst-content h5 .fa-large.headerlink,.rst-content h5 .nav .fa-large.headerlink,.nav .rst-content h6 .fa-large.headerlink,.rst-content h6 .nav .fa-large.headerlink,.nav .rst-content dl dt .fa-large.headerlink,.rst-content dl dt .nav .fa-large.headerlink,.nav .rst-content p.caption .fa-large.headerlink,.rst-content p.caption .nav .fa-large.headerlink,.nav .rst-content tt.download span.fa-large:first-child,.rst-content tt.download .nav span.fa-large:first-child,.nav .rst-content code.download span.fa-large:first-child,.rst-content code.download .nav span.fa-large:first-child,.nav .fa-large.icon{line-height:0.9em}.btn .fa.fa-spin,.btn .wy-menu-vertical li span.fa-spin.toctree-expand,.wy-menu-vertical li .btn span.fa-spin.toctree-expand,.btn .rst-content .fa-spin.admonition-title,.rst-content .btn .fa-spin.admonition-title,.btn .rst-content h1 .fa-spin.headerlink,.rst-content h1 .btn .fa-spin.headerlink,.btn .rst-content h2 .fa-spin.headerlink,.rst-content h2 .btn .fa-spin.headerlink,.btn .rst-content h3 .fa-spin.headerlink,.rst-content h3 .btn .fa-spin.headerlink,.btn .rst-content h4 .fa-spin.headerlink,.rst-content h4 .btn .fa-spin.headerlink,.btn .rst-content h5 .fa-spin.headerlink,.rst-content h5 .btn .fa-spin.headerlink,.btn .rst-content h6 .fa-spin.headerlink,.rst-content h6 .btn .fa-spin.headerlink,.btn .rst-content dl dt .fa-spin.headerlink,.rst-content dl dt .btn .fa-spin.headerlink,.btn .rst-content p.caption .fa-spin.headerlink,.rst-content p.caption .btn .fa-spin.headerlink,.btn .rst-content tt.download span.fa-spin:first-child,.rst-content tt.download .btn span.fa-spin:first-child,.btn .rst-content code.download span.fa-spin:first-child,.rst-content code.download .btn span.fa-spin:first-child,.btn .fa-spin.icon,.nav .fa.fa-spin,.nav .wy-menu-vertical li span.fa-spin.toctree-expand,.wy-menu-vertical li .nav span.fa-spin.toctree-expand,.nav .rst-content .fa-spin.admonition-title,.rst-content .nav .fa-spin.admonition-title,.nav .rst-content h1 .fa-spin.headerlink,.rst-content h1 .nav .fa-spin.headerlink,.nav .rst-content h2 .fa-spin.headerlink,.rst-content h2 .nav .fa-spin.headerlink,.nav .rst-content h3 .fa-spin.headerlink,.rst-content h3 .nav .fa-spin.headerlink,.nav .rst-content h4 .fa-spin.headerlink,.rst-content h4 .nav .fa-spin.headerlink,.nav .rst-content h5 .fa-spin.headerlink,.rst-content h5 .nav .fa-spin.headerlink,.nav .rst-content h6 .fa-spin.headerlink,.rst-content h6 .nav .fa-spin.headerlink,.nav .rst-content dl dt .fa-spin.headerlink,.rst-content dl dt .nav .fa-spin.headerlink,.nav .rst-content p.caption .fa-spin.headerlink,.rst-content p.caption .nav .fa-spin.headerlink,.nav .rst-content tt.download span.fa-spin:first-child,.rst-content tt.download .nav span.fa-spin:first-child,.nav .rst-content code.download span.fa-spin:first-child,.rst-content code.download .nav span.fa-spin:first-child,.nav .fa-spin.icon{display:inline-block}.btn.fa:before,.wy-menu-vertical li span.btn.toctree-expand:before,.rst-content .btn.admonition-title:before,.rst-content h1 .btn.headerlink:before,.rst-content h2 .btn.headerlink:before,.rst-content h3 .btn.headerlink:before,.rst-content h4 .btn.headerlink:before,.rst-content h5 .btn.headerlink:before,.rst-content h6 .btn.headerlink:before,.rst-content dl dt 
.btn.headerlink:before,.rst-content p.caption .btn.headerlink:before,.rst-content tt.download span.btn:first-child:before,.rst-content code.download span.btn:first-child:before,.btn.icon:before{opacity:0.5;-webkit-transition:opacity 0.05s ease-in;-moz-transition:opacity 0.05s ease-in;transition:opacity 0.05s ease-in}.btn.fa:hover:before,.wy-menu-vertical li span.btn.toctree-expand:hover:before,.rst-content .btn.admonition-title:hover:before,.rst-content h1 .btn.headerlink:hover:before,.rst-content h2 .btn.headerlink:hover:before,.rst-content h3 .btn.headerlink:hover:before,.rst-content h4 .btn.headerlink:hover:before,.rst-content h5 .btn.headerlink:hover:before,.rst-content h6 .btn.headerlink:hover:before,.rst-content dl dt .btn.headerlink:hover:before,.rst-content p.caption .btn.headerlink:hover:before,.rst-content tt.download span.btn:first-child:hover:before,.rst-content code.download span.btn:first-child:hover:before,.btn.icon:hover:before{opacity:1}.btn-mini .fa:before,.btn-mini .wy-menu-vertical li span.toctree-expand:before,.wy-menu-vertical li .btn-mini span.toctree-expand:before,.btn-mini .rst-content .admonition-title:before,.rst-content .btn-mini .admonition-title:before,.btn-mini .rst-content h1 .headerlink:before,.rst-content h1 .btn-mini .headerlink:before,.btn-mini .rst-content h2 .headerlink:before,.rst-content h2 .btn-mini .headerlink:before,.btn-mini .rst-content h3 .headerlink:before,.rst-content h3 .btn-mini .headerlink:before,.btn-mini .rst-content h4 .headerlink:before,.rst-content h4 .btn-mini .headerlink:before,.btn-mini .rst-content h5 .headerlink:before,.rst-content h5 .btn-mini .headerlink:before,.btn-mini .rst-content h6 .headerlink:before,.rst-content h6 .btn-mini .headerlink:before,.btn-mini .rst-content dl dt .headerlink:before,.rst-content dl dt .btn-mini .headerlink:before,.btn-mini .rst-content p.caption .headerlink:before,.rst-content p.caption .btn-mini .headerlink:before,.btn-mini .rst-content tt.download span:first-child:before,.rst-content tt.download .btn-mini span:first-child:before,.btn-mini .rst-content code.download span:first-child:before,.rst-content code.download .btn-mini span:first-child:before,.btn-mini .icon:before{font-size:14px;vertical-align:-15%}.wy-alert,.rst-content .note,.rst-content .attention,.rst-content .caution,.rst-content .danger,.rst-content .error,.rst-content .hint,.rst-content .important,.rst-content .tip,.rst-content .warning,.rst-content .seealso,.rst-content .admonition-todo{padding:12px;line-height:24px;margin-bottom:24px;background:#e7f2fa}.wy-alert-title,.rst-content .admonition-title{color:#fff;font-weight:bold;display:block;color:#fff;background:#6ab0de;margin:-12px;padding:6px 12px;margin-bottom:12px}.wy-alert.wy-alert-danger,.rst-content .wy-alert-danger.note,.rst-content .wy-alert-danger.attention,.rst-content .wy-alert-danger.caution,.rst-content .danger,.rst-content .error,.rst-content .wy-alert-danger.hint,.rst-content .wy-alert-danger.important,.rst-content .wy-alert-danger.tip,.rst-content .wy-alert-danger.warning,.rst-content .wy-alert-danger.seealso,.rst-content .wy-alert-danger.admonition-todo{background:#fdf3f2}.wy-alert.wy-alert-danger .wy-alert-title,.rst-content .wy-alert-danger.note .wy-alert-title,.rst-content .wy-alert-danger.attention .wy-alert-title,.rst-content .wy-alert-danger.caution .wy-alert-title,.rst-content .danger .wy-alert-title,.rst-content .error .wy-alert-title,.rst-content .wy-alert-danger.hint .wy-alert-title,.rst-content .wy-alert-danger.important .wy-alert-title,.rst-content 
.wy-alert-danger.tip .wy-alert-title,.rst-content .wy-alert-danger.warning .wy-alert-title,.rst-content .wy-alert-danger.seealso .wy-alert-title,.rst-content .wy-alert-danger.admonition-todo .wy-alert-title,.wy-alert.wy-alert-danger .rst-content .admonition-title,.rst-content .wy-alert.wy-alert-danger .admonition-title,.rst-content .wy-alert-danger.note .admonition-title,.rst-content .wy-alert-danger.attention .admonition-title,.rst-content .wy-alert-danger.caution .admonition-title,.rst-content .danger .admonition-title,.rst-content .error .admonition-title,.rst-content .wy-alert-danger.hint .admonition-title,.rst-content .wy-alert-danger.important .admonition-title,.rst-content .wy-alert-danger.tip .admonition-title,.rst-content .wy-alert-danger.warning .admonition-title,.rst-content .wy-alert-danger.seealso .admonition-title,.rst-content .wy-alert-danger.admonition-todo .admonition-title{background:#f29f97}.wy-alert.wy-alert-warning,.rst-content .wy-alert-warning.note,.rst-content .attention,.rst-content .caution,.rst-content .wy-alert-warning.danger,.rst-content .wy-alert-warning.error,.rst-content .wy-alert-warning.hint,.rst-content .wy-alert-warning.important,.rst-content .wy-alert-warning.tip,.rst-content .warning,.rst-content .wy-alert-warning.seealso,.rst-content .admonition-todo{background:#ffedcc}.wy-alert.wy-alert-warning .wy-alert-title,.rst-content .wy-alert-warning.note .wy-alert-title,.rst-content .attention .wy-alert-title,.rst-content .caution .wy-alert-title,.rst-content .wy-alert-warning.danger .wy-alert-title,.rst-content .wy-alert-warning.error .wy-alert-title,.rst-content .wy-alert-warning.hint .wy-alert-title,.rst-content .wy-alert-warning.important .wy-alert-title,.rst-content .wy-alert-warning.tip .wy-alert-title,.rst-content .warning .wy-alert-title,.rst-content .wy-alert-warning.seealso .wy-alert-title,.rst-content .admonition-todo .wy-alert-title,.wy-alert.wy-alert-warning .rst-content .admonition-title,.rst-content .wy-alert.wy-alert-warning .admonition-title,.rst-content .wy-alert-warning.note .admonition-title,.rst-content .attention .admonition-title,.rst-content .caution .admonition-title,.rst-content .wy-alert-warning.danger .admonition-title,.rst-content .wy-alert-warning.error .admonition-title,.rst-content .wy-alert-warning.hint .admonition-title,.rst-content .wy-alert-warning.important .admonition-title,.rst-content .wy-alert-warning.tip .admonition-title,.rst-content .warning .admonition-title,.rst-content .wy-alert-warning.seealso .admonition-title,.rst-content .admonition-todo .admonition-title{background:#f0b37e}.wy-alert.wy-alert-info,.rst-content .note,.rst-content .wy-alert-info.attention,.rst-content .wy-alert-info.caution,.rst-content .wy-alert-info.danger,.rst-content .wy-alert-info.error,.rst-content .wy-alert-info.hint,.rst-content .wy-alert-info.important,.rst-content .wy-alert-info.tip,.rst-content .wy-alert-info.warning,.rst-content .seealso,.rst-content .wy-alert-info.admonition-todo{background:#e7f2fa}.wy-alert.wy-alert-info .wy-alert-title,.rst-content .note .wy-alert-title,.rst-content .wy-alert-info.attention .wy-alert-title,.rst-content .wy-alert-info.caution .wy-alert-title,.rst-content .wy-alert-info.danger .wy-alert-title,.rst-content .wy-alert-info.error .wy-alert-title,.rst-content .wy-alert-info.hint .wy-alert-title,.rst-content .wy-alert-info.important .wy-alert-title,.rst-content .wy-alert-info.tip .wy-alert-title,.rst-content .wy-alert-info.warning .wy-alert-title,.rst-content .seealso .wy-alert-title,.rst-content 
.wy-alert-info.admonition-todo .wy-alert-title,.wy-alert.wy-alert-info .rst-content .admonition-title,.rst-content .wy-alert.wy-alert-info .admonition-title,.rst-content .note .admonition-title,.rst-content .wy-alert-info.attention .admonition-title,.rst-content .wy-alert-info.caution .admonition-title,.rst-content .wy-alert-info.danger .admonition-title,.rst-content .wy-alert-info.error .admonition-title,.rst-content .wy-alert-info.hint .admonition-title,.rst-content .wy-alert-info.important .admonition-title,.rst-content .wy-alert-info.tip .admonition-title,.rst-content .wy-alert-info.warning .admonition-title,.rst-content .seealso .admonition-title,.rst-content .wy-alert-info.admonition-todo .admonition-title{background:#6ab0de}.wy-alert.wy-alert-success,.rst-content .wy-alert-success.note,.rst-content .wy-alert-success.attention,.rst-content .wy-alert-success.caution,.rst-content .wy-alert-success.danger,.rst-content .wy-alert-success.error,.rst-content .hint,.rst-content .important,.rst-content .tip,.rst-content .wy-alert-success.warning,.rst-content .wy-alert-success.seealso,.rst-content .wy-alert-success.admonition-todo{background:#dbfaf4}.wy-alert.wy-alert-success .wy-alert-title,.rst-content .wy-alert-success.note .wy-alert-title,.rst-content .wy-alert-success.attention .wy-alert-title,.rst-content .wy-alert-success.caution .wy-alert-title,.rst-content .wy-alert-success.danger .wy-alert-title,.rst-content .wy-alert-success.error .wy-alert-title,.rst-content .hint .wy-alert-title,.rst-content .important .wy-alert-title,.rst-content .tip .wy-alert-title,.rst-content .wy-alert-success.warning .wy-alert-title,.rst-content .wy-alert-success.seealso .wy-alert-title,.rst-content .wy-alert-success.admonition-todo .wy-alert-title,.wy-alert.wy-alert-success .rst-content .admonition-title,.rst-content .wy-alert.wy-alert-success .admonition-title,.rst-content .wy-alert-success.note .admonition-title,.rst-content .wy-alert-success.attention .admonition-title,.rst-content .wy-alert-success.caution .admonition-title,.rst-content .wy-alert-success.danger .admonition-title,.rst-content .wy-alert-success.error .admonition-title,.rst-content .hint .admonition-title,.rst-content .important .admonition-title,.rst-content .tip .admonition-title,.rst-content .wy-alert-success.warning .admonition-title,.rst-content .wy-alert-success.seealso .admonition-title,.rst-content .wy-alert-success.admonition-todo .admonition-title{background:#1abc9c}.wy-alert.wy-alert-neutral,.rst-content .wy-alert-neutral.note,.rst-content .wy-alert-neutral.attention,.rst-content .wy-alert-neutral.caution,.rst-content .wy-alert-neutral.danger,.rst-content .wy-alert-neutral.error,.rst-content .wy-alert-neutral.hint,.rst-content .wy-alert-neutral.important,.rst-content .wy-alert-neutral.tip,.rst-content .wy-alert-neutral.warning,.rst-content .wy-alert-neutral.seealso,.rst-content .wy-alert-neutral.admonition-todo{background:#f3f6f6}.wy-alert.wy-alert-neutral .wy-alert-title,.rst-content .wy-alert-neutral.note .wy-alert-title,.rst-content .wy-alert-neutral.attention .wy-alert-title,.rst-content .wy-alert-neutral.caution .wy-alert-title,.rst-content .wy-alert-neutral.danger .wy-alert-title,.rst-content .wy-alert-neutral.error .wy-alert-title,.rst-content .wy-alert-neutral.hint .wy-alert-title,.rst-content .wy-alert-neutral.important .wy-alert-title,.rst-content .wy-alert-neutral.tip .wy-alert-title,.rst-content .wy-alert-neutral.warning .wy-alert-title,.rst-content .wy-alert-neutral.seealso .wy-alert-title,.rst-content 
.wy-alert-neutral.admonition-todo .wy-alert-title,.wy-alert.wy-alert-neutral .rst-content .admonition-title,.rst-content .wy-alert.wy-alert-neutral .admonition-title,.rst-content .wy-alert-neutral.note .admonition-title,.rst-content .wy-alert-neutral.attention .admonition-title,.rst-content .wy-alert-neutral.caution .admonition-title,.rst-content .wy-alert-neutral.danger .admonition-title,.rst-content .wy-alert-neutral.error .admonition-title,.rst-content .wy-alert-neutral.hint .admonition-title,.rst-content .wy-alert-neutral.important .admonition-title,.rst-content .wy-alert-neutral.tip .admonition-title,.rst-content .wy-alert-neutral.warning .admonition-title,.rst-content .wy-alert-neutral.seealso .admonition-title,.rst-content .wy-alert-neutral.admonition-todo .admonition-title{color:#404040;background:#e1e4e5}.wy-alert.wy-alert-neutral a,.rst-content .wy-alert-neutral.note a,.rst-content .wy-alert-neutral.attention a,.rst-content .wy-alert-neutral.caution a,.rst-content .wy-alert-neutral.danger a,.rst-content .wy-alert-neutral.error a,.rst-content .wy-alert-neutral.hint a,.rst-content .wy-alert-neutral.important a,.rst-content .wy-alert-neutral.tip a,.rst-content .wy-alert-neutral.warning a,.rst-content .wy-alert-neutral.seealso a,.rst-content .wy-alert-neutral.admonition-todo a{color:#2980B9}.wy-alert p:last-child,.rst-content .note p:last-child,.rst-content .attention p:last-child,.rst-content .caution p:last-child,.rst-content .danger p:last-child,.rst-content .error p:last-child,.rst-content .hint p:last-child,.rst-content .important p:last-child,.rst-content .tip p:last-child,.rst-content .warning p:last-child,.rst-content .seealso p:last-child,.rst-content .admonition-todo p:last-child{margin-bottom:0}.wy-tray-container{position:fixed;bottom:0px;left:0;z-index:600}.wy-tray-container li{display:block;width:300px;background:transparent;color:#fff;text-align:center;box-shadow:0 5px 5px 0 rgba(0,0,0,0.1);padding:0 24px;min-width:20%;opacity:0;height:0;line-height:56px;overflow:hidden;-webkit-transition:all 0.3s ease-in;-moz-transition:all 0.3s ease-in;transition:all 0.3s ease-in}.wy-tray-container li.wy-tray-item-success{background:#27AE60}.wy-tray-container li.wy-tray-item-info{background:#2980B9}.wy-tray-container li.wy-tray-item-warning{background:#E67E22}.wy-tray-container li.wy-tray-item-danger{background:#E74C3C}.wy-tray-container li.on{opacity:1;height:56px}@media screen and (max-width: 768px){.wy-tray-container{bottom:auto;top:0;width:100%}.wy-tray-container li{width:100%}}button{font-size:100%;margin:0;vertical-align:baseline;*vertical-align:middle;cursor:pointer;line-height:normal;-webkit-appearance:button;*overflow:visible}button::-moz-focus-inner,input::-moz-focus-inner{border:0;padding:0}button[disabled]{cursor:default}.btn{display:inline-block;border-radius:2px;line-height:normal;white-space:nowrap;text-align:center;cursor:pointer;font-size:100%;padding:6px 12px 8px 12px;color:#fff;border:1px solid rgba(0,0,0,0.1);background-color:#27AE60;text-decoration:none;font-weight:normal;font-family:"Lato","proxima-nova","Helvetica Neue",Arial,sans-serif;box-shadow:0px 1px 2px -1px rgba(255,255,255,0.5) inset,0px -2px 0px 0px rgba(0,0,0,0.1) inset;outline-none:false;vertical-align:middle;*display:inline;zoom:1;-webkit-user-drag:none;-webkit-user-select:none;-moz-user-select:none;-ms-user-select:none;user-select:none;-webkit-transition:all 0.1s linear;-moz-transition:all 0.1s linear;transition:all 0.1s 
linear}.btn-hover{background:#2e8ece;color:#fff}.btn:hover{background:#2cc36b;color:#fff}.btn:focus{background:#2cc36b;outline:0}.btn:active{box-shadow:0px -1px 0px 0px rgba(0,0,0,0.05) inset,0px 2px 0px 0px rgba(0,0,0,0.1) inset;padding:8px 12px 6px 12px}.btn:visited{color:#fff}.btn:disabled{background-image:none;filter:progid:DXImageTransform.Microsoft.gradient(enabled = false);filter:alpha(opacity=40);opacity:0.4;cursor:not-allowed;box-shadow:none}.btn-disabled{background-image:none;filter:progid:DXImageTransform.Microsoft.gradient(enabled = false);filter:alpha(opacity=40);opacity:0.4;cursor:not-allowed;box-shadow:none}.btn-disabled:hover,.btn-disabled:focus,.btn-disabled:active{background-image:none;filter:progid:DXImageTransform.Microsoft.gradient(enabled = false);filter:alpha(opacity=40);opacity:0.4;cursor:not-allowed;box-shadow:none}.btn::-moz-focus-inner{padding:0;border:0}.btn-small{font-size:80%}.btn-info{background-color:#2980B9 !important}.btn-info:hover{background-color:#2e8ece !important}.btn-neutral{background-color:#f3f6f6 !important;color:#404040 !important}.btn-neutral:hover{background-color:#e5ebeb !important;color:#404040}.btn-neutral:visited{color:#404040 !important}.btn-success{background-color:#27AE60 !important}.btn-success:hover{background-color:#295 !important}.btn-danger{background-color:#E74C3C !important}.btn-danger:hover{background-color:#ea6153 !important}.btn-warning{background-color:#E67E22 !important}.btn-warning:hover{background-color:#e98b39 !important}.btn-invert{background-color:#222}.btn-invert:hover{background-color:#2f2f2f !important}.btn-link{background-color:transparent !important;color:#2980B9;box-shadow:none;border-color:transparent !important}.btn-link:hover{background-color:transparent !important;color:#409ad5 !important;box-shadow:none}.btn-link:active{background-color:transparent !important;color:#409ad5 !important;box-shadow:none}.btn-link:visited{color:#9B59B6}.wy-btn-group .btn,.wy-control .btn{vertical-align:middle}.wy-btn-group{margin-bottom:24px;*zoom:1}.wy-btn-group:before,.wy-btn-group:after{display:table;content:""}.wy-btn-group:after{clear:both}.wy-dropdown{position:relative;display:inline-block}.wy-dropdown-active .wy-dropdown-menu{display:block}.wy-dropdown-menu{position:absolute;left:0;display:none;float:left;top:100%;min-width:100%;background:#fcfcfc;z-index:100;border:solid 1px #cfd7dd;box-shadow:0 2px 2px 0 rgba(0,0,0,0.1);padding:12px}.wy-dropdown-menu>dd>a{display:block;clear:both;color:#404040;white-space:nowrap;font-size:90%;padding:0 12px;cursor:pointer}.wy-dropdown-menu>dd>a:hover{background:#2980B9;color:#fff}.wy-dropdown-menu>dd.divider{border-top:solid 1px #cfd7dd;margin:6px 0}.wy-dropdown-menu>dd.search{padding-bottom:12px}.wy-dropdown-menu>dd.search input[type="search"]{width:100%}.wy-dropdown-menu>dd.call-to-action{background:#e3e3e3;text-transform:uppercase;font-weight:500;font-size:80%}.wy-dropdown-menu>dd.call-to-action:hover{background:#e3e3e3}.wy-dropdown-menu>dd.call-to-action .btn{color:#fff}.wy-dropdown.wy-dropdown-up .wy-dropdown-menu{bottom:100%;top:auto;left:auto;right:0}.wy-dropdown.wy-dropdown-bubble .wy-dropdown-menu{background:#fcfcfc;margin-top:2px}.wy-dropdown.wy-dropdown-bubble .wy-dropdown-menu a{padding:6px 12px}.wy-dropdown.wy-dropdown-bubble .wy-dropdown-menu a:hover{background:#2980B9;color:#fff}.wy-dropdown.wy-dropdown-left .wy-dropdown-menu{right:0;left:auto;text-align:right}.wy-dropdown-arrow:before{content:" ";border-bottom:5px solid #f5f5f5;border-left:5px solid 
transparent;border-right:5px solid transparent;position:absolute;display:block;top:-4px;left:50%;margin-left:-3px}.wy-dropdown-arrow.wy-dropdown-arrow-left:before{left:11px}.wy-form-stacked select{display:block}.wy-form-aligned input,.wy-form-aligned textarea,.wy-form-aligned select,.wy-form-aligned .wy-help-inline,.wy-form-aligned label{display:inline-block;*display:inline;*zoom:1;vertical-align:middle}.wy-form-aligned .wy-control-group>label{display:inline-block;vertical-align:middle;width:10em;margin:6px 12px 0 0;float:left}.wy-form-aligned .wy-control{float:left}.wy-form-aligned .wy-control label{display:block}.wy-form-aligned .wy-control select{margin-top:6px}fieldset{border:0;margin:0;padding:0}legend{display:block;width:100%;border:0;padding:0;white-space:normal;margin-bottom:24px;font-size:150%;*margin-left:-7px}label{display:block;margin:0 0 0.3125em 0;color:#333;font-size:90%}input,select,textarea{font-size:100%;margin:0;vertical-align:baseline;*vertical-align:middle}.wy-control-group{margin-bottom:24px;*zoom:1;max-width:68em;margin-left:auto;margin-right:auto;*zoom:1}.wy-control-group:before,.wy-control-group:after{display:table;content:""}.wy-control-group:after{clear:both}.wy-control-group:before,.wy-control-group:after{display:table;content:""}.wy-control-group:after{clear:both}.wy-control-group.wy-control-group-required>label:after{content:" *";color:#E74C3C}.wy-control-group .wy-form-full,.wy-control-group .wy-form-halves,.wy-control-group .wy-form-thirds{padding-bottom:12px}.wy-control-group .wy-form-full select,.wy-control-group .wy-form-halves select,.wy-control-group .wy-form-thirds select{width:100%}.wy-control-group .wy-form-full input[type="text"],.wy-control-group .wy-form-full input[type="password"],.wy-control-group .wy-form-full input[type="email"],.wy-control-group .wy-form-full input[type="url"],.wy-control-group .wy-form-full input[type="date"],.wy-control-group .wy-form-full input[type="month"],.wy-control-group .wy-form-full input[type="time"],.wy-control-group .wy-form-full input[type="datetime"],.wy-control-group .wy-form-full input[type="datetime-local"],.wy-control-group .wy-form-full input[type="week"],.wy-control-group .wy-form-full input[type="number"],.wy-control-group .wy-form-full input[type="search"],.wy-control-group .wy-form-full input[type="tel"],.wy-control-group .wy-form-full input[type="color"],.wy-control-group .wy-form-halves input[type="text"],.wy-control-group .wy-form-halves input[type="password"],.wy-control-group .wy-form-halves input[type="email"],.wy-control-group .wy-form-halves input[type="url"],.wy-control-group .wy-form-halves input[type="date"],.wy-control-group .wy-form-halves input[type="month"],.wy-control-group .wy-form-halves input[type="time"],.wy-control-group .wy-form-halves input[type="datetime"],.wy-control-group .wy-form-halves input[type="datetime-local"],.wy-control-group .wy-form-halves input[type="week"],.wy-control-group .wy-form-halves input[type="number"],.wy-control-group .wy-form-halves input[type="search"],.wy-control-group .wy-form-halves input[type="tel"],.wy-control-group .wy-form-halves input[type="color"],.wy-control-group .wy-form-thirds input[type="text"],.wy-control-group .wy-form-thirds input[type="password"],.wy-control-group .wy-form-thirds input[type="email"],.wy-control-group .wy-form-thirds input[type="url"],.wy-control-group .wy-form-thirds input[type="date"],.wy-control-group .wy-form-thirds input[type="month"],.wy-control-group .wy-form-thirds input[type="time"],.wy-control-group 
.wy-form-thirds input[type="datetime"],.wy-control-group .wy-form-thirds input[type="datetime-local"],.wy-control-group .wy-form-thirds input[type="week"],.wy-control-group .wy-form-thirds input[type="number"],.wy-control-group .wy-form-thirds input[type="search"],.wy-control-group .wy-form-thirds input[type="tel"],.wy-control-group .wy-form-thirds input[type="color"]{width:100%}.wy-control-group .wy-form-full{float:left;display:block;margin-right:2.35765%;width:100%;margin-right:0}.wy-control-group .wy-form-full:last-child{margin-right:0}.wy-control-group .wy-form-halves{float:left;display:block;margin-right:2.35765%;width:48.82117%}.wy-control-group .wy-form-halves:last-child{margin-right:0}.wy-control-group .wy-form-halves:nth-of-type(2n){margin-right:0}.wy-control-group .wy-form-halves:nth-of-type(2n+1){clear:left}.wy-control-group .wy-form-thirds{float:left;display:block;margin-right:2.35765%;width:31.76157%}.wy-control-group .wy-form-thirds:last-child{margin-right:0}.wy-control-group .wy-form-thirds:nth-of-type(3n){margin-right:0}.wy-control-group .wy-form-thirds:nth-of-type(3n+1){clear:left}.wy-control-group.wy-control-group-no-input .wy-control{margin:6px 0 0 0;font-size:90%}.wy-control-no-input{display:inline-block;margin:6px 0 0 0;font-size:90%}.wy-control-group.fluid-input input[type="text"],.wy-control-group.fluid-input input[type="password"],.wy-control-group.fluid-input input[type="email"],.wy-control-group.fluid-input input[type="url"],.wy-control-group.fluid-input input[type="date"],.wy-control-group.fluid-input input[type="month"],.wy-control-group.fluid-input input[type="time"],.wy-control-group.fluid-input input[type="datetime"],.wy-control-group.fluid-input input[type="datetime-local"],.wy-control-group.fluid-input input[type="week"],.wy-control-group.fluid-input input[type="number"],.wy-control-group.fluid-input input[type="search"],.wy-control-group.fluid-input input[type="tel"],.wy-control-group.fluid-input input[type="color"]{width:100%}.wy-form-message-inline{display:inline-block;padding-left:0.3em;color:#666;vertical-align:middle;font-size:90%}.wy-form-message{display:block;color:#999;font-size:70%;margin-top:0.3125em;font-style:italic}.wy-form-message p{font-size:inherit;font-style:italic;margin-bottom:6px}.wy-form-message p:last-child{margin-bottom:0}input{line-height:normal}input[type="button"],input[type="reset"],input[type="submit"]{-webkit-appearance:button;cursor:pointer;font-family:"Lato","proxima-nova","Helvetica Neue",Arial,sans-serif;*overflow:visible}input[type="text"],input[type="password"],input[type="email"],input[type="url"],input[type="date"],input[type="month"],input[type="time"],input[type="datetime"],input[type="datetime-local"],input[type="week"],input[type="number"],input[type="search"],input[type="tel"],input[type="color"]{-webkit-appearance:none;padding:6px;display:inline-block;border:1px solid #ccc;font-size:80%;font-family:"Lato","proxima-nova","Helvetica Neue",Arial,sans-serif;box-shadow:inset 0 1px 3px #ddd;border-radius:0;-webkit-transition:border 0.3s linear;-moz-transition:border 0.3s linear;transition:border 0.3s linear}input[type="datetime-local"]{padding:0.34375em 
0.625em}input[disabled]{cursor:default}input[type="checkbox"],input[type="radio"]{-webkit-box-sizing:border-box;-moz-box-sizing:border-box;box-sizing:border-box;padding:0;margin-right:0.3125em;*height:13px;*width:13px}input[type="search"]{-webkit-box-sizing:border-box;-moz-box-sizing:border-box;box-sizing:border-box}input[type="search"]::-webkit-search-cancel-button,input[type="search"]::-webkit-search-decoration{-webkit-appearance:none}input[type="text"]:focus,input[type="password"]:focus,input[type="email"]:focus,input[type="url"]:focus,input[type="date"]:focus,input[type="month"]:focus,input[type="time"]:focus,input[type="datetime"]:focus,input[type="datetime-local"]:focus,input[type="week"]:focus,input[type="number"]:focus,input[type="search"]:focus,input[type="tel"]:focus,input[type="color"]:focus{outline:0;outline:thin dotted \9;border-color:#333}input.no-focus:focus{border-color:#ccc !important}input[type="file"]:focus,input[type="radio"]:focus,input[type="checkbox"]:focus{outline:thin dotted #333;outline:1px auto #129FEA}input[type="text"][disabled],input[type="password"][disabled],input[type="email"][disabled],input[type="url"][disabled],input[type="date"][disabled],input[type="month"][disabled],input[type="time"][disabled],input[type="datetime"][disabled],input[type="datetime-local"][disabled],input[type="week"][disabled],input[type="number"][disabled],input[type="search"][disabled],input[type="tel"][disabled],input[type="color"][disabled]{cursor:not-allowed;background-color:#fafafa}input:focus:invalid,textarea:focus:invalid,select:focus:invalid{color:#E74C3C;border:1px solid #E74C3C}input:focus:invalid:focus,textarea:focus:invalid:focus,select:focus:invalid:focus{border-color:#E74C3C}input[type="file"]:focus:invalid:focus,input[type="radio"]:focus:invalid:focus,input[type="checkbox"]:focus:invalid:focus{outline-color:#E74C3C}input.wy-input-large{padding:12px;font-size:100%}textarea{overflow:auto;vertical-align:top;width:100%;font-family:"Lato","proxima-nova","Helvetica Neue",Arial,sans-serif}select,textarea{padding:0.5em 0.625em;display:inline-block;border:1px solid #ccc;font-size:80%;box-shadow:inset 0 1px 3px #ddd;-webkit-transition:border 0.3s linear;-moz-transition:border 0.3s linear;transition:border 0.3s linear}select{border:1px solid #ccc;background-color:#fff}select[multiple]{height:auto}select:focus,textarea:focus{outline:0}select[disabled],textarea[disabled],input[readonly],select[readonly],textarea[readonly]{cursor:not-allowed;background-color:#fafafa}input[type="radio"][disabled],input[type="checkbox"][disabled]{cursor:not-allowed}.wy-checkbox,.wy-radio{margin:6px 0;color:#404040;display:block}.wy-checkbox input,.wy-radio input{vertical-align:baseline}.wy-form-message-inline{display:inline-block;*display:inline;*zoom:1;vertical-align:middle}.wy-input-prefix,.wy-input-suffix{white-space:nowrap;padding:6px}.wy-input-prefix .wy-input-context,.wy-input-suffix .wy-input-context{line-height:27px;padding:0 8px;display:inline-block;font-size:80%;background-color:#f3f6f6;border:solid 1px #ccc;color:#999}.wy-input-suffix .wy-input-context{border-left:0}.wy-input-prefix .wy-input-context{border-right:0}.wy-switch{width:36px;height:12px;margin:12px 0;position:relative;border-radius:4px;background:#ccc;cursor:pointer;-webkit-transition:all 0.2s ease-in-out;-moz-transition:all 0.2s ease-in-out;transition:all 0.2s 
ease-in-out}.wy-switch:before{position:absolute;content:"";display:block;width:18px;height:18px;border-radius:4px;background:#999;left:-3px;top:-3px;-webkit-transition:all 0.2s ease-in-out;-moz-transition:all 0.2s ease-in-out;transition:all 0.2s ease-in-out}.wy-switch:after{content:"false";position:absolute;left:48px;display:block;font-size:12px;color:#ccc}.wy-switch.active{background:#1e8449}.wy-switch.active:before{left:24px;background:#27AE60}.wy-switch.active:after{content:"true"}.wy-switch.disabled,.wy-switch.active.disabled{cursor:not-allowed}.wy-control-group.wy-control-group-error .wy-form-message,.wy-control-group.wy-control-group-error>label{color:#E74C3C}.wy-control-group.wy-control-group-error input[type="text"],.wy-control-group.wy-control-group-error input[type="password"],.wy-control-group.wy-control-group-error input[type="email"],.wy-control-group.wy-control-group-error input[type="url"],.wy-control-group.wy-control-group-error input[type="date"],.wy-control-group.wy-control-group-error input[type="month"],.wy-control-group.wy-control-group-error input[type="time"],.wy-control-group.wy-control-group-error input[type="datetime"],.wy-control-group.wy-control-group-error input[type="datetime-local"],.wy-control-group.wy-control-group-error input[type="week"],.wy-control-group.wy-control-group-error input[type="number"],.wy-control-group.wy-control-group-error input[type="search"],.wy-control-group.wy-control-group-error input[type="tel"],.wy-control-group.wy-control-group-error input[type="color"]{border:solid 1px #E74C3C}.wy-control-group.wy-control-group-error textarea{border:solid 1px #E74C3C}.wy-inline-validate{white-space:nowrap}.wy-inline-validate .wy-input-context{padding:0.5em 0.625em;display:inline-block;font-size:80%}.wy-inline-validate.wy-inline-validate-success .wy-input-context{color:#27AE60}.wy-inline-validate.wy-inline-validate-danger .wy-input-context{color:#E74C3C}.wy-inline-validate.wy-inline-validate-warning .wy-input-context{color:#E67E22}.wy-inline-validate.wy-inline-validate-info .wy-input-context{color:#2980B9}.rotate-90{-webkit-transform:rotate(90deg);-moz-transform:rotate(90deg);-ms-transform:rotate(90deg);-o-transform:rotate(90deg);transform:rotate(90deg)}.rotate-180{-webkit-transform:rotate(180deg);-moz-transform:rotate(180deg);-ms-transform:rotate(180deg);-o-transform:rotate(180deg);transform:rotate(180deg)}.rotate-270{-webkit-transform:rotate(270deg);-moz-transform:rotate(270deg);-ms-transform:rotate(270deg);-o-transform:rotate(270deg);transform:rotate(270deg)}.mirror{-webkit-transform:scaleX(-1);-moz-transform:scaleX(-1);-ms-transform:scaleX(-1);-o-transform:scaleX(-1);transform:scaleX(-1)}.mirror.rotate-90{-webkit-transform:scaleX(-1) rotate(90deg);-moz-transform:scaleX(-1) rotate(90deg);-ms-transform:scaleX(-1) rotate(90deg);-o-transform:scaleX(-1) rotate(90deg);transform:scaleX(-1) rotate(90deg)}.mirror.rotate-180{-webkit-transform:scaleX(-1) rotate(180deg);-moz-transform:scaleX(-1) rotate(180deg);-ms-transform:scaleX(-1) rotate(180deg);-o-transform:scaleX(-1) rotate(180deg);transform:scaleX(-1) rotate(180deg)}.mirror.rotate-270{-webkit-transform:scaleX(-1) rotate(270deg);-moz-transform:scaleX(-1) rotate(270deg);-ms-transform:scaleX(-1) rotate(270deg);-o-transform:scaleX(-1) rotate(270deg);transform:scaleX(-1) rotate(270deg)}@media only screen and (max-width: 480px){.wy-form button[type="submit"]{margin:0.7em 0 0}.wy-form input[type="text"],.wy-form input[type="password"],.wy-form input[type="email"],.wy-form input[type="url"],.wy-form 
input[type="date"],.wy-form input[type="month"],.wy-form input[type="time"],.wy-form input[type="datetime"],.wy-form input[type="datetime-local"],.wy-form input[type="week"],.wy-form input[type="number"],.wy-form input[type="search"],.wy-form input[type="tel"],.wy-form input[type="color"]{margin-bottom:0.3em;display:block}.wy-form label{margin-bottom:0.3em;display:block}.wy-form input[type="password"],.wy-form input[type="email"],.wy-form input[type="url"],.wy-form input[type="date"],.wy-form input[type="month"],.wy-form input[type="time"],.wy-form input[type="datetime"],.wy-form input[type="datetime-local"],.wy-form input[type="week"],.wy-form input[type="number"],.wy-form input[type="search"],.wy-form input[type="tel"],.wy-form input[type="color"]{margin-bottom:0}.wy-form-aligned .wy-control-group label{margin-bottom:0.3em;text-align:left;display:block;width:100%}.wy-form-aligned .wy-control{margin:1.5em 0 0 0}.wy-form .wy-help-inline,.wy-form-message-inline,.wy-form-message{display:block;font-size:80%;padding:6px 0}}@media screen and (max-width: 768px){.tablet-hide{display:none}}@media screen and (max-width: 480px){.mobile-hide{display:none}}.float-left{float:left}.float-right{float:right}.full-width{width:100%}.wy-table,.rst-content table.docutils,.rst-content table.field-list{border-collapse:collapse;border-spacing:0;empty-cells:show;margin-bottom:24px}.wy-table caption,.rst-content table.docutils caption,.rst-content table.field-list caption{color:#000;font:italic 85%/1 arial,sans-serif;padding:1em 0;text-align:center}.wy-table td,.rst-content table.docutils td,.rst-content table.field-list td,.wy-table th,.rst-content table.docutils th,.rst-content table.field-list th{font-size:90%;margin:0;overflow:visible;padding:8px 16px}.wy-table td:first-child,.rst-content table.docutils td:first-child,.rst-content table.field-list td:first-child,.wy-table th:first-child,.rst-content table.docutils th:first-child,.rst-content table.field-list th:first-child{border-left-width:0}.wy-table thead,.rst-content table.docutils thead,.rst-content table.field-list thead{color:#000;text-align:left;vertical-align:bottom;white-space:nowrap}.wy-table thead th,.rst-content table.docutils thead th,.rst-content table.field-list thead th{font-weight:bold;border-bottom:solid 2px #e1e4e5}.wy-table td,.rst-content table.docutils td,.rst-content table.field-list td{background-color:transparent;vertical-align:middle}.wy-table td p,.rst-content table.docutils td p,.rst-content table.field-list td p{line-height:18px}.wy-table td p:last-child,.rst-content table.docutils td p:last-child,.rst-content table.field-list td p:last-child{margin-bottom:0}.wy-table .wy-table-cell-min,.rst-content table.docutils .wy-table-cell-min,.rst-content table.field-list .wy-table-cell-min{width:1%;padding-right:0}.wy-table .wy-table-cell-min input[type=checkbox],.rst-content table.docutils .wy-table-cell-min input[type=checkbox],.rst-content table.field-list .wy-table-cell-min input[type=checkbox],.wy-table .wy-table-cell-min input[type=checkbox],.rst-content table.docutils .wy-table-cell-min input[type=checkbox],.rst-content table.field-list .wy-table-cell-min input[type=checkbox]{margin:0}.wy-table-secondary{color:gray;font-size:90%}.wy-table-tertiary{color:gray;font-size:80%}.wy-table-odd td,.wy-table-striped tr:nth-child(2n-1) td,.rst-content table.docutils:not(.field-list) tr:nth-child(2n-1) td{background-color:#f3f6f6}.wy-table-backed{background-color:#f3f6f6}.wy-table-bordered-all,.rst-content table.docutils{border:1px solid 
#e1e4e5}.wy-table-bordered-all td,.rst-content table.docutils td{border-bottom:1px solid #e1e4e5;border-left:1px solid #e1e4e5}.wy-table-bordered-all tbody>tr:last-child td,.rst-content table.docutils tbody>tr:last-child td{border-bottom-width:0}.wy-table-bordered{border:1px solid #e1e4e5}.wy-table-bordered-rows td{border-bottom:1px solid #e1e4e5}.wy-table-bordered-rows tbody>tr:last-child td{border-bottom-width:0}.wy-table-horizontal tbody>tr:last-child td{border-bottom-width:0}.wy-table-horizontal td,.wy-table-horizontal th{border-width:0 0 1px 0;border-bottom:1px solid #e1e4e5}.wy-table-horizontal tbody>tr:last-child td{border-bottom-width:0}.wy-table-responsive{margin-bottom:24px;max-width:100%;overflow:auto}.wy-table-responsive table{margin-bottom:0 !important}.wy-table-responsive table td,.wy-table-responsive table th{white-space:nowrap}a{color:#2980B9;text-decoration:none;cursor:pointer}a:hover{color:#3091d1}a:visited{color:#9B59B6}html{height:100%;overflow-x:hidden}body{font-family:"Lato","proxima-nova","Helvetica Neue",Arial,sans-serif;font-weight:normal;color:#404040;min-height:100%;overflow-x:hidden;background:#edf0f2}.wy-text-left{text-align:left}.wy-text-center{text-align:center}.wy-text-right{text-align:right}.wy-text-large{font-size:120%}.wy-text-normal{font-size:100%}.wy-text-small,small{font-size:80%}.wy-text-strike{text-decoration:line-through}.wy-text-warning{color:#E67E22 !important}a.wy-text-warning:hover{color:#eb9950 !important}.wy-text-info{color:#2980B9 !important}a.wy-text-info:hover{color:#409ad5 !important}.wy-text-success{color:#27AE60 !important}a.wy-text-success:hover{color:#36d278 !important}.wy-text-danger{color:#E74C3C !important}a.wy-text-danger:hover{color:#ed7669 !important}.wy-text-neutral{color:#404040 !important}a.wy-text-neutral:hover{color:#595959 !important}h1,h2,.rst-content .toctree-wrapper p.caption,h3,h4,h5,h6,legend{margin-top:0;font-weight:700;font-family:"Roboto Slab","ff-tisa-web-pro","Georgia",Arial,sans-serif}p{line-height:24px;margin:0;font-size:16px;margin-bottom:24px}h1{font-size:175%}h2,.rst-content .toctree-wrapper p.caption{font-size:150%}h3{font-size:125%}h4{font-size:115%}h5{font-size:110%}h6{font-size:100%}hr{display:block;height:1px;border:0;border-top:1px solid #e1e4e5;margin:24px 0;padding:0}code,.rst-content tt,.rst-content code{white-space:nowrap;max-width:100%;background:#fff;border:solid 1px #e1e4e5;font-size:75%;padding:0 5px;font-family:Consolas,"Andale Mono WT","Andale Mono","Lucida Console","Lucida Sans Typewriter","DejaVu Sans Mono","Bitstream Vera Sans Mono","Liberation Mono","Nimbus Mono L",Monaco,"Courier New",Courier,monospace;color:#E74C3C;overflow-x:auto}code.code-large,.rst-content tt.code-large{font-size:90%}.wy-plain-list-disc,.rst-content .section ul,.rst-content .toctree-wrapper ul,article ul{list-style:disc;line-height:24px;margin-bottom:24px}.wy-plain-list-disc li,.rst-content .section ul li,.rst-content .toctree-wrapper ul li,article ul li{list-style:disc;margin-left:24px}.wy-plain-list-disc li p:last-child,.rst-content .section ul li p:last-child,.rst-content .toctree-wrapper ul li p:last-child,article ul li p:last-child{margin-bottom:0}.wy-plain-list-disc li ul,.rst-content .section ul li ul,.rst-content .toctree-wrapper ul li ul,article ul li ul{margin-bottom:0}.wy-plain-list-disc li li,.rst-content .section ul li li,.rst-content .toctree-wrapper ul li li,article ul li li{list-style:circle}.wy-plain-list-disc li li li,.rst-content .section ul li li li,.rst-content .toctree-wrapper ul li li li,article 
ul li li li{list-style:square}.wy-plain-list-disc li ol li,.rst-content .section ul li ol li,.rst-content .toctree-wrapper ul li ol li,article ul li ol li{list-style:decimal}.wy-plain-list-decimal,.rst-content .section ol,.rst-content ol.arabic,article ol{list-style:decimal;line-height:24px;margin-bottom:24px}.wy-plain-list-decimal li,.rst-content .section ol li,.rst-content ol.arabic li,article ol li{list-style:decimal;margin-left:24px}.wy-plain-list-decimal li p:last-child,.rst-content .section ol li p:last-child,.rst-content ol.arabic li p:last-child,article ol li p:last-child{margin-bottom:0}.wy-plain-list-decimal li ul,.rst-content .section ol li ul,.rst-content ol.arabic li ul,article ol li ul{margin-bottom:0}.wy-plain-list-decimal li ul li,.rst-content .section ol li ul li,.rst-content ol.arabic li ul li,article ol li ul li{list-style:disc}.codeblock-example{border:1px solid #e1e4e5;border-bottom:none;padding:24px;padding-top:48px;font-weight:500;background:#fff;position:relative}.codeblock-example:after{content:"Example";position:absolute;top:0px;left:0px;background:#9B59B6;color:#fff;padding:6px 12px}.codeblock-example.prettyprint-example-only{border:1px solid #e1e4e5;margin-bottom:24px}.codeblock,pre.literal-block,.rst-content .literal-block,.rst-content pre.literal-block,div[class^='highlight']{border:1px solid #e1e4e5;padding:0px;overflow-x:auto;background:#fff;margin:1px 0 24px 0}.codeblock div[class^='highlight'],pre.literal-block div[class^='highlight'],.rst-content .literal-block div[class^='highlight'],div[class^='highlight'] div[class^='highlight']{border:none;background:none;margin:0}div[class^='highlight'] td.code{width:100%}.linenodiv pre{border-right:solid 1px #e6e9ea;margin:0;padding:12px 12px;font-family:Consolas,"Andale Mono WT","Andale Mono","Lucida Console","Lucida Sans Typewriter","DejaVu Sans Mono","Bitstream Vera Sans Mono","Liberation Mono","Nimbus Mono L",Monaco,"Courier New",Courier,monospace;font-size:12px;line-height:1.5;color:#d9d9d9}div[class^='highlight'] pre{white-space:pre;margin:0;padding:12px 12px;font-family:Consolas,"Andale Mono WT","Andale Mono","Lucida Console","Lucida Sans Typewriter","DejaVu Sans Mono","Bitstream Vera Sans Mono","Liberation Mono","Nimbus Mono L",Monaco,"Courier New",Courier,monospace;font-size:12px;line-height:1.5;display:block;overflow:auto;color:#404040}@media print{.codeblock,pre.literal-block,.rst-content .literal-block,.rst-content pre.literal-block,div[class^='highlight'],div[class^='highlight'] pre{white-space:pre-wrap}}.hll{background-color:#ffc;margin:0 -12px;padding:0 12px;display:block}.c{color:#998;font-style:italic}.err{color:#a61717;background-color:#e3d2d2}.k{font-weight:bold}.o{font-weight:bold}.cm{color:#998;font-style:italic}.cp{color:#999;font-weight:bold}.c1{color:#998;font-style:italic}.cs{color:#999;font-weight:bold;font-style:italic}.gd{color:#000;background-color:#fdd}.gd .x{color:#000;background-color:#faa}.ge{font-style:italic}.gr{color:#a00}.gh{color:#999}.gi{color:#000;background-color:#dfd}.gi 
.x{color:#000;background-color:#afa}.go{color:#888}.gp{color:#555}.gs{font-weight:bold}.gu{color:purple;font-weight:bold}.gt{color:#a00}.kc{font-weight:bold}.kd{font-weight:bold}.kn{font-weight:bold}.kp{font-weight:bold}.kr{font-weight:bold}.kt{color:#458;font-weight:bold}.m{color:#099}.s{color:#d14}.n{color:#333}.na{color:teal}.nb{color:#0086b3}.nc{color:#458;font-weight:bold}.no{color:teal}.ni{color:purple}.ne{color:#900;font-weight:bold}.nf{color:#900;font-weight:bold}.nn{color:#555}.nt{color:navy}.nv{color:teal}.ow{font-weight:bold}.w{color:#bbb}.mf{color:#099}.mh{color:#099}.mi{color:#099}.mo{color:#099}.sb{color:#d14}.sc{color:#d14}.sd{color:#d14}.s2{color:#d14}.se{color:#d14}.sh{color:#d14}.si{color:#d14}.sx{color:#d14}.sr{color:#009926}.s1{color:#d14}.ss{color:#990073}.bp{color:#999}.vc{color:teal}.vg{color:teal}.vi{color:teal}.il{color:#099}.gc{color:#999;background-color:#EAF2F5}.wy-breadcrumbs li{display:inline-block}.wy-breadcrumbs li.wy-breadcrumbs-aside{float:right}.wy-breadcrumbs li a{display:inline-block;padding:5px}.wy-breadcrumbs li a:first-child{padding-left:0}.wy-breadcrumbs li code,.wy-breadcrumbs li .rst-content tt,.rst-content .wy-breadcrumbs li tt{padding:5px;border:none;background:none}.wy-breadcrumbs li code.literal,.wy-breadcrumbs li .rst-content tt.literal,.rst-content .wy-breadcrumbs li tt.literal{color:#404040}.wy-breadcrumbs-extra{margin-bottom:0;color:#b3b3b3;font-size:80%;display:inline-block}@media screen and (max-width: 480px){.wy-breadcrumbs-extra{display:none}.wy-breadcrumbs li.wy-breadcrumbs-aside{display:none}}@media print{.wy-breadcrumbs li.wy-breadcrumbs-aside{display:none}}.wy-affix{position:fixed;top:1.618em}.wy-menu a:hover{text-decoration:none}.wy-menu-horiz{*zoom:1}.wy-menu-horiz:before,.wy-menu-horiz:after{display:table;content:""}.wy-menu-horiz:after{clear:both}.wy-menu-horiz ul,.wy-menu-horiz li{display:inline-block}.wy-menu-horiz li:hover{background:rgba(255,255,255,0.1)}.wy-menu-horiz li.divide-left{border-left:solid 1px #404040}.wy-menu-horiz li.divide-right{border-right:solid 1px #404040}.wy-menu-horiz a{height:32px;display:inline-block;line-height:32px;padding:0 16px}.wy-menu-vertical{width:300px}.wy-menu-vertical header,.wy-menu-vertical p.caption{height:32px;display:inline-block;line-height:32px;padding:0 1.618em;margin-bottom:0;display:block;font-weight:bold;text-transform:uppercase;font-size:80%;color:#555;white-space:nowrap}.wy-menu-vertical ul{margin-bottom:0}.wy-menu-vertical li.divide-top{border-top:solid 1px #404040}.wy-menu-vertical li.divide-bottom{border-bottom:solid 1px #404040}.wy-menu-vertical li.current{background:#e3e3e3}.wy-menu-vertical li.current a{color:gray;border-right:solid 1px #c9c9c9;padding:0.4045em 2.427em}.wy-menu-vertical li.current a:hover{background:#d6d6d6}.wy-menu-vertical li code,.wy-menu-vertical li .rst-content tt,.rst-content .wy-menu-vertical li tt{border:none;background:inherit;color:inherit;padding-left:0;padding-right:0}.wy-menu-vertical li span.toctree-expand{display:block;float:left;margin-left:-1.2em;font-size:0.8em;line-height:1.6em;color:#4d4d4d}.wy-menu-vertical li.on a,.wy-menu-vertical li.current>a{color:#404040;padding:0.4045em 1.618em;font-weight:bold;position:relative;background:#fcfcfc;border:none;border-bottom:solid 1px #c9c9c9;border-top:solid 1px #c9c9c9;padding-left:1.618em -4px}.wy-menu-vertical li.on a:hover,.wy-menu-vertical li.current>a:hover{background:#fcfcfc}.wy-menu-vertical li.on a:hover span.toctree-expand,.wy-menu-vertical li.current>a:hover 
span.toctree-expand{color:gray}.wy-menu-vertical li.on a span.toctree-expand,.wy-menu-vertical li.current>a span.toctree-expand{display:block;font-size:0.8em;line-height:1.6em;color:#333}.wy-menu-vertical li.toctree-l1.current li.toctree-l2>ul,.wy-menu-vertical li.toctree-l2.current li.toctree-l3>ul{display:none}.wy-menu-vertical li.toctree-l1.current li.toctree-l2.current>ul,.wy-menu-vertical li.toctree-l2.current li.toctree-l3.current>ul{display:block}.wy-menu-vertical li.toctree-l2.current>a{background:#c9c9c9;padding:0.4045em 2.427em}.wy-menu-vertical li.toctree-l2.current li.toctree-l3>a{display:block;background:#c9c9c9;padding:0.4045em 4.045em}.wy-menu-vertical li.toctree-l2 a:hover span.toctree-expand{color:gray}.wy-menu-vertical li.toctree-l2 span.toctree-expand{color:#a3a3a3}.wy-menu-vertical li.toctree-l3{font-size:0.9em}.wy-menu-vertical li.toctree-l3.current>a{background:#bdbdbd;padding:0.4045em 4.045em}.wy-menu-vertical li.toctree-l3.current li.toctree-l4>a{display:block;background:#bdbdbd;padding:0.4045em 5.663em;border-top:none;border-bottom:none}.wy-menu-vertical li.toctree-l3 a:hover span.toctree-expand{color:gray}.wy-menu-vertical li.toctree-l3 span.toctree-expand{color:#969696}.wy-menu-vertical li.toctree-l4{font-size:0.9em}.wy-menu-vertical li.current ul{display:block}.wy-menu-vertical li ul{margin-bottom:0;display:none}.wy-menu-vertical .local-toc li ul{display:block}.wy-menu-vertical li ul li a{margin-bottom:0;color:#b3b3b3;font-weight:normal}.wy-menu-vertical a{display:inline-block;line-height:18px;padding:0.4045em 1.618em;display:block;position:relative;font-size:90%;color:#b3b3b3}.wy-menu-vertical a:hover{background-color:#4e4a4a;cursor:pointer}.wy-menu-vertical a:hover span.toctree-expand{color:#b3b3b3}.wy-menu-vertical a:active{background-color:#2980B9;cursor:pointer;color:#fff}.wy-menu-vertical a:active span.toctree-expand{color:#fff}.wy-side-nav-search{display:block;width:300px;padding:0.809em;margin-bottom:0.809em;z-index:200;background-color:#2980B9;text-align:center;padding:0.809em;display:block;color:#fcfcfc;margin-bottom:0.809em}.wy-side-nav-search input[type=text]{width:100%;border-radius:50px;padding:6px 12px;border-color:#2472a4}.wy-side-nav-search img{display:block;margin:auto auto 0.809em auto;height:45px;width:45px;background-color:#2980B9;padding:5px;border-radius:100%}.wy-side-nav-search>a,.wy-side-nav-search .wy-dropdown>a{color:#fcfcfc;font-size:100%;font-weight:bold;display:inline-block;padding:4px 6px;margin-bottom:0.809em}.wy-side-nav-search>a:hover,.wy-side-nav-search .wy-dropdown>a:hover{background:rgba(255,255,255,0.1)}.wy-side-nav-search>a img.logo,.wy-side-nav-search .wy-dropdown>a img.logo{display:block;margin:0 auto;height:auto;width:auto;border-radius:0;max-width:100%;background:transparent}.wy-side-nav-search>a.icon img.logo,.wy-side-nav-search .wy-dropdown>a.icon img.logo{margin-top:0.85em}.wy-side-nav-search>div.version{margin-top:-0.4045em;margin-bottom:0.809em;font-weight:normal;color:rgba(255,255,255,0.3)}.wy-nav .wy-menu-vertical header{color:#2980B9}.wy-nav .wy-menu-vertical a{color:#b3b3b3}.wy-nav .wy-menu-vertical a:hover{background-color:#2980B9;color:#fff}[data-menu-wrap]{-webkit-transition:all 0.2s ease-in;-moz-transition:all 0.2s ease-in;transition:all 0.2s ease-in;position:absolute;opacity:1;width:100%;opacity:0}[data-menu-wrap].move-center{left:0;right:auto;opacity:1}[data-menu-wrap].move-left{right:auto;left:-100%;opacity:0}[data-menu-wrap].move-right{right:-100%;left:auto;opacity:0}.wy-body-for-nav{background:left 
repeat-y #fcfcfc;background-image:url(data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAIAAACQd1PeAAAAGXRFWHRTb2Z0d2FyZQBBZG9iZSBJbWFnZVJlYWR5ccllPAAAAyRpVFh0WE1MOmNvbS5hZG9iZS54bXAAAAAAADw/eHBhY2tldCBiZWdpbj0i77u/IiBpZD0iVzVNME1wQ2VoaUh6cmVTek5UY3prYzlkIj8+IDx4OnhtcG1ldGEgeG1sbnM6eD0iYWRvYmU6bnM6bWV0YS8iIHg6eG1wdGs9IkFkb2JlIFhNUCBDb3JlIDUuMy1jMDExIDY2LjE0NTY2MSwgMjAxMi8wMi8wNi0xNDo1NjoyNyAgICAgICAgIj4gPHJkZjpSREYgeG1sbnM6cmRmPSJodHRwOi8vd3d3LnczLm9yZy8xOTk5LzAyLzIyLXJkZi1zeW50YXgtbnMjIj4gPHJkZjpEZXNjcmlwdGlvbiByZGY6YWJvdXQ9IiIgeG1sbnM6eG1wPSJodHRwOi8vbnMuYWRvYmUuY29tL3hhcC8xLjAvIiB4bWxuczp4bXBNTT0iaHR0cDovL25zLmFkb2JlLmNvbS94YXAvMS4wL21tLyIgeG1sbnM6c3RSZWY9Imh0dHA6Ly9ucy5hZG9iZS5jb20veGFwLzEuMC9zVHlwZS9SZXNvdXJjZVJlZiMiIHhtcDpDcmVhdG9yVG9vbD0iQWRvYmUgUGhvdG9zaG9wIENTNiAoTWFjaW50b3NoKSIgeG1wTU06SW5zdGFuY2VJRD0ieG1wLmlpZDoxOERBMTRGRDBFMUUxMUUzODUwMkJCOThDMEVFNURFMCIgeG1wTU06RG9jdW1lbnRJRD0ieG1wLmRpZDoxOERBMTRGRTBFMUUxMUUzODUwMkJCOThDMEVFNURFMCI+IDx4bXBNTTpEZXJpdmVkRnJvbSBzdFJlZjppbnN0YW5jZUlEPSJ4bXAuaWlkOjE4REExNEZCMEUxRTExRTM4NTAyQkI5OEMwRUU1REUwIiBzdFJlZjpkb2N1bWVudElEPSJ4bXAuZGlkOjE4REExNEZDMEUxRTExRTM4NTAyQkI5OEMwRUU1REUwIi8+IDwvcmRmOkRlc2NyaXB0aW9uPiA8L3JkZjpSREY+IDwveDp4bXBtZXRhPiA8P3hwYWNrZXQgZW5kPSJyIj8+EwrlwAAAAA5JREFUeNpiMDU0BAgwAAE2AJgB9BnaAAAAAElFTkSuQmCC);background-size:300px 1px}.wy-grid-for-nav{position:absolute;width:100%;height:100%}.wy-nav-side{position:fixed;top:0;bottom:0;left:0;padding-bottom:2em;width:300px;overflow-x:hidden;overflow-y:hidden;min-height:100%;background:#343131;z-index:200}.wy-side-scroll{width:320px;position:relative;overflow-x:hidden;overflow-y:scroll;height:100%}.wy-nav-top{display:none;background:#2980B9;color:#fff;padding:0.4045em 0.809em;position:relative;line-height:50px;text-align:center;font-size:100%;*zoom:1}.wy-nav-top:before,.wy-nav-top:after{display:table;content:""}.wy-nav-top:after{clear:both}.wy-nav-top a{color:#fff;font-weight:bold}.wy-nav-top img{margin-right:12px;height:45px;width:45px;background-color:#2980B9;padding:5px;border-radius:100%}.wy-nav-top i{font-size:30px;float:left;cursor:pointer}.wy-nav-content-wrap{margin-left:300px;background:#fcfcfc;min-height:100%}.wy-nav-content{padding:1.618em 3.236em;height:100%;max-width:800px;margin:auto}.wy-body-mask{position:fixed;width:100%;height:100%;background:rgba(0,0,0,0.2);display:none;z-index:499}.wy-body-mask.on{display:block}footer{color:#999}footer p{margin-bottom:12px}footer span.commit code,footer span.commit .rst-content tt,.rst-content footer span.commit tt{padding:0px;font-family:Consolas,"Andale Mono WT","Andale Mono","Lucida Console","Lucida Sans Typewriter","DejaVu Sans Mono","Bitstream Vera Sans Mono","Liberation Mono","Nimbus Mono L",Monaco,"Courier New",Courier,monospace;font-size:1em;background:none;border:none;color:#999}.rst-footer-buttons{*zoom:1}.rst-footer-buttons:before,.rst-footer-buttons:after{display:table;content:""}.rst-footer-buttons:after{clear:both}#search-results .search li{margin-bottom:24px;border-bottom:solid 1px #e1e4e5;padding-bottom:24px}#search-results .search li:first-child{border-top:solid 1px #e1e4e5;padding-top:24px}#search-results .search li a{font-size:120%;margin-bottom:12px;display:inline-block}#search-results .context{color:gray;font-size:90%}@media screen and (max-width: 
768px){.wy-body-for-nav{background:#fcfcfc}.wy-nav-top{display:block}.wy-nav-side{left:-300px}.wy-nav-side.shift{width:85%;left:0}.wy-side-scroll{width:auto}.wy-side-nav-search{width:auto}.wy-menu.wy-menu-vertical{width:auto}.wy-nav-content-wrap{margin-left:0}.wy-nav-content-wrap .wy-nav-content{padding:1.618em}.wy-nav-content-wrap.shift{position:fixed;min-width:100%;left:85%;top:0;height:100%;overflow:hidden}}@media screen and (min-width: 1400px){.wy-nav-content-wrap{background:rgba(0,0,0,0.05)}.wy-nav-content{margin:0;background:#fcfcfc}}@media print{.rst-versions,footer,.wy-nav-side{display:none}.wy-nav-content-wrap{margin-left:0}}.rst-versions{position:fixed;bottom:0;left:0;width:300px;color:#fcfcfc;background:#1f1d1d;border-top:solid 10px #343131;font-family:"Lato","proxima-nova","Helvetica Neue",Arial,sans-serif;z-index:400}.rst-versions a{color:#2980B9;text-decoration:none}.rst-versions .rst-badge-small{display:none}.rst-versions .rst-current-version{padding:12px;background-color:#272525;display:block;text-align:right;font-size:90%;cursor:pointer;color:#27AE60;*zoom:1}.rst-versions .rst-current-version:before,.rst-versions .rst-current-version:after{display:table;content:""}.rst-versions .rst-current-version:after{clear:both}.rst-versions .rst-current-version .fa,.rst-versions .rst-current-version .wy-menu-vertical li span.toctree-expand,.wy-menu-vertical li .rst-versions .rst-current-version span.toctree-expand,.rst-versions .rst-current-version .rst-content .admonition-title,.rst-content .rst-versions .rst-current-version .admonition-title,.rst-versions .rst-current-version .rst-content h1 .headerlink,.rst-content h1 .rst-versions .rst-current-version .headerlink,.rst-versions .rst-current-version .rst-content h2 .headerlink,.rst-content h2 .rst-versions .rst-current-version .headerlink,.rst-versions .rst-current-version .rst-content h3 .headerlink,.rst-content h3 .rst-versions .rst-current-version .headerlink,.rst-versions .rst-current-version .rst-content h4 .headerlink,.rst-content h4 .rst-versions .rst-current-version .headerlink,.rst-versions .rst-current-version .rst-content h5 .headerlink,.rst-content h5 .rst-versions .rst-current-version .headerlink,.rst-versions .rst-current-version .rst-content h6 .headerlink,.rst-content h6 .rst-versions .rst-current-version .headerlink,.rst-versions .rst-current-version .rst-content dl dt .headerlink,.rst-content dl dt .rst-versions .rst-current-version .headerlink,.rst-versions .rst-current-version .rst-content p.caption .headerlink,.rst-content p.caption .rst-versions .rst-current-version .headerlink,.rst-versions .rst-current-version .rst-content tt.download span:first-child,.rst-content tt.download .rst-versions .rst-current-version span:first-child,.rst-versions .rst-current-version .rst-content code.download span:first-child,.rst-content code.download .rst-versions .rst-current-version span:first-child,.rst-versions .rst-current-version .icon{color:#fcfcfc}.rst-versions .rst-current-version .fa-book,.rst-versions .rst-current-version .icon-book{float:left}.rst-versions .rst-current-version .icon-book{float:left}.rst-versions .rst-current-version.rst-out-of-date{background-color:#E74C3C;color:#fff}.rst-versions .rst-current-version.rst-active-old-version{background-color:#F1C40F;color:#000}.rst-versions.shift-up .rst-other-versions{display:block}.rst-versions .rst-other-versions{font-size:90%;padding:12px;color:gray;display:none}.rst-versions .rst-other-versions hr{display:block;height:1px;border:0;margin:20px 
0;padding:0;border-top:solid 1px #413d3d}.rst-versions .rst-other-versions dd{display:inline-block;margin:0}.rst-versions .rst-other-versions dd a{display:inline-block;padding:6px;color:#fcfcfc}.rst-versions.rst-badge{width:auto;bottom:20px;right:20px;left:auto;border:none;max-width:300px}.rst-versions.rst-badge .icon-book{float:none}.rst-versions.rst-badge .fa-book,.rst-versions.rst-badge .icon-book{float:none}.rst-versions.rst-badge.shift-up .rst-current-version{text-align:right}.rst-versions.rst-badge.shift-up .rst-current-version .fa-book,.rst-versions.rst-badge.shift-up .rst-current-version .icon-book{float:left}.rst-versions.rst-badge.shift-up .rst-current-version .icon-book{float:left}.rst-versions.rst-badge .rst-current-version{width:auto;height:30px;line-height:30px;padding:0 6px;display:block;text-align:center}@media screen and (max-width: 768px){.rst-versions{width:85%;display:none}.rst-versions.shift{display:block}img{width:100%;height:auto}}.rst-content img{max-width:100%;height:auto !important}.rst-content div.figure{margin-bottom:24px}.rst-content div.figure p.caption{font-style:italic}.rst-content div.figure.align-center{text-align:center}.rst-content .section>img,.rst-content .section>a>img{margin-bottom:24px}.rst-content blockquote{margin-left:24px;line-height:24px;margin-bottom:24px}.rst-content .note .last,.rst-content .attention .last,.rst-content .caution .last,.rst-content .danger .last,.rst-content .error .last,.rst-content .hint .last,.rst-content .important .last,.rst-content .tip .last,.rst-content .warning .last,.rst-content .seealso .last,.rst-content .admonition-todo .last{margin-bottom:0}.rst-content .admonition-title:before{margin-right:4px}.rst-content .admonition table{border-color:rgba(0,0,0,0.1)}.rst-content .admonition table td,.rst-content .admonition table th{background:transparent !important;border-color:rgba(0,0,0,0.1) !important}.rst-content .section ol.loweralpha,.rst-content .section ol.loweralpha li{list-style:lower-alpha}.rst-content .section ol.upperalpha,.rst-content .section ol.upperalpha li{list-style:upper-alpha}.rst-content .section ol p,.rst-content .section ul p{margin-bottom:12px}.rst-content .line-block{margin-left:24px}.rst-content .topic-title{font-weight:bold;margin-bottom:12px}.rst-content .toc-backref{color:#404040}.rst-content .align-right{float:right;margin:0px 0px 24px 24px}.rst-content .align-left{float:left;margin:0px 24px 24px 0px}.rst-content .align-center{margin:auto;display:block}.rst-content h1 .headerlink,.rst-content h2 .headerlink,.rst-content .toctree-wrapper p.caption .headerlink,.rst-content h3 .headerlink,.rst-content h4 .headerlink,.rst-content h5 .headerlink,.rst-content h6 .headerlink,.rst-content dl dt .headerlink,.rst-content p.caption .headerlink{display:none;visibility:hidden;font-size:14px}.rst-content h1 .headerlink:after,.rst-content h2 .headerlink:after,.rst-content .toctree-wrapper p.caption .headerlink:after,.rst-content h3 .headerlink:after,.rst-content h4 .headerlink:after,.rst-content h5 .headerlink:after,.rst-content h6 .headerlink:after,.rst-content dl dt .headerlink:after,.rst-content p.caption .headerlink:after{visibility:visible;content:"";font-family:FontAwesome;display:inline-block}.rst-content h1:hover .headerlink,.rst-content h2:hover .headerlink,.rst-content .toctree-wrapper p.caption:hover .headerlink,.rst-content h3:hover .headerlink,.rst-content h4:hover .headerlink,.rst-content h5:hover .headerlink,.rst-content h6:hover .headerlink,.rst-content dl dt:hover .headerlink,.rst-content 
p.caption:hover .headerlink{display:inline-block}.rst-content .sidebar{float:right;width:40%;display:block;margin:0 0 24px 24px;padding:24px;background:#f3f6f6;border:solid 1px #e1e4e5}.rst-content .sidebar p,.rst-content .sidebar ul,.rst-content .sidebar dl{font-size:90%}.rst-content .sidebar .last{margin-bottom:0}.rst-content .sidebar .sidebar-title{display:block;font-family:"Roboto Slab","ff-tisa-web-pro","Georgia",Arial,sans-serif;font-weight:bold;background:#e1e4e5;padding:6px 12px;margin:-24px;margin-bottom:24px;font-size:100%}.rst-content .highlighted{background:#F1C40F;display:inline-block;font-weight:bold;padding:0 6px}.rst-content .footnote-reference,.rst-content .citation-reference{vertical-align:super;font-size:90%}.rst-content table.docutils.citation,.rst-content table.docutils.footnote{background:none;border:none;color:#999}.rst-content table.docutils.citation td,.rst-content table.docutils.citation tr,.rst-content table.docutils.footnote td,.rst-content table.docutils.footnote tr{border:none;background-color:transparent !important;white-space:normal}.rst-content table.docutils.citation td.label,.rst-content table.docutils.footnote td.label{padding-left:0;padding-right:0;vertical-align:top}.rst-content table.docutils.citation tt,.rst-content table.docutils.citation code,.rst-content table.docutils.footnote tt,.rst-content table.docutils.footnote code{color:#555}.rst-content table.field-list{border:none}.rst-content table.field-list td{border:none;padding-top:5px}.rst-content table.field-list td>strong{display:inline-block;margin-top:3px}.rst-content table.field-list .field-name{padding-right:10px;text-align:left;white-space:nowrap}.rst-content table.field-list .field-body{text-align:left;padding-left:0}.rst-content tt,.rst-content tt,.rst-content code{color:#000;padding:2px 5px}.rst-content tt big,.rst-content tt em,.rst-content tt big,.rst-content code big,.rst-content tt em,.rst-content code em{font-size:100% !important;line-height:normal}.rst-content tt.literal,.rst-content tt.literal,.rst-content code.literal{color:#E74C3C}.rst-content tt.xref,a .rst-content tt,.rst-content tt.xref,.rst-content code.xref,a .rst-content tt,a .rst-content code{font-weight:bold;color:#404040}.rst-content a tt,.rst-content a tt,.rst-content a code{color:#2980B9}.rst-content dl{margin-bottom:24px}.rst-content dl dt{font-weight:bold}.rst-content dl p,.rst-content dl table,.rst-content dl ul,.rst-content dl ol{margin-bottom:12px !important}.rst-content dl dd{margin:0 0 12px 24px}.rst-content dl:not(.docutils){margin-bottom:24px}.rst-content dl:not(.docutils) dt{display:inline-block;margin:6px 0;font-size:90%;line-height:normal;background:#e7f2fa;color:#2980B9;border-top:solid 3px #6ab0de;padding:6px;position:relative}.rst-content dl:not(.docutils) dt:before{color:#6ab0de}.rst-content dl:not(.docutils) dt .headerlink{color:#404040;font-size:100% !important}.rst-content dl:not(.docutils) dl dt{margin-bottom:6px;border:none;border-left:solid 3px #ccc;background:#f0f0f0;color:#555}.rst-content dl:not(.docutils) dl dt .headerlink{color:#404040;font-size:100% !important}.rst-content dl:not(.docutils) dt:first-child{margin-top:0}.rst-content dl:not(.docutils) tt,.rst-content dl:not(.docutils) tt,.rst-content dl:not(.docutils) code{font-weight:bold}.rst-content dl:not(.docutils) tt.descname,.rst-content dl:not(.docutils) tt.descclassname,.rst-content dl:not(.docutils) tt.descname,.rst-content dl:not(.docutils) code.descname,.rst-content dl:not(.docutils) tt.descclassname,.rst-content dl:not(.docutils) 
code.descclassname{background-color:transparent;border:none;padding:0;font-size:100% !important}.rst-content dl:not(.docutils) tt.descname,.rst-content dl:not(.docutils) tt.descname,.rst-content dl:not(.docutils) code.descname{font-weight:bold}.rst-content dl:not(.docutils) .optional{display:inline-block;padding:0 4px;color:#000;font-weight:bold}.rst-content dl:not(.docutils) .property{display:inline-block;padding-right:8px}.rst-content .viewcode-link,.rst-content .viewcode-back{display:inline-block;color:#27AE60;font-size:80%;padding-left:24px}.rst-content .viewcode-back{display:block;float:right}.rst-content p.rubric{margin-bottom:12px;font-weight:bold}.rst-content tt.download,.rst-content code.download{background:inherit;padding:inherit;font-family:inherit;font-size:inherit;color:inherit;border:inherit;white-space:inherit}.rst-content tt.download span:first-child:before,.rst-content code.download span:first-child:before{margin-right:4px}@media screen and (max-width: 480px){.rst-content .sidebar{width:100%}}span[id*='MathJax-Span']{color:#404040}.math{text-align:center}@font-face{font-family:"Inconsolata";font-style:normal;font-weight:400;src:local("Inconsolata"),local("Inconsolata-Regular"),url(../fonts/Inconsolata-Regular.ttf) format("truetype")}@font-face{font-family:"Inconsolata";font-style:normal;font-weight:700;src:local("Inconsolata Bold"),local("Inconsolata-Bold"),url(../fonts/Inconsolata-Bold.ttf) format("truetype")}@font-face{font-family:"Lato";font-style:normal;font-weight:400;src:local("Lato Regular"),local("Lato-Regular"),url(../fonts/Lato-Regular.ttf) format("truetype")}@font-face{font-family:"Lato";font-style:normal;font-weight:700;src:local("Lato Bold"),local("Lato-Bold"),url(../fonts/Lato-Bold.ttf) format("truetype")}@font-face{font-family:"Roboto Slab";font-style:normal;font-weight:400;src:local("Roboto Slab Regular"),local("RobotoSlab-Regular"),url(../fonts/RobotoSlab-Regular.ttf) format("truetype")}@font-face{font-family:"Roboto Slab";font-style:normal;font-weight:700;src:local("Roboto Slab Bold"),local("RobotoSlab-Bold"),url(../fonts/RobotoSlab-Bold.ttf) format("truetype")} -/*# sourceMappingURL=theme.css.map */ diff --git a/dev/doc/_static/doctools.js b/dev/doc/_static/doctools.js deleted file mode 100644 index 816349563588e87ca99c7cf2d6e54268e52e761d..0000000000000000000000000000000000000000 --- a/dev/doc/_static/doctools.js +++ /dev/null @@ -1,287 +0,0 @@ -/* - * doctools.js - * ~~~~~~~~~~~ - * - * Sphinx JavaScript utilities for all documentation. - * - * :copyright: Copyright 2007-2016 by the Sphinx team, see AUTHORS. - * :license: BSD, see LICENSE for details. - * - */ - -/** - * select a different prefix for underscore - */ -$u = _.noConflict(); - -/** - * make the code below compatible with browsers without - * an installed firebug like debugger -if (!window.console || !console.firebug) { - var names = ["log", "debug", "info", "warn", "error", "assert", "dir", - "dirxml", "group", "groupEnd", "time", "timeEnd", "count", "trace", - "profile", "profileEnd"]; - window.console = {}; - for (var i = 0; i < names.length; ++i) - window.console[names[i]] = function() {}; -} - */ - -/** - * small helper function to urldecode strings - */ -jQuery.urldecode = function(x) { - return decodeURIComponent(x).replace(/\+/g, ' '); -}; - -/** - * small helper function to urlencode strings - */ -jQuery.urlencode = encodeURIComponent; - -/** - * This function returns the parsed url parameters of the - * current request. 
Multiple values per key are supported, - * it will always return arrays of strings for the value parts. - */ -jQuery.getQueryParameters = function(s) { - if (typeof s == 'undefined') - s = document.location.search; - var parts = s.substr(s.indexOf('?') + 1).split('&'); - var result = {}; - for (var i = 0; i < parts.length; i++) { - var tmp = parts[i].split('=', 2); - var key = jQuery.urldecode(tmp[0]); - var value = jQuery.urldecode(tmp[1]); - if (key in result) - result[key].push(value); - else - result[key] = [value]; - } - return result; -}; - -/** - * highlight a given string on a jquery object by wrapping it in - * span elements with the given class name. - */ -jQuery.fn.highlightText = function(text, className) { - function highlight(node) { - if (node.nodeType == 3) { - var val = node.nodeValue; - var pos = val.toLowerCase().indexOf(text); - if (pos >= 0 && !jQuery(node.parentNode).hasClass(className)) { - var span = document.createElement("span"); - span.className = className; - span.appendChild(document.createTextNode(val.substr(pos, text.length))); - node.parentNode.insertBefore(span, node.parentNode.insertBefore( - document.createTextNode(val.substr(pos + text.length)), - node.nextSibling)); - node.nodeValue = val.substr(0, pos); - } - } - else if (!jQuery(node).is("button, select, textarea")) { - jQuery.each(node.childNodes, function() { - highlight(this); - }); - } - } - return this.each(function() { - highlight(this); - }); -}; - -/* - * backward compatibility for jQuery.browser - * This will be supported until firefox bug is fixed. - */ -if (!jQuery.browser) { - jQuery.uaMatch = function(ua) { - ua = ua.toLowerCase(); - - var match = /(chrome)[ \/]([\w.]+)/.exec(ua) || - /(webkit)[ \/]([\w.]+)/.exec(ua) || - /(opera)(?:.*version|)[ \/]([\w.]+)/.exec(ua) || - /(msie) ([\w.]+)/.exec(ua) || - ua.indexOf("compatible") < 0 && /(mozilla)(?:.*? rv:([\w.]+)|)/.exec(ua) || - []; - - return { - browser: match[ 1 ] || "", - version: match[ 2 ] || "0" - }; - }; - jQuery.browser = {}; - jQuery.browser[jQuery.uaMatch(navigator.userAgent).browser] = true; -} - -/** - * Small JavaScript module for the documentation. - */ -var Documentation = { - - init : function() { - this.fixFirefoxAnchorBug(); - this.highlightSearchWords(); - this.initIndexTable(); - - }, - - /** - * i18n support - */ - TRANSLATIONS : {}, - PLURAL_EXPR : function(n) { return n == 1 ? 0 : 1; }, - LOCALE : 'unknown', - - // gettext and ngettext don't access this so that the functions - // can safely bound to a different name (_ = Documentation.gettext) - gettext : function(string) { - var translated = Documentation.TRANSLATIONS[string]; - if (typeof translated == 'undefined') - return string; - return (typeof translated == 'string') ? translated : translated[0]; - }, - - ngettext : function(singular, plural, n) { - var translated = Documentation.TRANSLATIONS[singular]; - if (typeof translated == 'undefined') - return (n == 1) ? singular : plural; - return translated[Documentation.PLURALEXPR(n)]; - }, - - addTranslations : function(catalog) { - for (var key in catalog.messages) - this.TRANSLATIONS[key] = catalog.messages[key]; - this.PLURAL_EXPR = new Function('n', 'return +(' + catalog.plural_expr + ')'); - this.LOCALE = catalog.locale; - }, - - /** - * add context elements like header anchor links - */ - addContextElements : function() { - $('div[id] > :header:first').each(function() { - $('\u00B6'). - attr('href', '#' + this.id). - attr('title', _('Permalink to this headline')). 
- appendTo(this); - }); - $('dt[id]').each(function() { - $('\u00B6'). - attr('href', '#' + this.id). - attr('title', _('Permalink to this definition')). - appendTo(this); - }); - }, - - /** - * workaround a firefox stupidity - * see: https://bugzilla.mozilla.org/show_bug.cgi?id=645075 - */ - fixFirefoxAnchorBug : function() { - if (document.location.hash) - window.setTimeout(function() { - document.location.href += ''; - }, 10); - }, - - /** - * highlight the search words provided in the url in the text - */ - highlightSearchWords : function() { - var params = $.getQueryParameters(); - var terms = (params.highlight) ? params.highlight[0].split(/\s+/) : []; - if (terms.length) { - var body = $('div.body'); - if (!body.length) { - body = $('body'); - } - window.setTimeout(function() { - $.each(terms, function() { - body.highlightText(this.toLowerCase(), 'highlighted'); - }); - }, 10); - $('') - .appendTo($('#searchbox')); - } - }, - - /** - * init the domain index toggle buttons - */ - initIndexTable : function() { - var togglers = $('img.toggler').click(function() { - var src = $(this).attr('src'); - var idnum = $(this).attr('id').substr(7); - $('tr.cg-' + idnum).toggle(); - if (src.substr(-9) == 'minus.png') - $(this).attr('src', src.substr(0, src.length-9) + 'plus.png'); - else - $(this).attr('src', src.substr(0, src.length-8) + 'minus.png'); - }).css('display', ''); - if (DOCUMENTATION_OPTIONS.COLLAPSE_INDEX) { - togglers.click(); - } - }, - - /** - * helper function to hide the search marks again - */ - hideSearchWords : function() { - $('#searchbox .highlight-link').fadeOut(300); - $('span.highlighted').removeClass('highlighted'); - }, - - /** - * make the url absolute - */ - makeURL : function(relativeURL) { - return DOCUMENTATION_OPTIONS.URL_ROOT + '/' + relativeURL; - }, - - /** - * get the current relative url - */ - getCurrentURL : function() { - var path = document.location.pathname; - var parts = path.split(/\//); - $.each(DOCUMENTATION_OPTIONS.URL_ROOT.split(/\//), function() { - if (this == '..') - parts.pop(); - }); - var url = parts.join('/'); - return path.substring(url.lastIndexOf('/') + 1, path.length - 1); - }, - - initOnKeyListeners: function() { - $(document).keyup(function(event) { - var activeElementType = document.activeElement.tagName; - // don't navigate when in search box or textarea - if (activeElementType !== 'TEXTAREA' && activeElementType !== 'INPUT' && activeElementType !== 'SELECT') { - switch (event.keyCode) { - case 37: // left - var prevHref = $('link[rel="prev"]').prop('href'); - if (prevHref) { - window.location.href = prevHref; - return false; - } - case 39: // right - var nextHref = $('link[rel="next"]').prop('href'); - if (nextHref) { - window.location.href = nextHref; - return false; - } - } - } - }); - } -}; - -// quick alias for translations -_ = Documentation.gettext; - -$(document).ready(function() { - Documentation.init(); -}); \ No newline at end of file diff --git a/dev/doc/_static/down-pressed.png b/dev/doc/_static/down-pressed.png deleted file mode 100644 index 5756c8cad8854722893dc70b9eb4bb0400343a39..0000000000000000000000000000000000000000 Binary files a/dev/doc/_static/down-pressed.png and /dev/null differ diff --git a/dev/doc/_static/down.png b/dev/doc/_static/down.png deleted file mode 100644 index 1b3bdad2ceffae91cee61b32f3295f9bbe646e48..0000000000000000000000000000000000000000 Binary files a/dev/doc/_static/down.png and /dev/null differ diff --git a/dev/doc/_static/file.png b/dev/doc/_static/file.png deleted file mode 100644 
index a858a410e4faa62ce324d814e4b816fff83a6fb3..0000000000000000000000000000000000000000 Binary files a/dev/doc/_static/file.png and /dev/null differ diff --git a/dev/doc/_static/fonts/Inconsolata-Bold.ttf b/dev/doc/_static/fonts/Inconsolata-Bold.ttf deleted file mode 100644 index 58c9fef3a01c899867e280f49283fbb8e57d631d..0000000000000000000000000000000000000000 Binary files a/dev/doc/_static/fonts/Inconsolata-Bold.ttf and /dev/null differ diff --git a/dev/doc/_static/fonts/Inconsolata-Regular.ttf b/dev/doc/_static/fonts/Inconsolata-Regular.ttf deleted file mode 100644 index a87ffba6bef48195c8cf4e3ccb42ea77034f7cbc..0000000000000000000000000000000000000000 Binary files a/dev/doc/_static/fonts/Inconsolata-Regular.ttf and /dev/null differ diff --git a/dev/doc/_static/fonts/Lato-Bold.ttf b/dev/doc/_static/fonts/Lato-Bold.ttf deleted file mode 100644 index 74343694e2b2114272f38b1124813b972cb592e5..0000000000000000000000000000000000000000 Binary files a/dev/doc/_static/fonts/Lato-Bold.ttf and /dev/null differ diff --git a/dev/doc/_static/fonts/Lato-Regular.ttf b/dev/doc/_static/fonts/Lato-Regular.ttf deleted file mode 100644 index 04ea8efb1367727b081dea87e63818be0a4d02f0..0000000000000000000000000000000000000000 Binary files a/dev/doc/_static/fonts/Lato-Regular.ttf and /dev/null differ diff --git a/dev/doc/_static/fonts/RobotoSlab-Bold.ttf b/dev/doc/_static/fonts/RobotoSlab-Bold.ttf deleted file mode 100644 index df5d1df2730433013f41bf2698cbe249b075aa02..0000000000000000000000000000000000000000 Binary files a/dev/doc/_static/fonts/RobotoSlab-Bold.ttf and /dev/null differ diff --git a/dev/doc/_static/fonts/RobotoSlab-Regular.ttf b/dev/doc/_static/fonts/RobotoSlab-Regular.ttf deleted file mode 100644 index eb52a7907362cc3392eb74892883f5d9e260b638..0000000000000000000000000000000000000000 Binary files a/dev/doc/_static/fonts/RobotoSlab-Regular.ttf and /dev/null differ diff --git a/dev/doc/_static/fonts/fontawesome-webfont.eot b/dev/doc/_static/fonts/fontawesome-webfont.eot deleted file mode 100644 index 84677bc0c5f37f1fac9d87548c4554b5c91717cf..0000000000000000000000000000000000000000 Binary files a/dev/doc/_static/fonts/fontawesome-webfont.eot and /dev/null differ diff --git a/dev/doc/_static/fonts/fontawesome-webfont.svg b/dev/doc/_static/fonts/fontawesome-webfont.svg deleted file mode 100644 index d907b25ae60ec7e3d32e4027aa6e6b7595de97af..0000000000000000000000000000000000000000 --- a/dev/doc/_static/fonts/fontawesome-webfont.svg +++ /dev/null @@ -1,520 +0,0 @@ [520 deleted lines of SVG glyph markup elided; the tag content was stripped during extraction and nothing recoverable remains] \ No newline
at end of file diff --git a/dev/doc/_static/fonts/fontawesome-webfont.ttf b/dev/doc/_static/fonts/fontawesome-webfont.ttf deleted file mode 100644 index 96a3639cdde5e8ab459c6380e3b9524ee81641dc..0000000000000000000000000000000000000000 Binary files a/dev/doc/_static/fonts/fontawesome-webfont.ttf and /dev/null differ diff --git a/dev/doc/_static/fonts/fontawesome-webfont.woff b/dev/doc/_static/fonts/fontawesome-webfont.woff deleted file mode 100644 index 628b6a52a87e62c6f22426e17c01f6a303aa194e..0000000000000000000000000000000000000000 Binary files a/dev/doc/_static/fonts/fontawesome-webfont.woff and /dev/null differ diff --git a/dev/doc/_static/images/PP_w.png b/dev/doc/_static/images/PP_w.png deleted file mode 100644 index bc58b0b458135773fcde5ee941ea095e3d4d07a0..0000000000000000000000000000000000000000 Binary files a/dev/doc/_static/images/PP_w.png and /dev/null differ diff --git a/dev/doc/_static/jquery-3.1.0.js b/dev/doc/_static/jquery-3.1.0.js deleted file mode 100644 index f2fc2747874e38d72f12812ed38418bc21935608..0000000000000000000000000000000000000000 --- a/dev/doc/_static/jquery-3.1.0.js +++ /dev/null @@ -1,10074 +0,0 @@ -/*eslint-disable no-unused-vars*/ -/*! - * jQuery JavaScript Library v3.1.0 - * https://jquery.com/ - * - * Includes Sizzle.js - * https://sizzlejs.com/ - * - * Copyright jQuery Foundation and other contributors - * Released under the MIT license - * https://jquery.org/license - * - * Date: 2016-07-07T21:44Z - */ -( function( global, factory ) { - - "use strict"; - - if ( typeof module === "object" && typeof module.exports === "object" ) { - - // For CommonJS and CommonJS-like environments where a proper `window` - // is present, execute the factory and get jQuery. - // For environments that do not have a `window` with a `document` - // (such as Node.js), expose a factory as module.exports. - // This accentuates the need for the creation of a real `window`. - // e.g. var jQuery = require("jquery")(window); - // See ticket #14549 for more info. - module.exports = global.document ? - factory( global, true ) : - function( w ) { - if ( !w.document ) { - throw new Error( "jQuery requires a window with a document" ); - } - return factory( w ); - }; - } else { - factory( global ); - } - -// Pass this if window is not defined yet -} )( typeof window !== "undefined" ? window : this, function( window, noGlobal ) { - -// Edge <= 12 - 13+, Firefox <=18 - 45+, IE 10 - 11, Safari 5.1 - 9+, iOS 6 - 9.1 -// throw exceptions when non-strict code (e.g., ASP.NET 4.5) accesses strict mode -// arguments.callee.caller (trac-13335). But as of jQuery 3.0 (2016), strict mode should be common -// enough that all such attempts are guarded in a try block. 
-"use strict"; - -var arr = []; - -var document = window.document; - -var getProto = Object.getPrototypeOf; - -var slice = arr.slice; - -var concat = arr.concat; - -var push = arr.push; - -var indexOf = arr.indexOf; - -var class2type = {}; - -var toString = class2type.toString; - -var hasOwn = class2type.hasOwnProperty; - -var fnToString = hasOwn.toString; - -var ObjectFunctionString = fnToString.call( Object ); - -var support = {}; - - - - function DOMEval( code, doc ) { - doc = doc || document; - - var script = doc.createElement( "script" ); - - script.text = code; - doc.head.appendChild( script ).parentNode.removeChild( script ); - } -/* global Symbol */ -// Defining this global in .eslintrc would create a danger of using the global -// unguarded in another place, it seems safer to define global only for this module - - - -var - version = "3.1.0", - - // Define a local copy of jQuery - jQuery = function( selector, context ) { - - // The jQuery object is actually just the init constructor 'enhanced' - // Need init if jQuery is called (just allow error to be thrown if not included) - return new jQuery.fn.init( selector, context ); - }, - - // Support: Android <=4.0 only - // Make sure we trim BOM and NBSP - rtrim = /^[\s\uFEFF\xA0]+|[\s\uFEFF\xA0]+$/g, - - // Matches dashed string for camelizing - rmsPrefix = /^-ms-/, - rdashAlpha = /-([a-z])/g, - - // Used by jQuery.camelCase as callback to replace() - fcamelCase = function( all, letter ) { - return letter.toUpperCase(); - }; - -jQuery.fn = jQuery.prototype = { - - // The current version of jQuery being used - jquery: version, - - constructor: jQuery, - - // The default length of a jQuery object is 0 - length: 0, - - toArray: function() { - return slice.call( this ); - }, - - // Get the Nth element in the matched element set OR - // Get the whole matched element set as a clean array - get: function( num ) { - return num != null ? - - // Return just the one element from the set - ( num < 0 ? this[ num + this.length ] : this[ num ] ) : - - // Return all the elements in a clean array - slice.call( this ); - }, - - // Take an array of elements and push it onto the stack - // (returning the new matched element set) - pushStack: function( elems ) { - - // Build a new jQuery matched element set - var ret = jQuery.merge( this.constructor(), elems ); - - // Add the old object onto the stack (as a reference) - ret.prevObject = this; - - // Return the newly-formed element set - return ret; - }, - - // Execute a callback for every element in the matched set. - each: function( callback ) { - return jQuery.each( this, callback ); - }, - - map: function( callback ) { - return this.pushStack( jQuery.map( this, function( elem, i ) { - return callback.call( elem, i, elem ); - } ) ); - }, - - slice: function() { - return this.pushStack( slice.apply( this, arguments ) ); - }, - - first: function() { - return this.eq( 0 ); - }, - - last: function() { - return this.eq( -1 ); - }, - - eq: function( i ) { - var len = this.length, - j = +i + ( i < 0 ? len : 0 ); - return this.pushStack( j >= 0 && j < len ? [ this[ j ] ] : [] ); - }, - - end: function() { - return this.prevObject || this.constructor(); - }, - - // For internal use only. - // Behaves like an Array's method, not like a jQuery method. 
- push: push, - sort: arr.sort, - splice: arr.splice -}; - -jQuery.extend = jQuery.fn.extend = function() { - var options, name, src, copy, copyIsArray, clone, - target = arguments[ 0 ] || {}, - i = 1, - length = arguments.length, - deep = false; - - // Handle a deep copy situation - if ( typeof target === "boolean" ) { - deep = target; - - // Skip the boolean and the target - target = arguments[ i ] || {}; - i++; - } - - // Handle case when target is a string or something (possible in deep copy) - if ( typeof target !== "object" && !jQuery.isFunction( target ) ) { - target = {}; - } - - // Extend jQuery itself if only one argument is passed - if ( i === length ) { - target = this; - i--; - } - - for ( ; i < length; i++ ) { - - // Only deal with non-null/undefined values - if ( ( options = arguments[ i ] ) != null ) { - - // Extend the base object - for ( name in options ) { - src = target[ name ]; - copy = options[ name ]; - - // Prevent never-ending loop - if ( target === copy ) { - continue; - } - - // Recurse if we're merging plain objects or arrays - if ( deep && copy && ( jQuery.isPlainObject( copy ) || - ( copyIsArray = jQuery.isArray( copy ) ) ) ) { - - if ( copyIsArray ) { - copyIsArray = false; - clone = src && jQuery.isArray( src ) ? src : []; - - } else { - clone = src && jQuery.isPlainObject( src ) ? src : {}; - } - - // Never move original objects, clone them - target[ name ] = jQuery.extend( deep, clone, copy ); - - // Don't bring in undefined values - } else if ( copy !== undefined ) { - target[ name ] = copy; - } - } - } - } - - // Return the modified object - return target; -}; - -jQuery.extend( { - - // Unique for each copy of jQuery on the page - expando: "jQuery" + ( version + Math.random() ).replace( /\D/g, "" ), - - // Assume jQuery is ready without the ready module - isReady: true, - - error: function( msg ) { - throw new Error( msg ); - }, - - noop: function() {}, - - isFunction: function( obj ) { - return jQuery.type( obj ) === "function"; - }, - - isArray: Array.isArray, - - isWindow: function( obj ) { - return obj != null && obj === obj.window; - }, - - isNumeric: function( obj ) { - - // As of jQuery 3.0, isNumeric is limited to - // strings and numbers (primitives or objects) - // that can be coerced to finite numbers (gh-2662) - var type = jQuery.type( obj ); - return ( type === "number" || type === "string" ) && - - // parseFloat NaNs numeric-cast false positives ("") - // ...but misinterprets leading-number strings, particularly hex literals ("0x...") - // subtraction forces infinities to NaN - !isNaN( obj - parseFloat( obj ) ); - }, - - isPlainObject: function( obj ) { - var proto, Ctor; - - // Detect obvious negatives - // Use toString instead of jQuery.type to catch host objects - if ( !obj || toString.call( obj ) !== "[object Object]" ) { - return false; - } - - proto = getProto( obj ); - - // Objects with no prototype (e.g., `Object.create( null )`) are plain - if ( !proto ) { - return true; - } - - // Objects with prototype are plain iff they were constructed by a global Object function - Ctor = hasOwn.call( proto, "constructor" ) && proto.constructor; - return typeof Ctor === "function" && fnToString.call( Ctor ) === ObjectFunctionString; - }, - - isEmptyObject: function( obj ) { - - /* eslint-disable no-unused-vars */ - // See https://github.com/eslint/eslint/issues/6125 - var name; - - for ( name in obj ) { - return false; - } - return true; - }, - - type: function( obj ) { - if ( obj == null ) { - return obj + ""; - } - - // Support: Android <=2.3 
only (functionish RegExp) - return typeof obj === "object" || typeof obj === "function" ? - class2type[ toString.call( obj ) ] || "object" : - typeof obj; - }, - - // Evaluates a script in a global context - globalEval: function( code ) { - DOMEval( code ); - }, - - // Convert dashed to camelCase; used by the css and data modules - // Support: IE <=9 - 11, Edge 12 - 13 - // Microsoft forgot to hump their vendor prefix (#9572) - camelCase: function( string ) { - return string.replace( rmsPrefix, "ms-" ).replace( rdashAlpha, fcamelCase ); - }, - - nodeName: function( elem, name ) { - return elem.nodeName && elem.nodeName.toLowerCase() === name.toLowerCase(); - }, - - each: function( obj, callback ) { - var length, i = 0; - - if ( isArrayLike( obj ) ) { - length = obj.length; - for ( ; i < length; i++ ) { - if ( callback.call( obj[ i ], i, obj[ i ] ) === false ) { - break; - } - } - } else { - for ( i in obj ) { - if ( callback.call( obj[ i ], i, obj[ i ] ) === false ) { - break; - } - } - } - - return obj; - }, - - // Support: Android <=4.0 only - trim: function( text ) { - return text == null ? - "" : - ( text + "" ).replace( rtrim, "" ); - }, - - // results is for internal usage only - makeArray: function( arr, results ) { - var ret = results || []; - - if ( arr != null ) { - if ( isArrayLike( Object( arr ) ) ) { - jQuery.merge( ret, - typeof arr === "string" ? - [ arr ] : arr - ); - } else { - push.call( ret, arr ); - } - } - - return ret; - }, - - inArray: function( elem, arr, i ) { - return arr == null ? -1 : indexOf.call( arr, elem, i ); - }, - - // Support: Android <=4.0 only, PhantomJS 1 only - // push.apply(_, arraylike) throws on ancient WebKit - merge: function( first, second ) { - var len = +second.length, - j = 0, - i = first.length; - - for ( ; j < len; j++ ) { - first[ i++ ] = second[ j ]; - } - - first.length = i; - - return first; - }, - - grep: function( elems, callback, invert ) { - var callbackInverse, - matches = [], - i = 0, - length = elems.length, - callbackExpect = !invert; - - // Go through the array, only saving the items - // that pass the validator function - for ( ; i < length; i++ ) { - callbackInverse = !callback( elems[ i ], i ); - if ( callbackInverse !== callbackExpect ) { - matches.push( elems[ i ] ); - } - } - - return matches; - }, - - // arg is for internal usage only - map: function( elems, callback, arg ) { - var length, value, - i = 0, - ret = []; - - // Go through the array, translating each of the items to their new values - if ( isArrayLike( elems ) ) { - length = elems.length; - for ( ; i < length; i++ ) { - value = callback( elems[ i ], i, arg ); - - if ( value != null ) { - ret.push( value ); - } - } - - // Go through every key on the object, - } else { - for ( i in elems ) { - value = callback( elems[ i ], i, arg ); - - if ( value != null ) { - ret.push( value ); - } - } - } - - // Flatten any nested arrays - return concat.apply( [], ret ); - }, - - // A global GUID counter for objects - guid: 1, - - // Bind a function to a context, optionally partially applying any - // arguments. - proxy: function( fn, context ) { - var tmp, args, proxy; - - if ( typeof context === "string" ) { - tmp = fn[ context ]; - context = fn; - fn = tmp; - } - - // Quick check to determine if target is callable, in the spec - // this throws a TypeError, but we will just return undefined. 
- if ( !jQuery.isFunction( fn ) ) { - return undefined; - } - - // Simulated bind - args = slice.call( arguments, 2 ); - proxy = function() { - return fn.apply( context || this, args.concat( slice.call( arguments ) ) ); - }; - - // Set the guid of unique handler to the same of original handler, so it can be removed - proxy.guid = fn.guid = fn.guid || jQuery.guid++; - - return proxy; - }, - - now: Date.now, - - // jQuery.support is not used in Core but other projects attach their - // properties to it so it needs to exist. - support: support -} ); - -if ( typeof Symbol === "function" ) { - jQuery.fn[ Symbol.iterator ] = arr[ Symbol.iterator ]; -} - -// Populate the class2type map -jQuery.each( "Boolean Number String Function Array Date RegExp Object Error Symbol".split( " " ), -function( i, name ) { - class2type[ "[object " + name + "]" ] = name.toLowerCase(); -} ); - -function isArrayLike( obj ) { - - // Support: real iOS 8.2 only (not reproducible in simulator) - // `in` check used to prevent JIT error (gh-2145) - // hasOwn isn't used here due to false negatives - // regarding Nodelist length in IE - var length = !!obj && "length" in obj && obj.length, - type = jQuery.type( obj ); - - if ( type === "function" || jQuery.isWindow( obj ) ) { - return false; - } - - return type === "array" || length === 0 || - typeof length === "number" && length > 0 && ( length - 1 ) in obj; -} -var Sizzle = -/*! - * Sizzle CSS Selector Engine v2.3.0 - * https://sizzlejs.com/ - * - * Copyright jQuery Foundation and other contributors - * Released under the MIT license - * http://jquery.org/license - * - * Date: 2016-01-04 - */ -(function( window ) { - -var i, - support, - Expr, - getText, - isXML, - tokenize, - compile, - select, - outermostContext, - sortInput, - hasDuplicate, - - // Local document vars - setDocument, - document, - docElem, - documentIsHTML, - rbuggyQSA, - rbuggyMatches, - matches, - contains, - - // Instance-specific data - expando = "sizzle" + 1 * new Date(), - preferredDoc = window.document, - dirruns = 0, - done = 0, - classCache = createCache(), - tokenCache = createCache(), - compilerCache = createCache(), - sortOrder = function( a, b ) { - if ( a === b ) { - hasDuplicate = true; - } - return 0; - }, - - // Instance methods - hasOwn = ({}).hasOwnProperty, - arr = [], - pop = arr.pop, - push_native = arr.push, - push = arr.push, - slice = arr.slice, - // Use a stripped-down indexOf as it's faster than native - // https://jsperf.com/thor-indexof-vs-for/5 - indexOf = function( list, elem ) { - var i = 0, - len = list.length; - for ( ; i < len; i++ ) { - if ( list[i] === elem ) { - return i; - } - } - return -1; - }, - - booleans = "checked|selected|async|autofocus|autoplay|controls|defer|disabled|hidden|ismap|loop|multiple|open|readonly|required|scoped", - - // Regular expressions - - // http://www.w3.org/TR/css3-selectors/#whitespace - whitespace = "[\\x20\\t\\r\\n\\f]", - - // http://www.w3.org/TR/CSS21/syndata.html#value-def-identifier - identifier = "(?:\\\\.|[\\w-]|[^\0-\\xa0])+", - - // Attribute selectors: http://www.w3.org/TR/selectors/#attribute-selectors - attributes = "\\[" + whitespace + "*(" + identifier + ")(?:" + whitespace + - // Operator (capture 2) - "*([*^$|!~]?=)" + whitespace + - // "Attribute values must be CSS identifiers [capture 5] or strings [capture 3 or capture 4]" - "*(?:'((?:\\\\.|[^\\\\'])*)'|\"((?:\\\\.|[^\\\\\"])*)\"|(" + identifier + "))|)" + whitespace + - "*\\]", - - pseudos = ":(" + identifier + ")(?:\\((" + - // To reduce the number of selectors 
needing tokenize in the preFilter, prefer arguments: - // 1. quoted (capture 3; capture 4 or capture 5) - "('((?:\\\\.|[^\\\\'])*)'|\"((?:\\\\.|[^\\\\\"])*)\")|" + - // 2. simple (capture 6) - "((?:\\\\.|[^\\\\()[\\]]|" + attributes + ")*)|" + - // 3. anything else (capture 2) - ".*" + - ")\\)|)", - - // Leading and non-escaped trailing whitespace, capturing some non-whitespace characters preceding the latter - rwhitespace = new RegExp( whitespace + "+", "g" ), - rtrim = new RegExp( "^" + whitespace + "+|((?:^|[^\\\\])(?:\\\\.)*)" + whitespace + "+$", "g" ), - - rcomma = new RegExp( "^" + whitespace + "*," + whitespace + "*" ), - rcombinators = new RegExp( "^" + whitespace + "*([>+~]|" + whitespace + ")" + whitespace + "*" ), - - rattributeQuotes = new RegExp( "=" + whitespace + "*([^\\]'\"]*?)" + whitespace + "*\\]", "g" ), - - rpseudo = new RegExp( pseudos ), - ridentifier = new RegExp( "^" + identifier + "$" ), - - matchExpr = { - "ID": new RegExp( "^#(" + identifier + ")" ), - "CLASS": new RegExp( "^\\.(" + identifier + ")" ), - "TAG": new RegExp( "^(" + identifier + "|[*])" ), - "ATTR": new RegExp( "^" + attributes ), - "PSEUDO": new RegExp( "^" + pseudos ), - "CHILD": new RegExp( "^:(only|first|last|nth|nth-last)-(child|of-type)(?:\\(" + whitespace + - "*(even|odd|(([+-]|)(\\d*)n|)" + whitespace + "*(?:([+-]|)" + whitespace + - "*(\\d+)|))" + whitespace + "*\\)|)", "i" ), - "bool": new RegExp( "^(?:" + booleans + ")$", "i" ), - // For use in libraries implementing .is() - // We use this for POS matching in `select` - "needsContext": new RegExp( "^" + whitespace + "*[>+~]|:(even|odd|eq|gt|lt|nth|first|last)(?:\\(" + - whitespace + "*((?:-\\d)?\\d*)" + whitespace + "*\\)|)(?=[^-]|$)", "i" ) - }, - - rinputs = /^(?:input|select|textarea|button)$/i, - rheader = /^h\d$/i, - - rnative = /^[^{]+\{\s*\[native \w/, - - // Easily-parseable/retrievable ID or TAG or CLASS selectors - rquickExpr = /^(?:#([\w-]+)|(\w+)|\.([\w-]+))$/, - - rsibling = /[+~]/, - - // CSS escapes - // http://www.w3.org/TR/CSS21/syndata.html#escaped-characters - runescape = new RegExp( "\\\\([\\da-f]{1,6}" + whitespace + "?|(" + whitespace + ")|.)", "ig" ), - funescape = function( _, escaped, escapedWhitespace ) { - var high = "0x" + escaped - 0x10000; - // NaN means non-codepoint - // Support: Firefox<24 - // Workaround erroneous numeric interpretation of +"0x" - return high !== high || escapedWhitespace ? - escaped : - high < 0 ? 
- // BMP codepoint - String.fromCharCode( high + 0x10000 ) : - // Supplemental Plane codepoint (surrogate pair) - String.fromCharCode( high >> 10 | 0xD800, high & 0x3FF | 0xDC00 ); - }, - - // CSS string/identifier serialization - // https://drafts.csswg.org/cssom/#common-serializing-idioms - rcssescape = /([\0-\x1f\x7f]|^-?\d)|^-$|[^\x80-\uFFFF\w-]/g, - fcssescape = function( ch, asCodePoint ) { - if ( asCodePoint ) { - - // U+0000 NULL becomes U+FFFD REPLACEMENT CHARACTER - if ( ch === "\0" ) { - return "\uFFFD"; - } - - // Control characters and (dependent upon position) numbers get escaped as code points - return ch.slice( 0, -1 ) + "\\" + ch.charCodeAt( ch.length - 1 ).toString( 16 ) + " "; - } - - // Other potentially-special ASCII characters get backslash-escaped - return "\\" + ch; - }, - - // Used for iframes - // See setDocument() - // Removing the function wrapper causes a "Permission Denied" - // error in IE - unloadHandler = function() { - setDocument(); - }, - - disabledAncestor = addCombinator( - function( elem ) { - return elem.disabled === true; - }, - { dir: "parentNode", next: "legend" } - ); - -// Optimize for push.apply( _, NodeList ) -try { - push.apply( - (arr = slice.call( preferredDoc.childNodes )), - preferredDoc.childNodes - ); - // Support: Android<4.0 - // Detect silently failing push.apply - arr[ preferredDoc.childNodes.length ].nodeType; -} catch ( e ) { - push = { apply: arr.length ? - - // Leverage slice if possible - function( target, els ) { - push_native.apply( target, slice.call(els) ); - } : - - // Support: IE<9 - // Otherwise append directly - function( target, els ) { - var j = target.length, - i = 0; - // Can't trust NodeList.length - while ( (target[j++] = els[i++]) ) {} - target.length = j - 1; - } - }; -} - -function Sizzle( selector, context, results, seed ) { - var m, i, elem, nid, match, groups, newSelector, - newContext = context && context.ownerDocument, - - // nodeType defaults to 9, since context defaults to document - nodeType = context ? context.nodeType : 9; - - results = results || []; - - // Return early from calls with invalid selector or context - if ( typeof selector !== "string" || !selector || - nodeType !== 1 && nodeType !== 9 && nodeType !== 11 ) { - - return results; - } - - // Try to shortcut find operations (as opposed to filters) in HTML documents - if ( !seed ) { - - if ( ( context ? 
context.ownerDocument || context : preferredDoc ) !== document ) { - setDocument( context ); - } - context = context || document; - - if ( documentIsHTML ) { - - // If the selector is sufficiently simple, try using a "get*By*" DOM method - // (excepting DocumentFragment context, where the methods don't exist) - if ( nodeType !== 11 && (match = rquickExpr.exec( selector )) ) { - - // ID selector - if ( (m = match[1]) ) { - - // Document context - if ( nodeType === 9 ) { - if ( (elem = context.getElementById( m )) ) { - - // Support: IE, Opera, Webkit - // TODO: identify versions - // getElementById can match elements by name instead of ID - if ( elem.id === m ) { - results.push( elem ); - return results; - } - } else { - return results; - } - - // Element context - } else { - - // Support: IE, Opera, Webkit - // TODO: identify versions - // getElementById can match elements by name instead of ID - if ( newContext && (elem = newContext.getElementById( m )) && - contains( context, elem ) && - elem.id === m ) { - - results.push( elem ); - return results; - } - } - - // Type selector - } else if ( match[2] ) { - push.apply( results, context.getElementsByTagName( selector ) ); - return results; - - // Class selector - } else if ( (m = match[3]) && support.getElementsByClassName && - context.getElementsByClassName ) { - - push.apply( results, context.getElementsByClassName( m ) ); - return results; - } - } - - // Take advantage of querySelectorAll - if ( support.qsa && - !compilerCache[ selector + " " ] && - (!rbuggyQSA || !rbuggyQSA.test( selector )) ) { - - if ( nodeType !== 1 ) { - newContext = context; - newSelector = selector; - - // qSA looks outside Element context, which is not what we want - // Thanks to Andrew Dupont for this workaround technique - // Support: IE <=8 - // Exclude object elements - } else if ( context.nodeName.toLowerCase() !== "object" ) { - - // Capture the context ID, setting it first if necessary - if ( (nid = context.getAttribute( "id" )) ) { - nid = nid.replace( rcssescape, fcssescape ); - } else { - context.setAttribute( "id", (nid = expando) ); - } - - // Prefix every selector in the list - groups = tokenize( selector ); - i = groups.length; - while ( i-- ) { - groups[i] = "#" + nid + " " + toSelector( groups[i] ); - } - newSelector = groups.join( "," ); - - // Expand context for sibling selectors - newContext = rsibling.test( selector ) && testContext( context.parentNode ) || - context; - } - - if ( newSelector ) { - try { - push.apply( results, - newContext.querySelectorAll( newSelector ) - ); - return results; - } catch ( qsaError ) { - } finally { - if ( nid === expando ) { - context.removeAttribute( "id" ); - } - } - } - } - } - } - - // All others - return select( selector.replace( rtrim, "$1" ), context, results, seed ); -} - -/** - * Create key-value caches of limited size - * @returns {function(string, object)} Returns the Object data after storing it on itself with - * property name the (space-suffixed) string and (if the cache is larger than Expr.cacheLength) - * deleting the oldest entry - */ -function createCache() { - var keys = []; - - function cache( key, value ) { - // Use (key + " ") to avoid collision with native prototype properties (see Issue #157) - if ( keys.push( key + " " ) > Expr.cacheLength ) { - // Only keep the most recent entries - delete cache[ keys.shift() ]; - } - return (cache[ key + " " ] = value); - } - return cache; -} - -/** - * Mark a function for special use by Sizzle - * @param {Function} fn The function to mark - */ 
-function markFunction( fn ) { - fn[ expando ] = true; - return fn; -} - -/** - * Support testing using an element - * @param {Function} fn Passed the created element and returns a boolean result - */ -function assert( fn ) { - var el = document.createElement("fieldset"); - - try { - return !!fn( el ); - } catch (e) { - return false; - } finally { - // Remove from its parent by default - if ( el.parentNode ) { - el.parentNode.removeChild( el ); - } - // release memory in IE - el = null; - } -} - -/** - * Adds the same handler for all of the specified attrs - * @param {String} attrs Pipe-separated list of attributes - * @param {Function} handler The method that will be applied - */ -function addHandle( attrs, handler ) { - var arr = attrs.split("|"), - i = arr.length; - - while ( i-- ) { - Expr.attrHandle[ arr[i] ] = handler; - } -} - -/** - * Checks document order of two siblings - * @param {Element} a - * @param {Element} b - * @returns {Number} Returns less than 0 if a precedes b, greater than 0 if a follows b - */ -function siblingCheck( a, b ) { - var cur = b && a, - diff = cur && a.nodeType === 1 && b.nodeType === 1 && - a.sourceIndex - b.sourceIndex; - - // Use IE sourceIndex if available on both nodes - if ( diff ) { - return diff; - } - - // Check if b follows a - if ( cur ) { - while ( (cur = cur.nextSibling) ) { - if ( cur === b ) { - return -1; - } - } - } - - return a ? 1 : -1; -} - -/** - * Returns a function to use in pseudos for input types - * @param {String} type - */ -function createInputPseudo( type ) { - return function( elem ) { - var name = elem.nodeName.toLowerCase(); - return name === "input" && elem.type === type; - }; -} - -/** - * Returns a function to use in pseudos for buttons - * @param {String} type - */ -function createButtonPseudo( type ) { - return function( elem ) { - var name = elem.nodeName.toLowerCase(); - return (name === "input" || name === "button") && elem.type === type; - }; -} - -/** - * Returns a function to use in pseudos for :enabled/:disabled - * @param {Boolean} disabled true for :disabled; false for :enabled - */ -function createDisabledPseudo( disabled ) { - // Known :disabled false positives: - // IE: *[disabled]:not(button, input, select, textarea, optgroup, option, menuitem, fieldset) - // not IE: fieldset[disabled] > legend:nth-of-type(n+2) :can-disable - return function( elem ) { - - // Check form elements and option elements for explicit disabling - return "label" in elem && elem.disabled === disabled || - "form" in elem && elem.disabled === disabled || - - // Check non-disabled form elements for fieldset[disabled] ancestors - "form" in elem && elem.disabled === false && ( - // Support: IE6-11+ - // Ancestry is covered for us - elem.isDisabled === disabled || - - // Otherwise, assume any non-