From 905b90d7efde78f3a05ae5000338a352c37f03a5 Mon Sep 17 00:00:00 2001 From: qijun Date: Wed, 29 Mar 2017 11:03:15 +0800 Subject: [PATCH] format code --- doc/api/v2/data.rst | 4 ++-- doc/api/v2/run_logic.rst | 5 +++-- python/paddle/v2/dataset/cifar.py | 6 +++--- python/paddle/v2/dataset/conll05.py | 10 +++++----- python/paddle/v2/dataset/imdb.py | 12 ++++++------ python/paddle/v2/dataset/imikolov.py | 4 ++-- python/paddle/v2/dataset/movielens.py | 6 +++--- python/paddle/v2/dataset/uci_housing.py | 4 ++-- python/paddle/v2/dataset/wmt14.py | 6 +++--- python/paddle/v2/inference.py | 2 +- python/paddle/v2/optimizer.py | 2 +- python/paddle/v2/trainer.py | 2 +- 12 files changed, 32 insertions(+), 31 deletions(-) diff --git a/doc/api/v2/data.rst b/doc/api/v2/data.rst index 69fdea79b16..b011bd959de 100644 --- a/doc/api/v2/data.rst +++ b/doc/api/v2/data.rst @@ -1,5 +1,5 @@ ================================== -Data Reader Inferface and DataSets +Data Reader Inferface and DataSets ================================== @@ -78,7 +78,7 @@ imikolov :noindex: movielens -+++++++++ ++++++++++ .. automodule:: paddle.v2.dataset.movielens :members: diff --git a/doc/api/v2/run_logic.rst b/doc/api/v2/run_logic.rst index 1b3d23d1e8f..5c97651f653 100644 --- a/doc/api/v2/run_logic.rst +++ b/doc/api/v2/run_logic.rst @@ -20,11 +20,12 @@ Event ===== .. automodule:: paddle.v2.event - :members: + :members: :noindex: Inference ========= .. autofunction:: paddle.v2.infer - :noindex: \ No newline at end of file + :noindex: + \ No newline at end of file diff --git a/python/paddle/v2/dataset/cifar.py b/python/paddle/v2/dataset/cifar.py index adcf8fbe763..8a2642d8dd8 100644 --- a/python/paddle/v2/dataset/cifar.py +++ b/python/paddle/v2/dataset/cifar.py @@ -17,11 +17,11 @@ CIFAR dataset. This module will download dataset from https://www.cs.toronto.edu/~kriz/cifar.html and parse train/test set into paddle reader creators. -The CIFAR-10 dataset consists of 60000 32x32 colour images in 10 classes, with 6000 +The CIFAR-10 dataset consists of 60000 32x32 colour images in 10 classes, with 6000 images per class. There are 50000 training images and 10000 test images. -The CIFAR-100 dataset is just like the CIFAR-10, except it has 100 classes containing -600 images each. There are 500 training images and 100 testing images per class. +The CIFAR-100 dataset is just like the CIFAR-10, except it has 100 classes containing +600 images each. There are 500 training images and 100 testing images per class. """ diff --git a/python/paddle/v2/dataset/conll05.py b/python/paddle/v2/dataset/conll05.py index d9ea2d027fb..cf1ea8ac5a3 100644 --- a/python/paddle/v2/dataset/conll05.py +++ b/python/paddle/v2/dataset/conll05.py @@ -12,10 +12,10 @@ # See the License for the specific language governing permissions and # limitations under the License. """ -Conll05 dataset. -Paddle semantic role labeling Book and demo use this dataset as an example. Because -Conll05 is not free in public, the default downloaded URL is test set of -Conll05 (which is public). Users can change URL and MD5 to their Conll dataset. +Conll05 dataset. +Paddle semantic role labeling Book and demo use this dataset as an example. Because +Conll05 is not free in public, the default downloaded URL is test set of +Conll05 (which is public). Users can change URL and MD5 to their Conll dataset. And a pre-trained word vector model based on Wikipedia corpus is used to initialize SRL model. """ @@ -200,7 +200,7 @@ def test(): Conll05 test set creator. 
Because the train dataset is not free, the test dataset is used for training. - It returns a reader creator, each sample in the reader is nine features, including sentence + It returns a reader creator, each sample in the reader is nine features, including sentence sequence, predicate, predicate context, predicate context flag and tagged sequence. :return: Train reader creator diff --git a/python/paddle/v2/dataset/imdb.py b/python/paddle/v2/dataset/imdb.py index e363e21d953..0340f37f276 100644 --- a/python/paddle/v2/dataset/imdb.py +++ b/python/paddle/v2/dataset/imdb.py @@ -14,10 +14,10 @@ """ IMDB dataset. -This module download IMDB dataset from -http://ai.stanford.edu/%7Eamaas/data/sentiment/, which contains a set of 25,000 -highly polar movie reviews for training, and 25,000 for testing. Besides, this -module also provides API for build dictionary and parse train set and test set +This module download IMDB dataset from +http://ai.stanford.edu/%7Eamaas/data/sentiment/, which contains a set of 25,000 +highly polar movie reviews for training, and 25,000 for testing. Besides, this +module also provides API for build dictionary and parse train set and test set into paddle reader creators. """ @@ -122,7 +122,7 @@ def train(word_idx): """ IMDB train set creator. - It returns a reader creator, each sample in the reader is an index + It returns a reader creator, each sample in the reader is an index sequence and label in [0, 1]. :param word_idx: word dictionary @@ -139,7 +139,7 @@ def test(word_idx): """ IMDB test set creator. - It returns a reader creator, each sample in the reader is an index + It returns a reader creator, each sample in the reader is an index sequence and label in [0, 1]. :param word_idx: word dictionary diff --git a/python/paddle/v2/dataset/imikolov.py b/python/paddle/v2/dataset/imikolov.py index 6de5abe1079..917a0be8497 100644 --- a/python/paddle/v2/dataset/imikolov.py +++ b/python/paddle/v2/dataset/imikolov.py @@ -91,7 +91,7 @@ def train(word_idx, n): """ imikolov train set creator. - It returns a reader creator, each sample in the reader is an index + It returns a reader creator, each sample in the reader is an index tuple. :param word_idx: word dictionary @@ -108,7 +108,7 @@ def test(word_idx, n): """ imikolov test set creator. - It returns a reader creator, each sample in the reader is an index + It returns a reader creator, each sample in the reader is an index tuple. :param word_idx: word dictionary diff --git a/python/paddle/v2/dataset/movielens.py b/python/paddle/v2/dataset/movielens.py index 571868d8356..822f7f293df 100644 --- a/python/paddle/v2/dataset/movielens.py +++ b/python/paddle/v2/dataset/movielens.py @@ -14,9 +14,9 @@ """ Movielens 1-M dataset. -Movielens 1-M dataset contains 1 million ratings from 6000 users on 4000 movies, which was -collected by GroupLens Research. This module will download Movielens 1-M dataset from -http://files.grouplens.org/datasets/movielens/ml-1m.zip and parse train/test set +Movielens 1-M dataset contains 1 million ratings from 6000 users on 4000 movies, which was +collected by GroupLens Research. This module will download Movielens 1-M dataset from +http://files.grouplens.org/datasets/movielens/ml-1m.zip and parse train/test set into paddle reader creators. """ diff --git a/python/paddle/v2/dataset/uci_housing.py b/python/paddle/v2/dataset/uci_housing.py index 57dc4d223bb..3e358e4e8aa 100644 --- a/python/paddle/v2/dataset/uci_housing.py +++ b/python/paddle/v2/dataset/uci_housing.py @@ -14,7 +14,7 @@ """ UCI Housing dataset. 
-This module will download dataset from +This module will download dataset from https://archive.ics.uci.edu/ml/machine-learning-databases/housing/ and parse train/test set into paddle reader creators. """ @@ -75,7 +75,7 @@ def train(): """ UCI_HOUSING train set creator. - It returns a reader creator, each sample in the reader is features after normalization + It returns a reader creator, each sample in the reader is features after normalization and price number. :return: Train reader creator diff --git a/python/paddle/v2/dataset/wmt14.py b/python/paddle/v2/dataset/wmt14.py index 48c39547fd1..b02d7070951 100644 --- a/python/paddle/v2/dataset/wmt14.py +++ b/python/paddle/v2/dataset/wmt14.py @@ -14,7 +14,7 @@ """ WMT14 dataset. The original WMT14 dataset is too large and a small set of data for set is provided. -This module will download dataset from +This module will download dataset from http://paddlepaddle.cdn.bcebos.com/demo/wmt_shrinked_data/wmt14.tgz and parse train/test set into paddle reader creators. @@ -102,7 +102,7 @@ def train(dict_size): """ WMT14 train set creator. - It returns a reader creator, each sample in the reader is source language word index + It returns a reader creator, each sample in the reader is source language word index sequence, target language word index sequence and next word index sequence. :return: Train reader creator @@ -116,7 +116,7 @@ def test(dict_size): """ WMT14 test set creator. - It returns a reader creator, each sample in the reader is source language word index + It returns a reader creator, each sample in the reader is source language word index sequence, target language word index sequence and next word index sequence. :return: Train reader creator diff --git a/python/paddle/v2/inference.py b/python/paddle/v2/inference.py index 8d027b9e5bb..848546a1df4 100644 --- a/python/paddle/v2/inference.py +++ b/python/paddle/v2/inference.py @@ -49,7 +49,7 @@ class Inference(object): def iter_infer_field(self, field, **kwargs): for result in self.iter_infer(**kwargs): yield [each_result[field] for each_result in result] - + def infer(self, field='value', **kwargs): retv = None for result in self.iter_infer_field(field=field, **kwargs): diff --git a/python/paddle/v2/optimizer.py b/python/paddle/v2/optimizer.py index d153d30ea20..feefd7d758b 100644 --- a/python/paddle/v2/optimizer.py +++ b/python/paddle/v2/optimizer.py @@ -195,7 +195,7 @@ class AdaDelta(Optimizer): :param epsilon: :math:`\\rho` in equation :type epsilon: float """ - + def __init__(self, rho=0.95, epsilon=1e-06, **kwargs): learning_method = v1_optimizers.AdaDeltaOptimizer( rho=rho, epsilon=epsilon) diff --git a/python/paddle/v2/trainer.py b/python/paddle/v2/trainer.py index 265f031532f..15fcefef45b 100644 --- a/python/paddle/v2/trainer.py +++ b/python/paddle/v2/trainer.py @@ -130,7 +130,7 @@ class SGD(object): Testing method. Will test input data. :param reader: A reader that reads and yeilds data items. - :type reader: collections.Iterable + :type reader: collections.Iterable :param feeding: Feeding is a map of neural network input name and array index that reader returns. :type feeding: dict -- GitLab
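
For context on the docstrings touched above: every dataset module in this patch (cifar, conll05, imdb, imikolov, movielens, uci_housing, wmt14) follows the same convention, where train()/test() return a "reader creator", a zero-argument callable that yields one sample per iteration and can be handed to the trainer or to batching helpers. The sketch below is a minimal, self-contained illustration of that convention in plain Python; the make_reader helper and the toy data are invented for illustration, and the commented paddle.v2.dataset.uci_housing usage at the end is an assumption inferred from the docstrings in this patch, not verified against the library.

    # A minimal sketch of the "reader creator" convention described in the
    # docstrings above: a dataset function returns a zero-argument callable,
    # and calling that callable yields one sample at a time. The sample shape
    # mirrors the uci_housing docstring ("features after normalization and
    # price number"); everything else here is illustrative only.

    def make_reader(samples):
        """Return a reader creator over an in-memory list of samples."""
        def reader():
            for sample in samples:
                yield sample
        return reader


    if __name__ == "__main__":
        # Toy stand-in for a dataset: each sample is (feature list, target).
        toy_samples = [([0.1, 0.2, 0.3], 24.0), ([0.4, 0.5, 0.6], 21.6)]

        train_reader = make_reader(toy_samples)

        # The reader creator is called to obtain a fresh iterator over samples.
        for features, price in train_reader():
            print(features, price)

        # With the PaddlePaddle v2 API itself, usage is expected to look like
        # the following (an assumption based on the docstrings in this patch,
        # not a verified call sequence):
        #
        #   import paddle.v2 as paddle
        #   reader = paddle.v2.dataset.uci_housing.train()
        #   for features, price in reader():
        #       ...

The same pattern is what trainer.test(reader=..., feeding=...) and paddle.v2.infer consume: they iterate the reader creator and map each yielded tuple onto network inputs via the feeding dictionary.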