From 8ce902541cb9c9010bda22bf3900688c5350536e Mon Sep 17 00:00:00 2001 From: xsrobin <50069408+xsrobin@users.noreply.github.com> Date: Fri, 2 Aug 2019 19:41:35 +0800 Subject: [PATCH] fix unalign of some examples (#18943) * test=develop test=document_preview * Update API.spec --- paddle/fluid/API.spec | 14 ++-- python/paddle/fluid/initializer.py | 70 +++++++++---------- .../fluid/layers/learning_rate_scheduler.py | 16 ++--- python/paddle/fluid/layers/nn.py | 68 +++++++++--------- 4 files changed, 84 insertions(+), 84 deletions(-) diff --git a/paddle/fluid/API.spec b/paddle/fluid/API.spec index 8880da2e1a..05dd0cd33e 100644 --- a/paddle/fluid/API.spec +++ b/paddle/fluid/API.spec @@ -79,18 +79,18 @@ paddle.fluid.initializer.ConstantInitializer ('paddle.fluid.initializer.Constant paddle.fluid.initializer.ConstantInitializer.__init__ (ArgSpec(args=['self', 'value', 'force_cpu'], varargs=None, keywords=None, defaults=(0.0, False)), ('document', '6adf97f83acf6453d4a6a4b1070f3754')) paddle.fluid.initializer.UniformInitializer ('paddle.fluid.initializer.UniformInitializer', ('document', 'a8f1177e4ce29766853e801d5b0a3635')) paddle.fluid.initializer.UniformInitializer.__init__ (ArgSpec(args=['self', 'low', 'high', 'seed'], varargs=None, keywords=None, defaults=(-1.0, 1.0, 0)), ('document', '6adf97f83acf6453d4a6a4b1070f3754')) -paddle.fluid.initializer.NormalInitializer ('paddle.fluid.initializer.NormalInitializer', ('document', '2171207fb07293603e0fd2ff01234b3e')) +paddle.fluid.initializer.NormalInitializer ('paddle.fluid.initializer.NormalInitializer', ('document', '279a0d89bf01138fbf4c4ba14f22099b')) paddle.fluid.initializer.NormalInitializer.__init__ (ArgSpec(args=['self', 'loc', 'scale', 'seed'], varargs=None, keywords=None, defaults=(0.0, 1.0, 0)), ('document', '6adf97f83acf6453d4a6a4b1070f3754')) paddle.fluid.initializer.TruncatedNormalInitializer ('paddle.fluid.initializer.TruncatedNormalInitializer', ('document', 'b8e90aad6ee5687cb5f2b6fd404370d1')) paddle.fluid.initializer.TruncatedNormalInitializer.__init__ (ArgSpec(args=['self', 'loc', 'scale', 'seed'], varargs=None, keywords=None, defaults=(0.0, 1.0, 0)), ('document', '6adf97f83acf6453d4a6a4b1070f3754')) paddle.fluid.initializer.XavierInitializer ('paddle.fluid.initializer.XavierInitializer', ('document', '3d5676f1a5414aa0c815d793a795ccb3')) paddle.fluid.initializer.XavierInitializer.__init__ (ArgSpec(args=['self', 'uniform', 'fan_in', 'fan_out', 'seed'], varargs=None, keywords=None, defaults=(True, None, None, 0)), ('document', '6adf97f83acf6453d4a6a4b1070f3754')) -paddle.fluid.initializer.BilinearInitializer ('paddle.fluid.initializer.BilinearInitializer', ('document', '5646a5cd44f0c9111344d13f46d31169')) +paddle.fluid.initializer.BilinearInitializer ('paddle.fluid.initializer.BilinearInitializer', ('document', '8a40b54fe33c19c3edcf6624ffae5d03')) paddle.fluid.initializer.BilinearInitializer.__init__ (ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None), ('document', 'd389912dc079cbef432335a00017cec0')) -paddle.fluid.initializer.MSRAInitializer ('paddle.fluid.initializer.MSRAInitializer', ('document', 'ecfadb28c52d01496d107835a69ec3f9')) +paddle.fluid.initializer.MSRAInitializer ('paddle.fluid.initializer.MSRAInitializer', ('document', 'b99e0ee95e2fd02640cb4b08a7ae80cc')) paddle.fluid.initializer.MSRAInitializer.__init__ (ArgSpec(args=['self', 'uniform', 'fan_in', 'seed'], varargs=None, keywords=None, defaults=(True, None, 0)), ('document', '53c757bed9345f2ad3361902531e7cf5')) -paddle.fluid.initializer.force_init_on_cpu 
(ArgSpec(args=[], varargs=None, keywords=None, defaults=None), ('document', '53c01b661feb8e60d0efa2066976c1a8')) -paddle.fluid.initializer.init_on_cpu (ArgSpec(args=[], varargs=None, keywords=None, defaults=None), ('document', '68bebc3963526880a07c98a5d6226794')) +paddle.fluid.initializer.force_init_on_cpu (ArgSpec(args=[], varargs=None, keywords=None, defaults=None), ('document', '5f55553caf939d270c7fe8dc418084b2')) +paddle.fluid.initializer.init_on_cpu (ArgSpec(args=[], varargs=None, keywords=None, defaults=None), ('document', 'eaa04fd68661a3af59abd0e19b3b6eda')) paddle.fluid.initializer.NumpyArrayInitializer ('paddle.fluid.initializer.NumpyArrayInitializer', ('document', '064f134a27c16372967d450f499762ab')) paddle.fluid.initializer.NumpyArrayInitializer.__init__ (ArgSpec(args=['self', 'value'], varargs=None, keywords=None, defaults=None), ('document', '6adf97f83acf6453d4a6a4b1070f3754')) paddle.fluid.layers.fc (ArgSpec(args=['input', 'size', 'num_flatten_dims', 'param_attr', 'bias_attr', 'act', 'is_test', 'name'], varargs=None, keywords=None, defaults=(1, None, None, None, False, None)), ('document', '1c74f52549814235077ecc34856a95eb')) @@ -148,7 +148,7 @@ paddle.fluid.layers.warpctc (ArgSpec(args=['input', 'label', 'blank', 'norm_by_t paddle.fluid.layers.sequence_reshape (ArgSpec(args=['input', 'new_dim'], varargs=None, keywords=None, defaults=None), ('document', 'f568714a876425004aca4ea2d4a27701')) paddle.fluid.layers.transpose (ArgSpec(args=['x', 'perm', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', '8e72db173d4c082e27cb11f31d8c9bfa')) paddle.fluid.layers.im2sequence (ArgSpec(args=['input', 'filter_size', 'stride', 'padding', 'input_image_size', 'out_stride', 'name'], varargs=None, keywords=None, defaults=(1, 1, 0, None, 1, None)), ('document', '33134416fc27dd65a767e5f15116ee16')) -paddle.fluid.layers.nce (ArgSpec(args=['input', 'label', 'num_total_classes', 'sample_weight', 'param_attr', 'bias_attr', 'num_neg_samples', 'name', 'sampler', 'custom_dist', 'seed', 'is_sparse'], varargs=None, keywords=None, defaults=(None, None, None, None, None, 'uniform', None, 0, False)), ('document', '11a544a6e3fd0482509712dd54377fa1')) +paddle.fluid.layers.nce (ArgSpec(args=['input', 'label', 'num_total_classes', 'sample_weight', 'param_attr', 'bias_attr', 'num_neg_samples', 'name', 'sampler', 'custom_dist', 'seed', 'is_sparse'], varargs=None, keywords=None, defaults=(None, None, None, None, None, 'uniform', None, 0, False)), ('document', '83d4ca6dfb957912807f535756e76992')) paddle.fluid.layers.sampled_softmax_with_cross_entropy (ArgSpec(args=['logits', 'label', 'num_samples', 'num_true', 'remove_accidental_hits', 'use_customized_samples', 'customized_samples', 'customized_probabilities', 'seed'], varargs=None, keywords=None, defaults=(1, True, False, None, None, 0)), ('document', 'd4435a63d34203339831ee6a86ef9242')) paddle.fluid.layers.hsigmoid (ArgSpec(args=['input', 'label', 'num_classes', 'param_attr', 'bias_attr', 'name', 'path_table', 'path_code', 'is_custom', 'is_sparse'], varargs=None, keywords=None, defaults=(None, None, None, None, None, False, False)), ('document', 'b83e7dfa81059b39bb137922dc914f50')) paddle.fluid.layers.beam_search (ArgSpec(args=['pre_ids', 'pre_scores', 'ids', 'scores', 'beam_size', 'end_id', 'level', 'is_accumulated', 'name', 'return_parent_idx'], varargs=None, keywords=None, defaults=(0, True, None, False)), ('document', '1270395ce97a4e1b556104abbb14f096')) @@ -409,7 +409,7 @@ paddle.fluid.layers.inverse_time_decay 
(ArgSpec(args=['learning_rate', 'decay_st paddle.fluid.layers.polynomial_decay (ArgSpec(args=['learning_rate', 'decay_steps', 'end_learning_rate', 'power', 'cycle'], varargs=None, keywords=None, defaults=(0.0001, 1.0, False)), ('document', 'a343254c36c2e89512cd8cd8a1960ead')) paddle.fluid.layers.piecewise_decay (ArgSpec(args=['boundaries', 'values'], varargs=None, keywords=None, defaults=None), ('document', 'd9f654117542c6b702963dda107a247f')) paddle.fluid.layers.noam_decay (ArgSpec(args=['d_model', 'warmup_steps'], varargs=None, keywords=None, defaults=None), ('document', 'fd57228fb76195e66bbcc8d8e42c494d')) -paddle.fluid.layers.cosine_decay (ArgSpec(args=['learning_rate', 'step_each_epoch', 'epochs'], varargs=None, keywords=None, defaults=None), ('document', 'f0d65d8c89d0fe78051ca689daa15e35')) +paddle.fluid.layers.cosine_decay (ArgSpec(args=['learning_rate', 'step_each_epoch', 'epochs'], varargs=None, keywords=None, defaults=None), ('document', '1062e487dd3b50a6e58b5703b4f594c9')) paddle.fluid.layers.linear_lr_warmup (ArgSpec(args=['learning_rate', 'warmup_steps', 'start_lr', 'end_lr'], varargs=None, keywords=None, defaults=None), ('document', 'dc7292c456847ba41cfd318e9f7f4363')) paddle.fluid.layers.Uniform ('paddle.fluid.layers.distributions.Uniform', ('document', 'af70e7003f437e7a8a9e28cded35c433')) paddle.fluid.layers.Uniform.__init__ (ArgSpec(args=['self', 'low', 'high'], varargs=None, keywords=None, defaults=None), ('document', '6adf97f83acf6453d4a6a4b1070f3754')) diff --git a/python/paddle/fluid/initializer.py b/python/paddle/fluid/initializer.py index a5a50732a4..47d8c67a96 100644 --- a/python/paddle/fluid/initializer.py +++ b/python/paddle/fluid/initializer.py @@ -42,10 +42,10 @@ def force_init_on_cpu(): .. code-block:: python - import paddle.fluid as fluid - if fluid.initializer.force_init_on_cpu(): - step = fluid.layers.create_global_var( - shape=[2,3], value=1.0, dtype='float32') + import paddle.fluid as fluid + if fluid.initializer.force_init_on_cpu(): + step = fluid.layers.create_global_var( + shape=[2,3], value=1.0, dtype='float32') """ return _force_init_on_cpu_ @@ -59,10 +59,10 @@ def init_on_cpu(): Examples: .. code-block:: python - import paddle.fluid as fluid - with fluid.initializer.init_on_cpu(): - step = fluid.layers.create_global_var( - shape=[2,3], value=1.0, dtype='float32') + import paddle.fluid as fluid + with fluid.initializer.init_on_cpu(): + step = fluid.layers.create_global_var( + shape=[2,3], value=1.0, dtype='float32') """ global _force_init_on_cpu_ @@ -295,10 +295,10 @@ class NormalInitializer(Initializer): Examples: .. code-block:: python - import paddle.fluid as fluid - x = fluid.layers.data(name="data", shape=[32, 32], dtype="float32") - fc = fluid.layers.fc(input=x, size=10, - param_attr=fluid.initializer.Normal(loc=0.0, scale=2.0)) + import paddle.fluid as fluid + x = fluid.layers.data(name="data", shape=[32, 32], dtype="float32") + fc = fluid.layers.fc(input=x, size=10, + param_attr=fluid.initializer.Normal(loc=0.0, scale=2.0)) """ @@ -611,11 +611,11 @@ class MSRAInitializer(Initializer): Examples: .. 
code-block:: python - - import paddle.fluid as fluid - x = fluid.layers.data(name="data", shape=[32, 32], dtype="float32") - fc = fluid.layers.fc(input=x, size=10, - param_attr=fluid.initializer.MSRA(uniform=False)) + + import paddle.fluid as fluid + x = fluid.layers.data(name="data", shape=[32, 32], dtype="float32") + fc = fluid.layers.fc(input=x, size=10, + param_attr=fluid.initializer.MSRA(uniform=False)) """ @@ -715,25 +715,25 @@ class BilinearInitializer(Initializer): .. code-block:: python - import paddle.fluid as fluid - factor = 2 - C = 2 - w_attr = fluid.param_attr.ParamAttr( - learning_rate=0., - regularizer=fluid.regularizer.L2Decay(0.), + import paddle.fluid as fluid + factor = 2 + C = 2 + w_attr = fluid.param_attr.ParamAttr( + learning_rate=0., + regularizer=fluid.regularizer.L2Decay(0.), initializer=fluid.initializer.Bilinear()) - x = fluid.layers.data(name="data", shape=[3, 32, 32], - dtype="float32") - conv_up = fluid.layers.conv2d_transpose( - input=x, - num_filters=C, - output_size=None, - filter_size=2 * factor - factor % 2, - padding=int(math.ceil((factor - 1) / 2.)), - stride=factor, - groups=C, - param_attr=w_attr, - bias_attr=False) + x = fluid.layers.data(name="data", shape=[3, 32, 32], + dtype="float32") + conv_up = fluid.layers.conv2d_transpose( + input=x, + num_filters=C, + output_size=None, + filter_size=2 * factor - factor % 2, + padding=int(math.ceil((factor - 1) / 2.)), + stride=factor, + groups=C, + param_attr=w_attr, + bias_attr=False) Where, `num_filters=C` and `groups=C` means this is channel-wise transposed convolution. The filter shape will be (C, 1, K, K) where K is `filer_size`, diff --git a/python/paddle/fluid/layers/learning_rate_scheduler.py b/python/paddle/fluid/layers/learning_rate_scheduler.py index 20d9443861..4a64c215e8 100644 --- a/python/paddle/fluid/layers/learning_rate_scheduler.py +++ b/python/paddle/fluid/layers/learning_rate_scheduler.py @@ -405,23 +405,23 @@ def cosine_decay(learning_rate, step_each_epoch, epochs): .. math:: - decayed\_lr = learning\_rate * 0.5 * (math.cos * (epoch * \\frac{math.pi}{epochs} ) + 1) - + decayed\_lr = learning\_rate * 0.5 * (math.cos * (epoch * \\frac{math.pi}{epochs} ) + 1) + Args: learning_rate(Variable|float): The initial learning rate. step_each_epoch(int): the number of steps in an epoch. epochs(int): the number of epochs. Returns: - Variable: The decayed learning rate. + Variable: The decayed learning rate. Examples: - .. code-block:: python + .. code-block:: python - import paddle.fluid as fluid - base_lr = 0.1 - lr = fluid.layers.cosine_decay( - learning_rate = base_lr, step_each_epoch=10000, epochs=120) + import paddle.fluid as fluid + base_lr = 0.1 + lr = fluid.layers.cosine_decay( + learning_rate = base_lr, step_each_epoch=10000, epochs=120) """ with default_main_program()._lr_schedule_guard(): diff --git a/python/paddle/fluid/layers/nn.py b/python/paddle/fluid/layers/nn.py index ba3d3dd473..7456fccdb5 100644 --- a/python/paddle/fluid/layers/nn.py +++ b/python/paddle/fluid/layers/nn.py @@ -5811,40 +5811,40 @@ def nce(input, .. 
code-block:: python - import paddle.fluid as fluid - import numpy as np - - window_size = 5 - words = [] - for i in xrange(window_size): - words.append(fluid.layers.data( - name='word_{0}'.format(i), shape=[1], dtype='int64')) - - dict_size = 10000 - label_word = int(window_size / 2) + 1 - - embs = [] - for i in xrange(window_size): - if i == label_word: - continue - - emb = fluid.layers.embedding(input=words[i], size=[dict_size, 32], - param_attr='embed', is_sparse=True) - embs.append(emb) - - embs = fluid.layers.concat(input=embs, axis=1) - loss = fluid.layers.nce(input=embs, label=words[label_word], - num_total_classes=dict_size, param_attr='nce.w_0', - bias_attr='nce.b_0') - - #or use custom distribution - dist = np.array([0.05,0.5,0.1,0.3,0.05]) - loss = fluid.layers.nce(input=embs, label=words[label_word], - num_total_classes=5, param_attr='nce.w_1', - bias_attr='nce.b_1', - num_neg_samples=3, - sampler="custom_dist", - custom_dist=dist) + import paddle.fluid as fluid + import numpy as np + + window_size = 5 + words = [] + for i in xrange(window_size): + words.append(fluid.layers.data( + name='word_{0}'.format(i), shape=[1], dtype='int64')) + + dict_size = 10000 + label_word = int(window_size / 2) + 1 + + embs = [] + for i in xrange(window_size): + if i == label_word: + continue + + emb = fluid.layers.embedding(input=words[i], size=[dict_size, 32], + param_attr='embed', is_sparse=True) + embs.append(emb) + + embs = fluid.layers.concat(input=embs, axis=1) + loss = fluid.layers.nce(input=embs, label=words[label_word], + num_total_classes=dict_size, param_attr='nce.w_0', + bias_attr='nce.b_0') + + #or use custom distribution + dist = np.array([0.05,0.5,0.1,0.3,0.05]) + loss = fluid.layers.nce(input=embs, label=words[label_word], + num_total_classes=5, param_attr='nce.w_1', + bias_attr='nce.b_1', + num_neg_samples=3, + sampler="custom_dist", + custom_dist=dist) """ helper = LayerHelper('nce', **locals()) assert isinstance(input, Variable) -- GitLab
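
Note (outside the patch itself): the learning_rate_scheduler.py hunk above re-indents the cosine_decay docstring but keeps its formula written as "math.cos * (epoch * \frac{math.pi}{epochs})", which is hard to read as an expression. As a minimal standalone sketch of the schedule that docstring describes, in plain Python and assuming epoch is taken as floor(global_step / step_each_epoch) as the argument names suggest (this helper is illustrative only, not the fluid API):

import math

def cosine_decayed_lr(learning_rate, global_step, step_each_epoch, epochs):
    """Decayed LR per the docstring: lr * 0.5 * (cos(epoch * pi / epochs) + 1)."""
    epoch = global_step // step_each_epoch      # assumed mapping from step to epoch
    return learning_rate * 0.5 * (math.cos(epoch * math.pi / epochs) + 1)

# Same values as the docstring's fluid example: base_lr = 0.1,
# 10000 steps per epoch, 120 epochs.
base_lr = 0.1
for step in (0, 600000, 1199999):
    print(step, cosine_decayed_lr(base_lr, step, 10000, 120))

With these values the rate starts at 0.1, passes 0.05 halfway through training, and decays toward 0 in the final epoch, which matches the cosine shape the formula intends.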