From 54c368db1e3b1b34fa6a65f5249a84d2811dd62a Mon Sep 17 00:00:00 2001
From: 123malin
Date: Wed, 30 Sep 2020 18:37:29 +0800
Subject: [PATCH] [API 2.0: doc] fix doc of nonzero (#27685)

* test=develop, update example
---
 python/paddle/fluid/optimizer.py              | 48 +++++++++++--------
 .../fluid/tests/unittests/test_nonzero_api.py |  8 ++++
 python/paddle/optimizer/__init__.py           | 14 +++---
 python/paddle/tensor/search.py                | 19 ++++----
 4 files changed, 49 insertions(+), 40 deletions(-)

diff --git a/python/paddle/fluid/optimizer.py b/python/paddle/fluid/optimizer.py
index 761f6409fe..4a9ce4454a 100755
--- a/python/paddle/fluid/optimizer.py
+++ b/python/paddle/fluid/optimizer.py
@@ -4884,29 +4884,35 @@ class LookaheadOptimizer(object):
             import paddle
             import paddle.fluid as fluid
             import numpy as np
+            import numpy.random as random
 
-            x = fluid.layers.data(name='x', shape=[2], dtype='float32')
-            label = fluid.layers.data(name="label", shape=[1], dtype="int64")
-            y = fluid.layers.fc(input=[x], size=2, act="softmax")
-            loss = fluid.layers.cross_entropy(input=y, label=label)
-            loss = fluid.layers.mean(x=loss)
-            sgd = fluid.optimizer.SGD(learning_rate=0.01)
-            optimizer = fluid.optimizer.LookaheadOptimizer(sgd,
-                                                           alpha=0.5,
-                                                           k=5)
-            optimizer.minimize(loss)
-            main_program = fluid.default_main_program()
-            place = fluid.CPUPlace()
-            exe = fluid.Executor(place)
-            exe.run(fluid.default_startup_program())
-
-            feeder = fluid.DataFeeder(feed_list=[x, label], place=place)
+            paddle.enable_static()
+
+            x = fluid.layers.data(name='x', shape=[2], dtype='float32')
+            label = fluid.layers.data(name="label", shape=[1], dtype="int64")
+            y = fluid.layers.fc(input=[x], size=2, act="softmax")
+            loss = fluid.layers.cross_entropy(input=y, label=label)
+            loss = fluid.layers.mean(x=loss)
+            sgd = fluid.optimizer.SGD(learning_rate=0.01)
+            optimizer = fluid.optimizer.LookaheadOptimizer(sgd,
+                                                           alpha=0.5,
+                                                           k=5)
+            optimizer.minimize(loss)
+            main_program = fluid.default_main_program()
+            place = fluid.CPUPlace()
+            exe = fluid.Executor(place)
+            exe.run(fluid.default_startup_program())
 
-            step = 0
-            while(step < 10):
-                step += 1
-                exe.run(fluid.default_main_program(),
-                        feed=feeder.feed(batch_data))
+            def train_reader(limit=5):
+                for i in range(limit):
+                    yield random.random([2]).astype('float32'), random.random([1]).astype('int64')
+
+            feeder = fluid.DataFeeder(feed_list=[x, label], place=place)
+            reader = paddle.batch(paddle.reader.shuffle(train_reader, buf_size=50000),batch_size=1)
+
+            for batch_data in reader():
+                exe.run(fluid.default_main_program(),
+                        feed=feeder.feed(batch_data))
 
    """
 
diff --git a/python/paddle/fluid/tests/unittests/test_nonzero_api.py b/python/paddle/fluid/tests/unittests/test_nonzero_api.py
index 0e68f9d5be..8569be82db 100644
--- a/python/paddle/fluid/tests/unittests/test_nonzero_api.py
+++ b/python/paddle/fluid/tests/unittests/test_nonzero_api.py
@@ -76,6 +76,14 @@ class TestNonZeroAPI(unittest.TestCase):
         expect_out = np.array([[0], [1]])
         self.assertTrue(np.allclose(expect_out, np.array(res)))
 
+    def test_dygraph_api(self):
+        data_x = np.array([[True, False], [False, True]])
+        with fluid.dygraph.guard():
+            x = fluid.dygraph.to_variable(data_x)
+            z = paddle.nonzero(x)
+            np_z = z.numpy()
+        expect_out = np.array([[0, 0], [1, 1]])
+        self.assertTrue(np.allclose(expect_out, np_z))
 
 if __name__ == "__main__":
     unittest.main()
diff --git a/python/paddle/optimizer/__init__.py b/python/paddle/optimizer/__init__.py
index 30de88cc29..6f485e2e9d 100644
--- a/python/paddle/optimizer/__init__.py
+++ b/python/paddle/optimizer/__init__.py
@@ -15,19 +15,17 @@
 __all__ = [
     'Adadelta', 'AdadeltaOptimizer', 'Adagrad', 'AdagradOptimizer', 'Adam',
     'Adamax', 'AdamW', 'DecayedAdagrad', 'DecayedAdagradOptimizer', 'Dpsgd',
-    'DpsgdOptimizer', 'Ftrl', 'FtrlOptimizer', 'LookaheadOptimizer',
-    'ModelAverage', 'Momentum', 'MomentumOptimizer', 'RMSProp', 'SGD',
-    'SGDOptimizer', 'Optimizer', '_LRScheduler', 'NoamLR', 'PiecewiseLR',
-    'NaturalExpLR', 'InverseTimeLR', 'PolynomialLR', 'LinearLrWarmup',
-    'ExponentialLR', 'MultiStepLR', 'StepLR', 'LambdaLR', 'ReduceLROnPlateau',
-    'CosineAnnealingLR'
+    'DpsgdOptimizer', 'Ftrl', 'FtrlOptimizer', 'Momentum', 'MomentumOptimizer',
+    'RMSProp', 'SGD', 'SGDOptimizer', 'Optimizer', '_LRScheduler', 'NoamLR',
+    'PiecewiseLR', 'NaturalExpLR', 'InverseTimeLR', 'PolynomialLR',
+    'LinearLrWarmup', 'ExponentialLR', 'MultiStepLR', 'StepLR', 'LambdaLR',
+    'ReduceLROnPlateau', 'CosineAnnealingLR'
 ]
 
 from ..fluid.optimizer import Momentum, Adagrad, Dpsgd, DecayedAdagrad, Ftrl,\
     AdagradOptimizer, DpsgdOptimizer, DecayedAdagradOptimizer, \
-    FtrlOptimizer, AdadeltaOptimizer, ModelAverage, \
-    LookaheadOptimizer
+    FtrlOptimizer, AdadeltaOptimizer
 
 from .optimizer import Optimizer
 from .adam import Adam
diff --git a/python/paddle/tensor/search.py b/python/paddle/tensor/search.py
index f55d285586..19d8fc58b0 100644
--- a/python/paddle/tensor/search.py
+++ b/python/paddle/tensor/search.py
@@ -339,11 +339,8 @@ def index_select(x, index, axis=0, name=None):
     return out
 
 
-def nonzero(input, as_tuple=False):
+def nonzero(x, as_tuple=False):
     """
-    :alias_main: paddle.nonzero
-    :alias: paddle.nonzero,paddle.tensor.nonzero,paddle.tensor.search.nonzero
-
     Return a tensor containing the indices of all non-zero elements of the `input`
     tensor. If as_tuple is True, return a tuple of 1-D tensors, one for each dimension
     in `input`, each containing the indices (in that dimension) of all non-zero elements
@@ -353,17 +350,17 @@ def nonzero(input, as_tuple=False):
     a 1-D tensor tuple of length `n`, and the shape of each 1-D tensor is [z, 1].
 
     Args:
-        inputs (Variable): The input tensor variable.
+        x (Tensor): The input tensor variable.
         as_tuple (bool): Return type, Tensor or tuple of Tensor.
 
     Returns:
-        Variable. The data type is int64.
+        Tensor. The data type is int64.
 
     Examples:
+
         .. code-block:: python
 
-            import paddle
-            paddle.disable_static()
+            import paddle
 
             x1 = paddle.to_tensor([[1.0, 0.0, 0.0],
                                    [0.0, 2.0, 0.0],
@@ -402,13 +399,13 @@ def nonzero(input, as_tuple=False):
             #[]
     """
     list_out = []
-    shape = input.shape
+    shape = x.shape
     rank = len(shape)
 
     if in_dygraph_mode():
-        outs = core.ops.where_index(input)
+        outs = core.ops.where_index(x)
     else:
-        outs = layers.where(input)
+        outs = layers.where(x)
 
     if not as_tuple:
         return outs
--
GitLab
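
Reviewer note: below is a minimal sanity check of the behavior documented above. It is
not part of the patch itself, and it assumes Paddle 2.0's imperative (dygraph) mode is
active by default, which is why the updated docstring example drops
`paddle.disable_static()`. The input mirrors the new `test_dygraph_api` case.

    .. code-block:: python

        import numpy as np
        import paddle

        # Assumes dygraph mode is on (the 2.0 default); on a build where static
        # mode is active, call paddle.disable_static() first.
        x = paddle.to_tensor(np.array([[True, False], [False, True]]))

        # Default (as_tuple=False): a single int64 Tensor of shape [z, n],
        # where z is the number of non-zero elements and n is the input rank.
        out = paddle.nonzero(x)
        print(out.numpy())           # [[0 0]
                                     #  [1 1]]

        # as_tuple=True: a tuple of n tensors, each of shape [z, 1], holding
        # the indices of the non-zero elements along one dimension.
        rows, cols = paddle.nonzero(x, as_tuple=True)
        print(rows.numpy().ravel())  # [0 1]
        print(cols.numpy().ravel())  # [0 1]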