diff --git a/python/paddle/fluid/optimizer.py b/python/paddle/fluid/optimizer.py
index 761f6409fed761c3799c4c32fccdec768aa2d5d9..4a9ce4454af0be86f784a8ea9bcbc81564d9a383 100755
--- a/python/paddle/fluid/optimizer.py
+++ b/python/paddle/fluid/optimizer.py
@@ -4884,29 +4884,35 @@ class LookaheadOptimizer(object):
             import paddle
             import paddle.fluid as fluid
             import numpy as np
+            import numpy.random as random
 
-            x = fluid.layers.data(name='x', shape=[2], dtype='float32')
-            label = fluid.layers.data(name="label", shape=[1], dtype="int64")
-            y = fluid.layers.fc(input=[x], size=2, act="softmax")
-            loss = fluid.layers.cross_entropy(input=y, label=label)
-            loss = fluid.layers.mean(x=loss)
-            sgd = fluid.optimizer.SGD(learning_rate=0.01)
-            optimizer = fluid.optimizer.LookaheadOptimizer(sgd,
-                                                           alpha=0.5,
-                                                           k=5)
-            optimizer.minimize(loss)
-            main_program = fluid.default_main_program()
-            place = fluid.CPUPlace()
-            exe = fluid.Executor(place)
-            exe.run(fluid.default_startup_program())
-
-            feeder = fluid.DataFeeder(feed_list=[x, label], place=place)
+            paddle.enable_static()
+
+            x = fluid.layers.data(name='x', shape=[2], dtype='float32')
+            label = fluid.layers.data(name="label", shape=[1], dtype="int64")
+            y = fluid.layers.fc(input=[x], size=2, act="softmax")
+            loss = fluid.layers.cross_entropy(input=y, label=label)
+            loss = fluid.layers.mean(x=loss)
+            sgd = fluid.optimizer.SGD(learning_rate=0.01)
+            optimizer = fluid.optimizer.LookaheadOptimizer(sgd,
+                                                           alpha=0.5,
+                                                           k=5)
+            optimizer.minimize(loss)
+            main_program = fluid.default_main_program()
+            place = fluid.CPUPlace()
+            exe = fluid.Executor(place)
+            exe.run(fluid.default_startup_program())
 
-            step = 0
-            while(step < 10):
-                step += 1
-                exe.run(fluid.default_main_program(),
-                        feed=feeder.feed(batch_data))
+            def train_reader(limit=5):
+                for i in range(limit):
+                    yield random.random([2]).astype('float32'), random.random([1]).astype('int64')
+
+            feeder = fluid.DataFeeder(feed_list=[x, label], place=place)
+            reader = paddle.batch(paddle.reader.shuffle(train_reader, buf_size=50000), batch_size=1)
+
+            for batch_data in reader():
+                exe.run(fluid.default_main_program(),
+                        feed=feeder.feed(batch_data))
 
         """
 
diff --git a/python/paddle/fluid/tests/unittests/test_nonzero_api.py b/python/paddle/fluid/tests/unittests/test_nonzero_api.py
index 0e68f9d5be761bf92330f5dd57ce540ffc973b1d..8569be82db09e0d2b8ac4ea41816075b1b1f1467 100644
--- a/python/paddle/fluid/tests/unittests/test_nonzero_api.py
+++ b/python/paddle/fluid/tests/unittests/test_nonzero_api.py
@@ -76,6 +76,15 @@ class TestNonZeroAPI(unittest.TestCase):
         expect_out = np.array([[0], [1]])
         self.assertTrue(np.allclose(expect_out, np.array(res)))
 
+    def test_dygraph_api(self):
+        data_x = np.array([[True, False], [False, True]])
+        with fluid.dygraph.guard():
+            x = fluid.dygraph.to_variable(data_x)
+            z = paddle.nonzero(x)
+            np_z = z.numpy()
+        expect_out = np.array([[0, 0], [1, 1]])
+        self.assertTrue(np.allclose(expect_out, np_z))
+
 
 if __name__ == "__main__":
     unittest.main()
diff --git a/python/paddle/optimizer/__init__.py b/python/paddle/optimizer/__init__.py
index 30de88cc29e7620fe96c09acbe9b7478e465f8a2..6f485e2e9d62fca3d50a972f1cde1ca07c8c4cfb 100644
--- a/python/paddle/optimizer/__init__.py
+++ b/python/paddle/optimizer/__init__.py
@@ -15,19 +15,17 @@
 __all__ = [
     'Adadelta', 'AdadeltaOptimizer', 'Adagrad', 'AdagradOptimizer', 'Adam',
     'Adamax', 'AdamW', 'DecayedAdagrad', 'DecayedAdagradOptimizer', 'Dpsgd',
-    'DpsgdOptimizer', 'Ftrl', 'FtrlOptimizer', 'LookaheadOptimizer',
-    'ModelAverage', 'Momentum', 'MomentumOptimizer', 'RMSProp', 'SGD',
-    'SGDOptimizer', 'Optimizer', '_LRScheduler', 'NoamLR', 'PiecewiseLR',
-    'NaturalExpLR', 'InverseTimeLR', 'PolynomialLR', 'LinearLrWarmup',
-    'ExponentialLR', 'MultiStepLR', 'StepLR', 'LambdaLR', 'ReduceLROnPlateau',
-    'CosineAnnealingLR'
+    'DpsgdOptimizer', 'Ftrl', 'FtrlOptimizer', 'Momentum', 'MomentumOptimizer',
+    'RMSProp', 'SGD', 'SGDOptimizer', 'Optimizer', '_LRScheduler', 'NoamLR',
+    'PiecewiseLR', 'NaturalExpLR', 'InverseTimeLR', 'PolynomialLR',
+    'LinearLrWarmup', 'ExponentialLR', 'MultiStepLR', 'StepLR', 'LambdaLR',
+    'ReduceLROnPlateau', 'CosineAnnealingLR'
 ]
 
 
 from ..fluid.optimizer import Momentum, Adagrad, Dpsgd, DecayedAdagrad, Ftrl,\
     AdagradOptimizer, DpsgdOptimizer, DecayedAdagradOptimizer, \
-    FtrlOptimizer, AdadeltaOptimizer, ModelAverage, \
-    LookaheadOptimizer
+    FtrlOptimizer, AdadeltaOptimizer
 
 from .optimizer import Optimizer
 from .adam import Adam
diff --git a/python/paddle/tensor/search.py b/python/paddle/tensor/search.py
index f55d285586f0ec6959573af64e720bea5de10c8d..19d8fc58b0e7e7162c777ac1a56c3b9c5ac08283 100644
--- a/python/paddle/tensor/search.py
+++ b/python/paddle/tensor/search.py
@@ -339,11 +339,8 @@ def index_select(x, index, axis=0, name=None):
     return out
 
 
-def nonzero(input, as_tuple=False):
+def nonzero(x, as_tuple=False):
     """
-    :alias_main: paddle.nonzero
-    :alias: paddle.nonzero,paddle.tensor.nonzero,paddle.tensor.search.nonzero
-
     Return a tensor containing the indices of all non-zero elements of the `input`
     tensor. If as_tuple is True, return a tuple of 1-D tensors, one for each dimension
     in `input`, each containing the indices (in that dimension) of all non-zero elements
@@ -353,17 +350,17 @@ def nonzero(input, as_tuple=False):
     a 1-D tensor tuple of length `n`, and the shape of each 1-D tensor is [z, 1].
 
     Args:
-        inputs (Variable): The input tensor variable.
+        x (Tensor): The input tensor variable.
         as_tuple (bool): Return type, Tensor or tuple of Tensor.
 
     Returns:
-        Variable. The data type is int64.
+        Tensor. The data type is int64.
 
     Examples:
+
         .. code-block:: python
 
-            import paddle
-            paddle.disable_static()
+            import paddle
 
             x1 = paddle.to_tensor([[1.0, 0.0, 0.0],
                                    [0.0, 2.0, 0.0],
@@ -402,13 +399,13 @@ def nonzero(input, as_tuple=False):
             #[]
 
     """
     list_out = []
-    shape = input.shape
+    shape = x.shape
     rank = len(shape)
     if in_dygraph_mode():
-        outs = core.ops.where_index(input)
+        outs = core.ops.where_index(x)
     else:
-        outs = layers.where(input)
+        outs = layers.where(x)
 
     if not as_tuple:
         return outs
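
A minimal sketch of the renamed API after this patch: `paddle.nonzero` now takes `x` as its first argument, and the docstring example runs in imperative mode without an explicit `paddle.disable_static()` call, which suggests imperative mode is the default on the target branch. The sample tensor and expected indices are taken from the `x1` example in the docstring above; the `[z, 1]` shape of the `as_tuple=True` components follows the docstring's description and is otherwise an assumption.

```python
import numpy as np
import paddle

# Imperative mode is assumed to be the default here (the diff removes
# paddle.disable_static() from the docstring example).
x1 = paddle.to_tensor([[1.0, 0.0, 0.0],
                       [0.0, 2.0, 0.0],
                       [0.0, 0.0, 3.0]])

# Default form: a single [z, n] int64 Tensor, one row per non-zero element.
out = paddle.nonzero(x1)
assert np.array_equal(out.numpy(), np.array([[0, 0], [1, 1], [2, 2]]))

# as_tuple=True: a tuple of n 1-D tensors of shape [z, 1], one per dimension.
rows, cols = paddle.nonzero(x1, as_tuple=True)
assert np.array_equal(rows.numpy().reshape(-1), np.array([0, 1, 2]))
assert np.array_equal(cols.numpy().reshape(-1), np.array([0, 1, 2]))
```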
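
The `paddle/optimizer/__init__.py` hunk drops `LookaheadOptimizer` and `ModelAverage` from the new `paddle.optimizer` namespace, while the first hunk shows `LookaheadOptimizer` still living in `paddle.fluid.optimizer`. A hedged sketch of the import path that should keep working after this patch, assuming no other re-export is introduced elsewhere in the PR:

```python
import paddle
import paddle.fluid as fluid

# The Lookahead example in this diff is a static-graph program.
paddle.enable_static()

# After this patch, import from the fluid namespace; the re-exports of
# LookaheadOptimizer and ModelAverage from paddle.optimizer are removed.
sgd = fluid.optimizer.SGD(learning_rate=0.01)
lookahead = fluid.optimizer.LookaheadOptimizer(sgd, alpha=0.5, k=5)
```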