Unverified commit 54c368db, authored by 123malin, committed by GitHub

[API 2.0: doc] fix doc of nonzero (#27685)

* test=develop, update example
Parent commit: 69a3339a
......@@ -4884,6 +4884,9 @@ class LookaheadOptimizer(object):
import paddle
import paddle.fluid as fluid
import numpy as np
import numpy.random as random
paddle.enable_static()
x = fluid.layers.data(name='x', shape=[2], dtype='float32')
label = fluid.layers.data(name="label", shape=[1], dtype="int64")
......@@ -4900,11 +4903,14 @@ class LookaheadOptimizer(object):
exe = fluid.Executor(place)
exe.run(fluid.default_startup_program())
def train_reader(limit=5):
    """Yield `limit` random (feature, label) training samples.

    Each sample is a pair: a float32 vector of shape (2,) and an int64
    array of shape (1,), both drawn from numpy's uniform [0, 1) generator.
    """
    produced = 0
    while produced < limit:
        features = random.random([2]).astype('float32')
        label = random.random([1]).astype('int64')
        yield features, label
        produced += 1
feeder = fluid.DataFeeder(feed_list=[x, label], place=place)
reader = paddle.batch(paddle.reader.shuffle(train_reader, buf_size=50000),batch_size=1)
step = 0
while(step < 10):
step += 1
for batch_data in reader():
exe.run(fluid.default_main_program(),
feed=feeder.feed(batch_data))
......
......@@ -76,6 +76,14 @@ class TestNonZeroAPI(unittest.TestCase):
expect_out = np.array([[0], [1]])
self.assertTrue(np.allclose(expect_out, np.array(res)))
def test_dygraph_api(self):
    """Dygraph-mode check: paddle.nonzero on a 2x2 bool tensor returns
    the indices of its True entries, one [row, col] pair per non-zero
    element.
    """
    data_x = np.array([[True, False], [False, True]])
    with fluid.dygraph.guard():
        x = fluid.dygraph.to_variable(data_x)
        z = paddle.nonzero(x)
        np_z = z.numpy()
    expect_out = np.array([[0, 0], [1, 1]])
    # Bug fix: the original body computed expect_out but never compared
    # it against np_z, so this test passed no matter what nonzero
    # returned. Assert the actual result matches the expectation.
    self.assertTrue(np.allclose(expect_out, np_z))
if __name__ == "__main__":
unittest.main()
......@@ -15,19 +15,17 @@
__all__ = [
'Adadelta', 'AdadeltaOptimizer', 'Adagrad', 'AdagradOptimizer', 'Adam',
'Adamax', 'AdamW', 'DecayedAdagrad', 'DecayedAdagradOptimizer', 'Dpsgd',
'DpsgdOptimizer', 'Ftrl', 'FtrlOptimizer', 'LookaheadOptimizer',
'ModelAverage', 'Momentum', 'MomentumOptimizer', 'RMSProp', 'SGD',
'SGDOptimizer', 'Optimizer', '_LRScheduler', 'NoamLR', 'PiecewiseLR',
'NaturalExpLR', 'InverseTimeLR', 'PolynomialLR', 'LinearLrWarmup',
'ExponentialLR', 'MultiStepLR', 'StepLR', 'LambdaLR', 'ReduceLROnPlateau',
'CosineAnnealingLR'
'DpsgdOptimizer', 'Ftrl', 'FtrlOptimizer', 'Momentum', 'MomentumOptimizer',
'RMSProp', 'SGD', 'SGDOptimizer', 'Optimizer', '_LRScheduler', 'NoamLR',
'PiecewiseLR', 'NaturalExpLR', 'InverseTimeLR', 'PolynomialLR',
'LinearLrWarmup', 'ExponentialLR', 'MultiStepLR', 'StepLR', 'LambdaLR',
'ReduceLROnPlateau', 'CosineAnnealingLR'
]
from ..fluid.optimizer import Momentum, Adagrad, Dpsgd, DecayedAdagrad, Ftrl,\
AdagradOptimizer, DpsgdOptimizer, DecayedAdagradOptimizer, \
FtrlOptimizer, AdadeltaOptimizer, ModelAverage, \
LookaheadOptimizer
FtrlOptimizer, AdadeltaOptimizer
from .optimizer import Optimizer
from .adam import Adam
......
......@@ -339,11 +339,8 @@ def index_select(x, index, axis=0, name=None):
return out
def nonzero(input, as_tuple=False):
def nonzero(x, as_tuple=False):
"""
:alias_main: paddle.nonzero
:alias: paddle.nonzero,paddle.tensor.nonzero,paddle.tensor.search.nonzero
Return a tensor containing the indices of all non-zero elements of the `input`
tensor. If as_tuple is True, return a tuple of 1-D tensors, one for each dimension
in `input`, each containing the indices (in that dimension) of all non-zero elements
......@@ -353,17 +350,17 @@ def nonzero(input, as_tuple=False):
a 1-D tensor tuple of length `n`, and the shape of each 1-D tensor is [z, 1].
Args:
inputs (Variable): The input tensor variable.
x (Tensor): The input tensor variable.
as_tuple (bool): Return type, Tensor or tuple of Tensor.
Returns:
Variable. The data type is int64.
Tensor. The data type is int64.
Examples:
.. code-block:: python
import paddle
paddle.disable_static()
import paddle
x1 = paddle.to_tensor([[1.0, 0.0, 0.0],
[0.0, 2.0, 0.0],
......@@ -402,13 +399,13 @@ def nonzero(input, as_tuple=False):
#[]
"""
list_out = []
shape = input.shape
shape = x.shape
rank = len(shape)
if in_dygraph_mode():
outs = core.ops.where_index(input)
outs = core.ops.where_index(x)
else:
outs = layers.where(input)
outs = layers.where(x)
if not as_tuple:
return outs
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Register to post a comment