diff --git a/python/paddle/fluid/data.py b/python/paddle/fluid/data.py
index 05ea66f54451ba08032bff4e7bc805bbffa15e73..31906c465a07430ebfebe5dfcb42b67c9fe5c742 100644
--- a/python/paddle/fluid/data.py
+++ b/python/paddle/fluid/data.py
@@ -73,8 +73,10 @@ def data(name, shape, dtype='float32', lod_level=0):
     Examples:
         .. code-block:: python
 
+            import paddle
             import paddle.fluid as fluid
             import numpy as np
+            paddle.enable_static()
 
             # Creates a variable with fixed size [3, 2, 1]
             # User can only feed data of the same shape to x
diff --git a/python/paddle/fluid/dataset.py b/python/paddle/fluid/dataset.py
index cf9d40d7b00c032679e6a71a0f6331a13d2379b2..438831208b66ac2ef98fb21b8ff5336598ebfc9e 100644
--- a/python/paddle/fluid/dataset.py
+++ b/python/paddle/fluid/dataset.py
@@ -727,6 +727,7 @@ class InMemoryDataset(DatasetBase):
         Examples:
             .. code-block:: python
 
+              # required: skiptest
               import paddle.fluid as fluid
               dataset = fluid.DatasetFactory().create_dataset("InMemoryDataset")
               filelist = ["a.txt", "b.txt"]
@@ -753,6 +754,7 @@ class InMemoryDataset(DatasetBase):
         Examples:
             .. code-block:: python
 
+              # required: skiptest
               import paddle.fluid as fluid
               dataset = fluid.DatasetFactory().create_dataset("InMemoryDataset")
               filelist = ["a.txt", "b.txt"]
@@ -777,6 +779,7 @@ class InMemoryDataset(DatasetBase):
         Examples:
            .. code-block:: python
 
+              # required: skiptest
              import paddle.fluid as fluid
              dataset = fluid.DatasetFactory().create_dataset("InMemoryDataset")
              filelist = ["a.txt", "b.txt"]
@@ -797,6 +800,7 @@ class InMemoryDataset(DatasetBase):
         Examples:
            .. code-block:: python
 
+              # required: skiptest
              import paddle.fluid as fluid
              dataset = fluid.DatasetFactory().create_dataset("InMemoryDataset")
              filelist = ["a.txt", "b.txt"]
@@ -819,6 +823,7 @@ class InMemoryDataset(DatasetBase):
         Examples:
             .. code-block:: python
 
+              # required: skiptest
               import paddle.fluid as fluid
               from paddle.fluid.incubate.fleet.parameter_server.pslib import fleet
               dataset = fluid.DatasetFactory().create_dataset("InMemoryDataset")
@@ -866,6 +871,7 @@ class InMemoryDataset(DatasetBase):
         Examples:
             .. code-block:: python
 
+              # required: skiptest
               import paddle.fluid as fluid
               from paddle.fluid.incubate.fleet.parameter_server.pslib import fleet
               dataset = fluid.DatasetFactory().create_dataset("InMemoryDataset")
@@ -925,6 +931,7 @@ class InMemoryDataset(DatasetBase):
         Examples:
             .. code-block:: python
 
+              # required: skiptest
               import paddle.fluid as fluid
               from paddle.fluid.incubate.fleet.parameter_server.pslib import fleet
               dataset = fluid.DatasetFactory().create_dataset("InMemoryDataset")
@@ -965,6 +972,7 @@ class InMemoryDataset(DatasetBase):
         Examples:
             .. code-block:: python
 
+              # required: skiptest
               import paddle.fluid as fluid
               from paddle.fluid.incubate.fleet.parameter_server.pslib import fleet
               dataset = fluid.DatasetFactory().create_dataset("InMemoryDataset")
diff --git a/python/paddle/fluid/input.py b/python/paddle/fluid/input.py
index d7a8e3bcb825243a52efb6a55c3d567b643c8d03..a83a230132300a88348d1bb0aec1274d4fa1713e 100644
--- a/python/paddle/fluid/input.py
+++ b/python/paddle/fluid/input.py
@@ -98,7 +98,10 @@ def one_hot(input, depth, allow_out_of_range=False):
     Examples:
         .. code-block:: python
 
+            import paddle
             import paddle.fluid as fluid
+            paddle.enable_static()
+
             # Correspond to the first example above, where label.shape is 4 and one_hot_label.shape is [4, 4].
label = fluid.data(name="label", shape=[4], dtype="int64") one_hot_label = fluid.one_hot(input=label, depth=4) diff --git a/python/paddle/fluid/layers/nn.py b/python/paddle/fluid/layers/nn.py index 8167a0c44373ca7e2ec34ec3bc6c4f8a82699ad8..515d4a5c0ef7cd1be858fe47f7a5b307567a01b2 100755 --- a/python/paddle/fluid/layers/nn.py +++ b/python/paddle/fluid/layers/nn.py @@ -4521,7 +4521,10 @@ def reduce_mean(input, dim=None, keep_dim=False, name=None): Examples: .. code-block:: python + import paddle import paddle.fluid as fluid + paddle.enable_static() + # x is a Tensor variable with following elements: # [[0.2, 0.3, 0.5, 0.9] # [0.1, 0.2, 0.6, 0.7]] @@ -5160,7 +5163,10 @@ def matmul(x, y, transpose_x=False, transpose_y=False, alpha=1.0, name=None): # x: [M], y: [N] # fluid.layers.matmul(x, y, True, True) # out: [M, N] + import paddle import paddle.fluid as fluid + paddle.enable_static() + x = fluid.layers.data(name='x', shape=[2, 3], dtype='float32') y = fluid.layers.data(name='y', shape=[3, 2], dtype='float32') out = fluid.layers.matmul(x, y, True, True) @@ -5999,7 +6005,10 @@ def one_hot(input, depth, allow_out_of_range=False): Examples: .. code-block:: python + import paddle import paddle.fluid as fluid + paddle.enable_static() + # Correspond to the first example above, where label.shape is [4, 1] and one_hot_label.shape is [4, 4]. label = fluid.data(name="label", shape=[4, 1], dtype="int64") one_hot_label = fluid.layers.one_hot(input=label, depth=4) @@ -8363,7 +8372,10 @@ def gather(input, index, overwrite=True): .. code-block:: python + import paddle import paddle.fluid as fluid + paddle.enable_static() + x = fluid.data(name='x', shape=[-1, 5], dtype='float32') index = fluid.data(name='index', shape=[-1, 1], dtype='int32') output = fluid.layers.gather(x, index) @@ -8453,7 +8465,10 @@ def gather_nd(input, index, name=None): .. code-block:: python + import paddle import paddle.fluid as fluid + paddle.enable_static() + x = fluid.data(name='x', shape=[3, 4, 5], dtype='float32') index = fluid.data(name='index', shape=[2, 2], dtype='int32') output = fluid.layers.gather_nd(x, index) @@ -8488,6 +8503,7 @@ def scatter(input, index, updates, name=None, overwrite=True): Output is obtained by updating the input on selected indices based on updates. .. code-block:: python + import numpy as np #input: @@ -8529,8 +8545,10 @@ def scatter(input, index, updates, name=None, overwrite=True): .. code-block:: python + import paddle import numpy as np import paddle.fluid as fluid + paddle.enable_static() input = fluid.layers.data(name='data', shape=[3, 2], dtype='float32', append_batch_size=False) index = fluid.layers.data(name='index', shape=[4], dtype='int64', append_batch_size=False) @@ -8871,8 +8889,10 @@ def selu(x, scale=None, alpha=None, name=None): .. code-block:: python + import paddle import paddle.fluid as fluid import numpy as np + paddle.enable_static() inputs = fluid.layers.data(name="x", shape=[2, 2], dtype="float32") output = fluid.layers.selu(inputs) @@ -10480,22 +10500,24 @@ def expand_as(x, target_tensor, name=None): Examples: .. 
         .. code-block:: python
 
-        import paddle.fluid as fluid
-        import numpy as np
+            import paddle
+            import paddle.fluid as fluid
+            import numpy as np
+            paddle.enable_static()
 
-        data = fluid.layers.data(name="data", shape=[-1,10], dtype='float64')
-        target_tensor = fluid.layers.data(
-            name="target_tensor", shape=[-1,20], dtype='float64')
-        result = fluid.layers.expand_as(x=data, target_tensor=target_tensor)
-        use_cuda = False
-        place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
-        exe = fluid.Executor(place)
-        exe.run(fluid.default_startup_program())
-        x = np.random.rand(3,10)
-        y = np.random.rand(3,20)
-        output= exe.run(feed={"data":x,"target_tensor":y},fetch_list=[result.name])
-        print(output[0].shape)
-        #(3,20)
+            data = fluid.layers.data(name="data", shape=[-1,10], dtype='float64')
+            target_tensor = fluid.layers.data(
+                name="target_tensor", shape=[-1,20], dtype='float64')
+            result = fluid.layers.expand_as(x=data, target_tensor=target_tensor)
+            use_cuda = False
+            place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
+            exe = fluid.Executor(place)
+            exe.run(fluid.default_startup_program())
+            x = np.random.rand(3,10)
+            y = np.random.rand(3,20)
+            output= exe.run(feed={"data":x,"target_tensor":y},fetch_list=[result.name])
+            print(output[0].shape)
+            #(3,20)
 
     """
     if in_dygraph_mode():
@@ -10576,7 +10598,9 @@ def uniform_random_batch_size_like(input,
     Examples:
         .. code-block:: python
 
+            import paddle
             import paddle.fluid as fluid
+            paddle.enable_static()
 
             # example 1:
             input = fluid.data(name="input", shape=[1, 3], dtype='float32')
@@ -10649,7 +10673,9 @@ def gaussian_random(shape,
     Examples:
        .. code-block:: python
 
+           import paddle
            import paddle.fluid as fluid
+           paddle.enable_static()
 
            # example 1:
            # attr shape is a list which doesn't contain Tensor.
@@ -10677,7 +10703,8 @@ def gaussian_random(shape,
 
        .. code-block:: python
 
-           # declarative mode
+           # declarative mode
+           # required: skiptest
            import numpy as np
            from paddle import fluid
 
@@ -10816,7 +10843,10 @@ def gaussian_random_batch_size_like(input,
     Examples:
         .. code-block:: python
 
+            import paddle
             import paddle.fluid as fluid
+            paddle.enable_static()
+
             input = fluid.data(name="input", shape=[13, 11], dtype='float32')
             out = fluid.layers.gaussian_random_batch_size_like(
@@ -11422,7 +11452,9 @@ def size(input):
     Examples:
         .. code-block:: python
 
+            import paddle
             import paddle.fluid.layers as layers
+            paddle.enable_static()
 
             input = layers.data(
                 name="input", shape=[3, 100], dtype="float32", append_batch_size=False)
@@ -12525,7 +12557,10 @@ def mean(x, name=None):
     Examples:
         .. code-block:: python
 
+            import paddle
             import paddle.fluid as fluid
+            paddle.enable_static()
+
             input = fluid.layers.data(
                 name='data', shape=[2, 3], dtype='float32')
             mean = fluid.layers.mean(input)
@@ -15195,7 +15230,9 @@ def uniform_random(shape, dtype='float32', min=-1.0, max=1.0, seed=0,
     Examples:
         .. code-block:: python
 
+            import paddle
             import paddle.fluid as fluid
+            paddle.enable_static()
 
             # example 1:
             # attr shape is a list which doesn't contain Tensor.
diff --git a/python/paddle/fluid/layers/rnn.py b/python/paddle/fluid/layers/rnn.py
index ec20712cac76043c7632aaa060e07153bdde4fe7..c3d8fbfa55307451cd2f32b4986de01ed0bcfc46 100644
--- a/python/paddle/fluid/layers/rnn.py
+++ b/python/paddle/fluid/layers/rnn.py
@@ -2528,18 +2528,21 @@ def lstm(input,
     Examples:
         .. code-block:: python
 
+            import paddle
             import paddle.fluid as fluid
             import paddle.fluid.layers as layers
+            paddle.enable_static()
 
             emb_dim = 256
             vocab_size = 10000
             data = fluid.data(name='x', shape=[None, 100], dtype='int64')
             emb = fluid.embedding(input=data, size=[vocab_size, emb_dim], is_sparse=True)
-            batch_size = 20
+            batch_size = 100
             dropout_prob = 0.2
             input_size = 100
             hidden_size = 150
             num_layers = 1
+            max_len = 12
             init_h = layers.fill_constant( [num_layers, batch_size, hidden_size], 'float32', 0.0 )
             init_c = layers.fill_constant( [num_layers, batch_size, hidden_size], 'float32', 0.0 )
             rnn_out, last_h, last_c = layers.lstm( emb, init_h, init_c, \
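
Not part of the patch: a minimal sketch of why every touched example now calls paddle.enable_static(). It assumes Paddle 2.0 or later, where dynamic graph mode is the default, and reuses the fluid.one_hot example from python/paddle/fluid/input.py above; the feed values are illustrative only.

.. code-block:: python

    # Sketch only (assumes Paddle >= 2.0): the fluid examples in this patch build
    # static-graph programs, so they must leave the default dynamic graph mode first.
    import numpy as np

    import paddle
    import paddle.fluid as fluid

    paddle.enable_static()

    # Same graph as the fluid.one_hot docstring example in input.py.
    label = fluid.data(name="label", shape=[4], dtype="int64")
    one_hot_label = fluid.one_hot(input=label, depth=4)

    # Run it once so the snippet is end-to-end rather than declaration-only.
    exe = fluid.Executor(fluid.CPUPlace())
    exe.run(fluid.default_startup_program())
    out, = exe.run(feed={"label": np.array([1, 1, 3, 0], dtype="int64")},
                   fetch_list=[one_hot_label])
    print(out.shape)  # (4, 4)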