diff --git a/python/paddle/distribution.py b/python/paddle/distribution.py
index ad134b4591e8ddd638675e9bb88f2958b4b4648d..7f0d71e3877f7c0c5ac8cd85d7eba6db60cfd718 100644
--- a/python/paddle/distribution.py
+++ b/python/paddle/distribution.py
@@ -197,11 +197,9 @@ class Uniform(Distribution):
     Examples:
         .. code-block:: python
 
-          import numpy as np
           import paddle
           from paddle.distribution import Uniform
 
-          paddle.disable_static()
           # Without broadcasting, a single uniform distribution [3, 4]:
           u1 = Uniform(low=3.0, high=4.0)
           # 2 distributions [1, 3], [2, 4]
@@ -214,8 +212,7 @@ class Uniform(Distribution):
           u4 = Uniform(low=3.0, high=[5.0, 6.0, 7.0])
 
           # Complete example
-          value_npdata = np.array([0.8], dtype="float32")
-          value_tensor = paddle.to_tensor(value_npdata)
+          value_tensor = paddle.to_tensor([0.8], dtype="float32")
 
           uniform = Uniform([0.], [2.])
@@ -419,11 +416,9 @@ class Normal(Distribution):
     Examples:
         .. code-block:: python
 
-          import numpy as np
           import paddle
           from paddle.distribution import Normal
 
-          paddle.disable_static()
           # Define a single scalar Normal distribution.
           dist = Normal(loc=0., scale=3.)
           # Define a batch of two scalar valued Normals.
@@ -437,8 +432,7 @@ class Normal(Distribution):
           dist = Normal(loc=1., scale=[11., 22.])
 
           # Complete example
-          value_npdata = np.array([0.8], dtype="float32")
-          value_tensor = paddle.to_tensor(value_npdata)
+          value_tensor = paddle.to_tensor([0.8], dtype="float32")
 
           normal_a = Normal([0.], [1.])
           normal_b = Normal([0.5], [2.])
@@ -672,13 +666,13 @@ class Categorical(Distribution):
             paddle.seed(100) # on CPU device
             x = paddle.rand([6])
-            print(x.numpy())
+            print(x)
             # [0.5535528  0.20714243 0.01162981
             #  0.51577556 0.36369765 0.2609165 ]
 
             paddle.seed(200) # on CPU device
             y = paddle.rand([6])
-            print(y.numpy())
+            print(y)
             # [0.77663314 0.90824795 0.15685187
             #  0.04279523 0.34468332 0.7955718 ]
@@ -746,7 +740,7 @@ class Categorical(Distribution):
             paddle.seed(100) # on CPU device
             x = paddle.rand([6])
-            print(x.numpy())
+            print(x)
             # [0.5535528  0.20714243 0.01162981
             #  0.51577556 0.36369765 0.2609165 ]
@@ -793,13 +787,13 @@ class Categorical(Distribution):
             paddle.seed(100) # on CPU device
             x = paddle.rand([6])
-            print(x.numpy())
+            print(x)
             # [0.5535528  0.20714243 0.01162981
             #  0.51577556 0.36369765 0.2609165 ]
 
             paddle.seed(200) # on CPU device
             y = paddle.rand([6])
-            print(y.numpy())
+            print(y)
             # [0.77663314 0.90824795 0.15685187
             #  0.04279523 0.34468332 0.7955718 ]
@@ -844,7 +838,7 @@ class Categorical(Distribution):
             paddle.seed(100) # on CPU device
             x = paddle.rand([6])
-            print(x.numpy())
+            print(x)
             # [0.5535528  0.20714243 0.01162981
             #  0.51577556 0.36369765 0.2609165 ]
@@ -889,7 +883,7 @@ class Categorical(Distribution):
             paddle.seed(100) # on CPU device
             x = paddle.rand([6])
-            print(x.numpy())
+            print(x)
             # [0.5535528  0.20714243 0.01162981
             #  0.51577556 0.36369765 0.2609165 ]
@@ -955,7 +949,7 @@ class Categorical(Distribution):
             paddle.seed(100) # on CPU device
             x = paddle.rand([6])
-            print(x.numpy())
+            print(x)
             # [0.5535528  0.20714243 0.01162981
             #  0.51577556 0.36369765 0.2609165 ]
diff --git a/python/paddle/fluid/layers/loss.py b/python/paddle/fluid/layers/loss.py
index 1e09bfc42cb1bf8cd1942dad3e1c4cd1206249fc..9c0ce07c8e428ed57c5b599e3eeb89d3c9f675cc 100644
--- a/python/paddle/fluid/layers/loss.py
+++ b/python/paddle/fluid/layers/loss.py
@@ -659,12 +659,12 @@ def nce(input,
     ${comment}
 
     Args:
-        input (Variable): Input variable, 2-D tensor with shape [batch_size, dim],
+        input (Tensor): Input tensor, 2-D tensor with shape [batch_size, dim],
             and data type is float32 or float64.
-        label (Variable): Input label, 2-D tensor with shape [batch_size, num_true_class],
+        label (Tensor): Input label, 2-D tensor with shape [batch_size, num_true_class],
             and data type is int64.
         num_total_classes (int):${num_total_classes_comment}.
-        sample_weight (Variable|None): A Variable of shape [batch_size, 1]
+        sample_weight (Tensor|None): A Tensor of shape [batch_size, 1]
            storing a weight for each sample. The default weight for each sample is 1.0.
        param_attr (ParamAttr|None): To specify the weight parameter attribute.
@@ -688,19 +688,21 @@ def nce(input,
             the weight@GRAD and bias@GRAD will be changed to SelectedRows. Default False.
 
     Returns:
-        Variable: The output nce loss.
+        Tensor: The output nce loss.
 
     Examples:
         .. code-block:: python
 
-            import paddle.fluid as fluid
+            import paddle
             import numpy as np
 
+            paddle.enable_static()
+
             window_size = 5
             words = []
             for i in range(window_size):
-                words.append(fluid.data(
+                words.append(paddle.static.data(
                     name='word_{0}'.format(i), shape=[-1, 1], dtype='int64'))
 
             dict_size = 10000
@@ -711,18 +713,18 @@ def nce(input,
                 if i == label_word:
                     continue
 
-                emb = fluid.layers.embedding(input=words[i], size=[dict_size, 32],
-                                             param_attr='embed', is_sparse=True)
+                emb = paddle.static.nn.embedding(input=words[i], size=[dict_size, 32],
+                                                 param_attr='embed', is_sparse=True)
                 embs.append(emb)
 
-            embs = fluid.layers.concat(input=embs, axis=1)
-            loss = fluid.layers.nce(input=embs, label=words[label_word],
-                      num_total_classes=dict_size, param_attr='nce.w_0',
-                      bias_attr='nce.b_0')
+            embs = paddle.concat(x=embs, axis=1)
+            loss = paddle.static.nn.nce(input=embs, label=words[label_word],
+                      num_total_classes=dict_size, param_attr='nce.w_0',
+                      bias_attr='nce.b_0')
 
             #or use custom distribution
             dist = np.array([0.05,0.5,0.1,0.3,0.05])
-            loss = fluid.layers.nce(input=embs, label=words[label_word],
+            loss = paddle.static.nn.nce(input=embs, label=words[label_word],
                       num_total_classes=5, param_attr='nce.w_1',
                       bias_attr='nce.b_1',
                       num_neg_samples=3,
diff --git a/python/paddle/tensor/random.py b/python/paddle/tensor/random.py
index 2971c3087bc318149b39befdfe6a78d5af34a5be..ba7ca417382e26258c24af81ad64bb65d32bf83e 100644
--- a/python/paddle/tensor/random.py
+++ b/python/paddle/tensor/random.py
@@ -113,13 +113,13 @@ def multinomial(x, num_samples=1, replacement=False, name=None):
             paddle.seed(100) # on CPU device
             x = paddle.rand([2,4])
-            print(x.numpy())
+            print(x)
             # [[0.5535528  0.20714243 0.01162981 0.51577556]
             # [0.36369765 0.2609165  0.18905126 0.5621971 ]]
 
             paddle.seed(200) # on CPU device
             out1 = paddle.multinomial(x, num_samples=5, replacement=True)
-            print(out1.numpy())
+            print(out1)
             # [[3 3 0 0 0]
             # [3 3 3 1 0]]
@@ -129,7 +129,7 @@ def multinomial(x, num_samples=1, replacement=False, name=None):
             paddle.seed(300) # on CPU device
             out3 = paddle.multinomial(x, num_samples=3)
-            print(out3.numpy())
+            print(out3)
             # [[3 0 1]
             # [3 1 0]]
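As a sanity check on the pattern these docstrings now use, here is a minimal dygraph sketch, assuming Paddle 2.x where dynamic mode is on by default (so `paddle.disable_static()` is no longer needed); the variable names are illustrative and the sampled values are not fixed outputs:

.. code-block:: python

    import paddle
    from paddle.distribution import Uniform

    # Build the input tensor directly; no numpy round-trip is needed.
    value_tensor = paddle.to_tensor([0.8], dtype="float32")

    uniform = Uniform([0.], [2.])
    sample = uniform.sample([2])           # two draws from U(0, 2)
    lp = uniform.log_prob(value_tensor)    # log density at 0.8

    # Tensors print readably on their own, so print(t) replaces print(t.numpy()).
    print(sample)
    print(lp)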