Unverified commit 26fba07c authored by megemini, committed by GitHub

[Add] Introduce xdoctest checks into the Paddle CI (#55295)

* [Add]Add Xdoctester

* [Fix]fix beta docstring

* [Doctest]change dirichlet docstring

* [Doctest]change gumbel docstring

* [Doctest]change bernoulli docstring

* [Doctest]change categorical docstring

* [Doctest]change ops.py docstring

* [Doctest]change conv docstring

* [Doctest]change distance docstring, test=docs_preview

* [Change]add ref

* [Change]patch xdoctest debug
Parent e64e1b97
......@@ -72,23 +72,23 @@ class Bernoulli(exponential_family.ExponentialFamily):
.. code-block:: python
import paddle
from paddle.distribution import Bernoulli
>>> import paddle
>>> from paddle.distribution import Bernoulli
# init `probs` with a float
rv = Bernoulli(probs=0.3)
>>> # init `probs` with a float
>>> rv = Bernoulli(probs=0.3)
print(rv.mean)
# Tensor(shape=[], dtype=float32, place=Place(cpu), stop_gradient=True,
# 0.30000001)
>>> print(rv.mean)
Tensor(shape=[], dtype=float32, place=Place(cpu), stop_gradient=True,
0.30000001)
print(rv.variance)
# Tensor(shape=[], dtype=float32, place=Place(cpu), stop_gradient=True,
# 0.21000001)
>>> print(rv.variance)
Tensor(shape=[], dtype=float32, place=Place(cpu), stop_gradient=True,
0.21000001)
print(rv.entropy())
# Tensor(shape=[], dtype=float32, place=Place(cpu), stop_gradient=True,
# 0.61086434)
>>> print(rv.entropy())
Tensor(shape=[], dtype=float32, place=Place(cpu), stop_gradient=True,
0.61086434)
"""
def __init__(self, probs, name=None):
......@@ -156,24 +156,24 @@ class Bernoulli(exponential_family.ExponentialFamily):
.. code-block:: python
import paddle
from paddle.distribution import Bernoulli
>>> import paddle
>>> from paddle.distribution import Bernoulli
rv = Bernoulli(paddle.full((), 0.3))
print(rv.sample([100]).shape)
# [100]
>>> rv = Bernoulli(paddle.full((1), 0.3))
>>> print(rv.sample([100]).shape)
[100, 1]
rv = Bernoulli(paddle.to_tensor(0.3))
print(rv.sample([100]).shape)
# [100, 1]
>>> rv = Bernoulli(paddle.to_tensor(0.3))
>>> print(rv.sample([100]).shape)
[100]
rv = Bernoulli(paddle.to_tensor([0.3, 0.5]))
print(rv.sample([100]).shape)
# [100, 2]
>>> rv = Bernoulli(paddle.to_tensor([0.3, 0.5]))
>>> print(rv.sample([100]).shape)
[100, 2]
rv = Bernoulli(paddle.to_tensor([0.3, 0.5]))
print(rv.sample([100, 2]).shape)
# [100, 2, 2]
>>> rv = Bernoulli(paddle.to_tensor([0.3, 0.5]))
>>> print(rv.sample([100, 2]).shape)
[100, 2, 2]
"""
name = self.name + '_sample'
if not in_dynamic_mode():
......@@ -211,48 +211,48 @@ class Bernoulli(exponential_family.ExponentialFamily):
.. code-block:: python
import paddle
from paddle.distribution import Bernoulli
paddle.seed(2023)
rv = Bernoulli(paddle.full((), 0.3))
print(rv.sample([100]).shape)
# [100]
rv = Bernoulli(0.3)
print(rv.rsample([100]).shape)
# [100, 1]
rv = Bernoulli(paddle.to_tensor([0.3, 0.5]))
print(rv.rsample([100]).shape)
# [100, 2]
rv = Bernoulli(paddle.to_tensor([0.3, 0.5]))
print(rv.rsample([100, 2]).shape)
# [100, 2, 2]
# `rsample` has to be followed by a `sigmoid`
rv = Bernoulli(0.3)
rsample = rv.rsample([3, ])
rsample_sigmoid = paddle.nn.functional.sigmoid(rsample)
print(rsample, rsample_sigmoid)
# Tensor(shape=[3, 1], dtype=float32, place=Place(cpu), stop_gradient=True,
# [[-0.88315082],
# [-0.62347704],
# [-0.31513220]]) Tensor(shape=[3, 1], dtype=float32, place=Place(cpu), stop_gradient=True,
# [[0.29252526],
# [0.34899110],
# [0.42186251]])
# The smaller the `temperature`, the distribution of `rsample` closer to `sample`, with `probs` of 0.3.
print(paddle.nn.functional.sigmoid(rv.rsample([1000, ], temperature=1.0)).sum())
# Tensor(shape=[], dtype=float32, place=Place(cpu), stop_gradient=True,
# 361.06829834)
print(paddle.nn.functional.sigmoid(rv.rsample([1000, ], temperature=0.1)).sum())
# Tensor(shape=[], dtype=float32, place=Place(cpu), stop_gradient=True,
# 288.66418457)
>>> import paddle
>>> from paddle.distribution import Bernoulli
>>> rv = Bernoulli(paddle.full((1), 0.3))
>>> print(rv.sample([100]).shape)
[100, 1]
>>> rv = Bernoulli(0.3)
>>> print(rv.rsample([100]).shape)
[100]
>>> rv = Bernoulli(paddle.to_tensor([0.3, 0.5]))
>>> print(rv.rsample([100]).shape)
[100, 2]
>>> rv = Bernoulli(paddle.to_tensor([0.3, 0.5]))
>>> print(rv.rsample([100, 2]).shape)
[100, 2, 2]
>>> # `rsample` has to be followed by a `sigmoid`
>>> # doctest: +SKIP
>>> rv = Bernoulli(0.3)
>>> rsample = rv.rsample([3, ])
>>> rsample_sigmoid = paddle.nn.functional.sigmoid(rsample)
>>> print(rsample, rsample_sigmoid)
Tensor(shape=[3, 1], dtype=float32, place=Place(cpu), stop_gradient=True,
[[-0.88315082],
[-0.62347704],
[-0.31513220]])
Tensor(shape=[3, 1], dtype=float32, place=Place(cpu), stop_gradient=True,
[[0.29252526],
[0.34899110],
[0.42186251]])
>>> # The smaller the `temperature`, the closer the distribution of `rsample` is to `sample`, with `probs` of 0.3.
>>> print(paddle.nn.functional.sigmoid(rv.rsample([1000, ], temperature=1.0)).sum())
Tensor(shape=[], dtype=float32, place=Place(cpu), stop_gradient=True,
361.06829834)
>>> print(paddle.nn.functional.sigmoid(rv.rsample([1000, ], temperature=0.1)).sum())
Tensor(shape=[], dtype=float32, place=Place(cpu), stop_gradient=True,
288.66418457)
"""
name = self.name + '_rsample'
if not in_dynamic_mode():
......@@ -308,13 +308,13 @@ class Bernoulli(exponential_family.ExponentialFamily):
.. code-block:: python
import paddle
from paddle.distribution import Bernoulli
>>> import paddle
>>> from paddle.distribution import Bernoulli
rv = Bernoulli(0.3)
print(rv.cdf(paddle.to_tensor([1.0])))
# Tensor(shape=[1], dtype=float32, place=Place(cpu), stop_gradient=True,
# [1.])
>>> rv = Bernoulli(0.3)
>>> print(rv.cdf(paddle.to_tensor([1.0])))
Tensor(shape=[1], dtype=float32, place=Place(cpu), stop_gradient=True,
[1.])
"""
name = self.name + '_cdf'
if not in_dynamic_mode():
......@@ -346,13 +346,13 @@ class Bernoulli(exponential_family.ExponentialFamily):
.. code-block:: python
import paddle
from paddle.distribution import Bernoulli
>>> import paddle
>>> from paddle.distribution import Bernoulli
rv = Bernoulli(0.3)
print(rv.log_prob(paddle.to_tensor([1.0])))
# Tensor(shape=[1], dtype=float32, place=Place(cpu), stop_gradient=True,
# [-1.20397282])
>>> rv = Bernoulli(0.3)
>>> print(rv.log_prob(paddle.to_tensor([1.0])))
Tensor(shape=[1], dtype=float32, place=Place(cpu), stop_gradient=True,
[-1.20397282])
"""
name = self.name + '_log_prob'
if not in_dynamic_mode():
......@@ -385,13 +385,13 @@ class Bernoulli(exponential_family.ExponentialFamily):
.. code-block:: python
import paddle
from paddle.distribution import Bernoulli
>>> import paddle
>>> from paddle.distribution import Bernoulli
rv = Bernoulli(0.3)
print(rv.prob(paddle.to_tensor([1.0])))
# Tensor(shape=[1], dtype=float32, place=Place(cpu), stop_gradient=True,
# [0.29999998])
>>> rv = Bernoulli(0.3)
>>> print(rv.prob(paddle.to_tensor([1.0])))
Tensor(shape=[1], dtype=float32, place=Place(cpu), stop_gradient=True,
[0.29999998])
"""
name = self.name + '_prob'
if not in_dynamic_mode():
......@@ -415,13 +415,13 @@ class Bernoulli(exponential_family.ExponentialFamily):
.. code-block:: python
import paddle
from paddle.distribution import Bernoulli
>>> import paddle
>>> from paddle.distribution import Bernoulli
rv = Bernoulli(0.3)
print(rv.entropy())
# Tensor(shape=[], dtype=float32, place=Place(cpu), stop_gradient=True,
# 0.61086434)
>>> rv = Bernoulli(0.3)
>>> print(rv.entropy())
Tensor(shape=[], dtype=float32, place=Place(cpu), stop_gradient=True,
0.61086434)
"""
name = self.name + '_entropy'
......@@ -448,15 +448,15 @@ class Bernoulli(exponential_family.ExponentialFamily):
.. code-block:: python
import paddle
from paddle.distribution import Bernoulli
>>> import paddle
>>> from paddle.distribution import Bernoulli
rv = Bernoulli(0.3)
rv_other = Bernoulli(0.7)
>>> rv = Bernoulli(0.3)
>>> rv_other = Bernoulli(0.7)
print(rv.kl_divergence(rv_other))
# Tensor(shape=[], dtype=float32, place=Place(cpu), stop_gradient=True,
# 0.33891910)
>>> print(rv.kl_divergence(rv_other))
Tensor(shape=[], dtype=float32, place=Place(cpu), stop_gradient=True,
0.33891910)
"""
name = self.name + '_kl_divergence'
if not in_dynamic_mode():
......
......@@ -55,31 +55,35 @@ class Beta(exponential_family.ExponentialFamily):
.. code-block:: python
import paddle
# scale input
beta = paddle.distribution.Beta(alpha=0.5, beta=0.5)
print(beta.mean)
# Tensor(shape=[], dtype=float32, place=CUDAPlace(0), stop_gradient=True,
# 0.50000000)
print(beta.variance)
# Tensor(shape=[], dtype=float32, place=CUDAPlace(0), stop_gradient=True,
# 0.12500000)
print(beta.entropy())
# Tensor(shape=[], dtype=float32, place=CUDAPlace(0), stop_gradient=True,
# 0.12500000)
# tensor input with broadcast
beta = paddle.distribution.Beta(alpha=paddle.to_tensor([0.2, 0.4]), beta=0.6)
print(beta.mean)
# Tensor(shape=[2], dtype=float32, place=CUDAPlace(0), stop_gradient=True,
# [0.25000000, 0.40000001])
print(beta.variance)
# Tensor(shape=[2], dtype=float32, place=CUDAPlace(0), stop_gradient=True,
# [0.10416666, 0.12000000])
print(beta.entropy())
# Tensor(shape=[2], dtype=float32, place=CUDAPlace(0), stop_gradient=True,
# [-1.91923141, -0.38095069])
>>> import paddle
>>> # scale input
>>> beta = paddle.distribution.Beta(alpha=0.5, beta=0.5)
>>> print(beta.mean)
Tensor(shape=[], dtype=float32, place=Place(cpu), stop_gradient=True,
0.50000000)
>>> print(beta.variance)
Tensor(shape=[], dtype=float32, place=Place(cpu), stop_gradient=True,
0.12500000)
>>> print(beta.entropy())
Tensor(shape=[], dtype=float32, place=Place(cpu), stop_gradient=True,
-0.24156499)
>>> # tensor input with broadcast
>>> beta = paddle.distribution.Beta(alpha=paddle.to_tensor([0.2, 0.4]), beta=0.6)
>>> print(beta.mean)
Tensor(shape=[2], dtype=float32, place=Place(cpu), stop_gradient=True,
[0.25000000, 0.40000001])
>>> print(beta.variance)
Tensor(shape=[2], dtype=float32, place=Place(cpu), stop_gradient=True,
[0.10416666, 0.12000000])
>>> print(beta.entropy())
Tensor(shape=[2], dtype=float32, place=Place(cpu), stop_gradient=True,
[-1.91923141, -0.38095081])
"""
def __init__(self, alpha, beta):
......
......@@ -46,42 +46,48 @@ class Categorical(distribution.Distribution):
Examples:
.. code-block:: python
import paddle
from paddle.distribution import Categorical
paddle.seed(100) # on CPU device
x = paddle.rand([6])
print(x)
# [0.5535528 0.20714243 0.01162981
# 0.51577556 0.36369765 0.2609165 ]
paddle.seed(200) # on CPU device
y = paddle.rand([6])
print(y)
# [0.77663314 0.90824795 0.15685187
# 0.04279523 0.34468332 0.7955718 ]
cat = Categorical(x)
cat2 = Categorical(y)
paddle.seed(1000) # on CPU device
cat.sample([2,3])
# [[0, 0, 5],
# [3, 4, 5]]
cat.entropy()
# [1.77528]
cat.kl_divergence(cat2)
# [0.071952]
value = paddle.to_tensor([2,1,3])
cat.probs(value)
# [0.00608027 0.108298 0.269656]
cat.log_prob(value)
# [-5.10271 -2.22287 -1.31061]
>>> import paddle
>>> from paddle.distribution import Categorical
>>> paddle.seed(100) # on CPU device
>>> x = paddle.rand([6])
>>> print(x)
Tensor(shape=[6], dtype=float32, place=Place(cpu), stop_gradient=True,
[0.55355281, 0.20714243, 0.01162981, 0.51577556, 0.36369765, 0.26091650])
>>> paddle.seed(200) # on CPU device
>>> y = paddle.rand([6])
>>> print(y)
Tensor(shape=[6], dtype=float32, place=Place(cpu), stop_gradient=True,
[0.77663314, 0.90824795, 0.15685187, 0.04279523, 0.34468332, 0.79557180])
>>> cat = Categorical(x)
>>> cat2 = Categorical(y)
>>> # doctest: +SKIP
>>> paddle.seed(1000) # on CPU device
>>> print(cat.sample([2,3]))
Tensor(shape=[2, 3], dtype=int64, place=Place(cpu), stop_gradient=True,
[[0, 1, 5],
[3, 4, 5]])
>>> # doctest: -SKIP
>>> print(cat.entropy())
Tensor(shape=[], dtype=float32, place=Place(cpu), stop_gradient=True,
1.77528250)
>>> print(cat.kl_divergence(cat2))
Tensor(shape=[1], dtype=float32, place=Place(cpu), stop_gradient=True,
[0.07195196])
>>> value = paddle.to_tensor([2,1,3])
>>> print(cat.probs(value))
Tensor(shape=[3], dtype=float32, place=Place(cpu), stop_gradient=True,
[0.00608027, 0.10829761, 0.26965630])
>>> print(cat.log_prob(value))
Tensor(shape=[3], dtype=float32, place=Place(cpu), stop_gradient=True,
[-5.10270691, -2.22287226, -1.31060708])
"""
def __init__(self, logits, name=None):
......@@ -128,22 +134,22 @@ class Categorical(distribution.Distribution):
Examples:
.. code-block:: python
import paddle
from paddle.distribution import Categorical
paddle.seed(100) # on CPU device
x = paddle.rand([6])
print(x)
# [0.5535528 0.20714243 0.01162981
# 0.51577556 0.36369765 0.2609165 ]
cat = Categorical(x)
paddle.seed(1000) # on CPU device
cat.sample([2,3])
# [[0, 0, 5],
# [3, 4, 5]]
>>> import paddle
>>> from paddle.distribution import Categorical
>>> paddle.seed(100) # on CPU device
>>> x = paddle.rand([6])
>>> print(x)
Tensor(shape=[6], dtype=float32, place=Place(cpu), stop_gradient=True,
[0.55355281, 0.20714243, 0.01162981, 0.51577556, 0.36369765, 0.26091650])
>>> # doctest: +SKIP
>>> cat = Categorical(x)
>>> paddle.seed(1000) # on CPU device
>>> print(cat.sample([2,3]))
Tensor(shape=[2, 3], dtype=int64, place=Place(cpu), stop_gradient=True,
[[0, 1, 5],
[3, 4, 5]])
"""
name = self.name + '_sample'
if not in_dynamic_mode():
......@@ -185,27 +191,27 @@ class Categorical(distribution.Distribution):
Examples:
.. code-block:: python
import paddle
from paddle.distribution import Categorical
>>> import paddle
>>> from paddle.distribution import Categorical
paddle.seed(100) # on CPU device
x = paddle.rand([6])
print(x)
# [0.5535528 0.20714243 0.01162981
# 0.51577556 0.36369765 0.2609165 ]
>>> paddle.seed(100) # on CPU device
>>> x = paddle.rand([6])
>>> print(x)
Tensor(shape=[6], dtype=float32, place=Place(cpu), stop_gradient=True,
[0.55355281, 0.20714243, 0.01162981, 0.51577556, 0.36369765, 0.26091650])
paddle.seed(200) # on CPU device
y = paddle.rand([6])
print(y)
# [0.77663314 0.90824795 0.15685187
# 0.04279523 0.34468332 0.7955718 ]
>>> paddle.seed(200) # on CPU device
>>> y = paddle.rand([6])
>>> print(y)
Tensor(shape=[6], dtype=float32, place=Place(cpu), stop_gradient=True,
[0.77663314, 0.90824795, 0.15685187, 0.04279523, 0.34468332, 0.79557180])
cat = Categorical(x)
cat2 = Categorical(y)
cat.kl_divergence(cat2)
# [0.071952]
>>> cat = Categorical(x)
>>> cat2 = Categorical(y)
>>> print(cat.kl_divergence(cat2))
Tensor(shape=[1], dtype=float32, place=Place(cpu), stop_gradient=True,
[0.07195196])
"""
name = self.name + '_kl_divergence'
if not in_dynamic_mode():
......@@ -239,20 +245,20 @@ class Categorical(distribution.Distribution):
Examples:
.. code-block:: python
import paddle
from paddle.distribution import Categorical
paddle.seed(100) # on CPU device
x = paddle.rand([6])
print(x)
# [0.5535528 0.20714243 0.01162981
# 0.51577556 0.36369765 0.2609165 ]
>>> import paddle
>>> from paddle.distribution import Categorical
cat = Categorical(x)
>>> paddle.seed(100) # on CPU device
>>> x = paddle.rand([6])
>>> print(x)
Tensor(shape=[6], dtype=float32, place=Place(cpu), stop_gradient=True,
[0.55355281, 0.20714243, 0.01162981, 0.51577556, 0.36369765, 0.26091650])
cat.entropy()
# [1.77528]
>>> cat = Categorical(x)
>>> print(cat.entropy())
Tensor(shape=[], dtype=float32, place=Place(cpu), stop_gradient=True,
1.77528250)
"""
name = self.name + '_entropy'
logits = self.logits - paddle.max(self.logits, axis=-1, keepdim=True)
......@@ -283,21 +289,21 @@ class Categorical(distribution.Distribution):
Examples:
.. code-block:: python
import paddle
from paddle.distribution import Categorical
>>> import paddle
>>> from paddle.distribution import Categorical
paddle.seed(100) # on CPU device
x = paddle.rand([6])
print(x)
# [0.5535528 0.20714243 0.01162981
# 0.51577556 0.36369765 0.2609165 ]
>>> paddle.seed(100) # on CPU device
>>> x = paddle.rand([6])
>>> print(x)
Tensor(shape=[6], dtype=float32, place=Place(cpu), stop_gradient=True,
[0.55355281, 0.20714243, 0.01162981, 0.51577556, 0.36369765, 0.26091650])
cat = Categorical(x)
value = paddle.to_tensor([2,1,3])
cat.probs(value)
# [0.00608027 0.108298 0.269656]
>>> cat = Categorical(x)
>>> value = paddle.to_tensor([2,1,3])
>>> print(cat.probs(value))
Tensor(shape=[3], dtype=float32, place=Place(cpu), stop_gradient=True,
[0.00608027, 0.10829761, 0.26965630])
"""
name = self.name + '_probs'
if len(self._prob.shape) == 1: # batch_shape is empty
......@@ -330,21 +336,21 @@ class Categorical(distribution.Distribution):
Examples:
.. code-block:: python
import paddle
from paddle.distribution import Categorical
paddle.seed(100) # on CPU device
x = paddle.rand([6])
print(x)
# [0.5535528 0.20714243 0.01162981
# 0.51577556 0.36369765 0.2609165 ]
>>> import paddle
>>> from paddle.distribution import Categorical
cat = Categorical(x)
>>> paddle.seed(100) # on CPU device
>>> x = paddle.rand([6])
>>> print(x)
Tensor(shape=[6], dtype=float32, place=Place(cpu), stop_gradient=True,
[0.55355281, 0.20714243, 0.01162981, 0.51577556, 0.36369765, 0.26091650])
value = paddle.to_tensor([2,1,3])
cat.log_prob(value)
# [-5.10271 -2.22287 -1.31061]
>>> cat = Categorical(x)
>>> value = paddle.to_tensor([2,1,3])
>>> print(cat.log_prob(value))
Tensor(shape=[3], dtype=float32, place=Place(cpu), stop_gradient=True,
[-5.10270691, -2.22287226, -1.31060708])
"""
name = self.name + '_log_prob'
......
......@@ -57,17 +57,15 @@ class Dirichlet(exponential_family.ExponentialFamily):
.. code-block:: python
import paddle
dirichlet = paddle.distribution.Dirichlet(paddle.to_tensor([1., 2., 3.]))
print(dirichlet.entropy())
# Tensor(shape=[], dtype=float32, place=CUDAPlace(0), stop_gradient=True,
# -1.24434423)
print(dirichlet.prob(paddle.to_tensor([.3, .5, .6])))
# Tensor(shape=[], dtype=float32, place=CUDAPlace(0), stop_gradient=True,
# 10.80000114)
>>> import paddle
>>> dirichlet = paddle.distribution.Dirichlet(paddle.to_tensor([1., 2., 3.]))
>>> print(dirichlet.entropy())
Tensor(shape=[], dtype=float32, place=Place(cpu), stop_gradient=True,
-1.24434423)
>>> print(dirichlet.prob(paddle.to_tensor([.3, .5, .6])))
Tensor(shape=[], dtype=float32, place=Place(cpu), stop_gradient=True,
10.80000019)
"""
def __init__(self, concentration):
......
......@@ -46,25 +46,40 @@ class Gumbel(TransformedDistribution):
Examples:
.. code-block:: python
import paddle
from paddle.distribution.gumbel import Gumbel
# Gumbel distributed with loc=0, scale=1
dist = Gumbel(paddle.full([1], 0.0), paddle.full([1], 1.0))
dist.sample([2])
# Tensor(shape=[2, 1], dtype=float32, place=Place(gpu:0), stop_gradient=True, [[-0.27544352], [-0.64499271]])
value = paddle.full([1], 0.5)
dist.prob(value)
# Tensor(shape=[1], dtype=float32, place=Place(gpu:0), stop_gradient=True, [0.33070430])
dist.log_prob(value)
# Tensor(shape=[1], dtype=float32, place=Place(gpu:0), stop_gradient=True, [-1.10653067])
dist.cdf(value)
# Tensor(shape=[1], dtype=float32, place=Place(gpu:0), stop_gradient=True, [0.54523915])
dist.entropy()
# Tensor(shape=[1], dtype=float32, place=Place(gpu:0), stop_gradient=True, [1.57721567])
dist.rsample([2])
# Tensor(shape=[2, 1], dtype=float32, place=Place(gpu:0), stop_gradient=True, [[0.80463481], [0.91893655]])
>>> import paddle
>>> from paddle.distribution.gumbel import Gumbel
>>> # Gumbel distributed with loc=0, scale=1
>>> dist = Gumbel(paddle.full([1], 0.0), paddle.full([1], 1.0))
>>> # doctest: +SKIP
>>> print(dist.sample([2]))
Tensor(shape=[2, 1], dtype=float32, place=Place(cpu), stop_gradient=True,
[[0.40484068],
[3.19400501]])
>>> print(dist.rsample([2]))
Tensor(shape=[2, 1], dtype=float32, place=Place(cpu), stop_gradient=True,
[[-0.95093185],
[ 0.32422572]])
>>> # doctest: -SKIP
>>> value = paddle.full([1], 0.5)
>>> print(dist.prob(value))
Tensor(shape=[1], dtype=float32, place=Place(cpu), stop_gradient=True,
[0.33070430])
>>> print(dist.log_prob(value))
Tensor(shape=[1], dtype=float32, place=Place(cpu), stop_gradient=True,
[-1.10653067])
>>> print(dist.cdf(value))
Tensor(shape=[1], dtype=float32, place=Place(cpu), stop_gradient=True,
[0.54523921])
>>> print(dist.entropy())
Tensor(shape=[1], dtype=float32, place=Place(cpu), stop_gradient=True,
[1.57721567])
"""
def __init__(self, loc, scale):
......
......@@ -308,26 +308,26 @@ class Conv1D(_ConvNd):
Examples:
.. code-block:: python
import paddle
from paddle.nn import Conv1D
x = paddle.to_tensor([[[4, 8, 1, 9],
[7, 2, 0, 9],
[6, 9, 2, 6]]], dtype="float32")
w = paddle.to_tensor([[[9, 3, 4],
[0, 0, 7],
[2, 5, 6]],
[[0, 3, 4],
[2, 9, 7],
[5, 6, 8]]], dtype="float32")
conv = Conv1D(3, 2, 3)
conv.weight.set_value(w)
y = conv(x)
print(y)
# Tensor(shape=[1, 2, 2], dtype=float32, place=Place(gpu:0), stop_gradient=False,
# [[[133., 238.],
# [160., 211.]]])
>>> import paddle
>>> from paddle.nn import Conv1D
>>> x = paddle.to_tensor([[[4, 8, 1, 9],
... [7, 2, 0, 9],
... [6, 9, 2, 6]]], dtype="float32")
>>> w = paddle.to_tensor([[[9, 3, 4],
... [0, 0, 7],
... [2, 5, 6]],
... [[0, 3, 4],
... [2, 9, 7],
... [5, 6, 8]]], dtype="float32")
>>> conv = Conv1D(3, 2, 3)
>>> conv.weight.set_value(w)
>>> y = conv(x)
>>> print(y)
Tensor(shape=[1, 2, 2], dtype=float32, place=Place(cpu), stop_gradient=False,
[[[133., 238.],
[160., 211.]]])
"""
def __init__(
......@@ -494,22 +494,27 @@ class Conv1DTranspose(_ConvNd):
Examples:
.. code-block:: python
import paddle
from paddle.nn import Conv1DTranspose
# shape: (1, 2, 4)
x = paddle.to_tensor([[[4, 0, 9, 7],
[8, 0, 9, 2]]], dtype="float32")
# shape: (2, 1, 2)
w = paddle.to_tensor([[[7, 0]],
[[4, 2]]], dtype="float32")
conv = Conv1DTranspose(2, 1, 2)
conv.weight.set_value(w)
y = conv(x)
print(y)
# Tensor(shape=[1, 1, 5], dtype=float32, place=Place(gpu:0), stop_gradient=False,
# [[[60., 16., 99., 75., 4. ]]])
>>> import paddle
>>> from paddle.nn import Conv1DTranspose
>>> # shape: (1, 2, 4)
>>> x = paddle.to_tensor([[[4, 0, 9, 7],
... [8, 0, 9, 2]]], dtype="float32")
>>> print(x.shape)
[1, 2, 4]
>>> # shape: (2, 1, 2)
>>> w = paddle.to_tensor([[[7, 0]],
... [[4, 2]]], dtype="float32")
>>> print(w.shape)
[2, 1, 2]
>>> conv = Conv1DTranspose(2, 1, 2)
>>> conv.weight.set_value(w)
>>> y = conv(x)
>>> print(y)
Tensor(shape=[1, 1, 5], dtype=float32, place=Place(cpu), stop_gradient=False,
[[[60., 16., 99., 75., 4. ]]])
"""
def __init__(
......@@ -655,17 +660,17 @@ class Conv2D(_ConvNd):
.. code-block:: python
import paddle
import paddle.nn as nn
>>> import paddle
>>> import paddle.nn as nn
paddle.disable_static()
>>> paddle.disable_static()
x_var = paddle.uniform((2, 4, 8, 8), dtype='float32', min=-1., max=1.)
>>> x_var = paddle.uniform((2, 4, 8, 8), dtype='float32', min=-1., max=1.)
conv = nn.Conv2D(4, 6, (3, 3))
y_var = conv(x_var)
print(y_var.shape)
# [2, 6, 6, 6]
>>> conv = nn.Conv2D(4, 6, (3, 3))
>>> y_var = conv(x_var)
>>> print(y_var.shape)
[2, 6, 6, 6]
"""
def __init__(
......@@ -827,17 +832,17 @@ class Conv2DTranspose(_ConvNd):
.. code-block:: python
import paddle
import paddle.nn as nn
>>> import paddle
>>> import paddle.nn as nn
paddle.disable_static()
>>> paddle.disable_static()
x_var = paddle.uniform((2, 4, 8, 8), dtype='float32', min=-1., max=1.)
>>> x_var = paddle.uniform((2, 4, 8, 8), dtype='float32', min=-1., max=1.)
conv = nn.Conv2DTranspose(4, 6, (3, 3))
y_var = conv(x_var)
print(y_var.shape)
# [2, 6, 10, 10]
>>> conv = nn.Conv2DTranspose(4, 6, (3, 3))
>>> y_var = conv(x_var)
>>> print(y_var.shape)
[2, 6, 10, 10]
"""
def __init__(
......@@ -984,17 +989,17 @@ class Conv3D(_ConvNd):
.. code-block:: python
import paddle
import paddle.nn as nn
>>> import paddle
>>> import paddle.nn as nn
paddle.disable_static()
>>> paddle.disable_static()
x_var = paddle.uniform((2, 4, 8, 8, 8), dtype='float32', min=-1., max=1.)
>>> x_var = paddle.uniform((2, 4, 8, 8, 8), dtype='float32', min=-1., max=1.)
conv = nn.Conv3D(4, 6, (3, 3, 3))
y_var = conv(x_var)
print(y_var.shape)
# [2, 6, 6, 6, 6]
>>> conv = nn.Conv3D(4, 6, (3, 3, 3))
>>> y_var = conv(x_var)
>>> print(y_var.shape)
[2, 6, 6, 6, 6]
"""
def __init__(
......@@ -1163,17 +1168,17 @@ class Conv3DTranspose(_ConvNd):
.. code-block:: python
import paddle
import paddle.nn as nn
>>> import paddle
>>> import paddle.nn as nn
paddle.disable_static()
>>> paddle.disable_static()
x_var = paddle.uniform((2, 4, 8, 8, 8), dtype='float32', min=-1., max=1.)
>>> x_var = paddle.uniform((2, 4, 8, 8, 8), dtype='float32', min=-1., max=1.)
conv = nn.Conv3DTranspose(4, 6, (3, 3, 3))
y_var = conv(x_var)
print(y_var.shape)
# [2, 6, 10, 10, 10]
>>> conv = nn.Conv3DTranspose(4, 6, (3, 3, 3))
>>> y_var = conv(x_var)
>>> print(y_var.shape)
[2, 6, 10, 10, 10]
"""
def __init__(
......
......@@ -51,15 +51,14 @@ class PairwiseDistance(Layer):
Examples:
.. code-block:: python
import paddle
x = paddle.to_tensor([[1., 3.], [3., 5.]], dtype=paddle.float64)
y = paddle.to_tensor([[5., 6.], [7., 8.]], dtype=paddle.float64)
dist = paddle.nn.PairwiseDistance()
distance = dist(x, y)
print(distance)
# Tensor(shape=[2], dtype=float64, place=Place(gpu:0), stop_gradient=True,
# [4.99999860, 4.99999860])
>>> import paddle
>>> x = paddle.to_tensor([[1., 3.], [3., 5.]], dtype=paddle.float64)
>>> y = paddle.to_tensor([[5., 6.], [7., 8.]], dtype=paddle.float64)
>>> dist = paddle.nn.PairwiseDistance()
>>> distance = dist(x, y)
>>> print(distance)
Tensor(shape=[2], dtype=float64, place=Place(cpu), stop_gradient=True,
[4.99999860, 4.99999860])
"""
def __init__(self, p=2.0, epsilon=1e-6, keepdim=False, name=None):
......
......@@ -84,12 +84,15 @@ add_sample_code(
r"""
Examples:
.. code-block:: python
import paddle
import paddle.nn.functional as F
x = paddle.to_tensor([1.0, 2.0, 3.0, 4.0])
out = F.silu(x)
print(out)
# [ 0.7310586 1.7615942 2.8577224, 3.9280552 ]
>>> import paddle
>>> import paddle.nn.functional as F
>>> x = paddle.to_tensor([1.0, 2.0, 3.0, 4.0])
>>> out = F.silu(x)
>>> print(out)
Tensor(shape=[4], dtype=float32, place=Place(cpu), stop_gradient=True,
[0.73105860, 1.76159406, 2.85772228, 3.92805505])
""",
)
......@@ -98,12 +101,15 @@ add_sample_code(
r"""
Examples:
.. code-block:: python
import paddle
import paddle.nn.functional as F
x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3])
out = F.log_sigmoid(x)
print(out)
# [-0.91301525 -0.79813887 -0.64439666 -0.55435524]
>>> import paddle
>>> import paddle.nn.functional as F
>>> x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3])
>>> out = F.log_sigmoid(x)
>>> print(out)
Tensor(shape=[4], dtype=float32, place=Place(cpu), stop_gradient=True,
[-0.91301525, -0.79813892, -0.64439666, -0.55435526])
""",
)
......@@ -113,13 +119,13 @@ add_sample_code(
Examples:
.. code-block:: python
import paddle
x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3])
out = paddle.tanh(x)
print(out)
# [-0.37994896 -0.19737532 0.09966799 0.29131261]
>>> import paddle
>>> x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3])
>>> out = paddle.tanh(x)
>>> print(out)
Tensor(shape=[4], dtype=float32, place=Place(cpu), stop_gradient=True,
[-0.37994900, -0.19737528, 0.09966799, 0.29131261])
""",
)
......@@ -129,14 +135,14 @@ add_sample_code(
Examples:
.. code-block:: python
import paddle
import paddle.nn.functional as F
x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3])
out = F.tanhshrink(x)
print(out)
# [-0.020051, -0.00262468, 0.000332005, 0.00868739]
>>> import paddle
>>> import paddle.nn.functional as F
>>> x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3])
>>> out = F.tanhshrink(x)
>>> print(out)
Tensor(shape=[4], dtype=float32, place=Place(cpu), stop_gradient=True,
[-0.02005100, -0.00262472, 0.00033201, 0.00868741])
""",
)
......@@ -146,13 +152,13 @@ add_sample_code(
Examples:
.. code-block:: python
import paddle
x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3])
out = paddle.abs(x)
print(out)
# [0.4 0.2 0.1 0.3]
>>> import paddle
>>> x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3])
>>> out = paddle.abs(x)
>>> print(out)
Tensor(shape=[4], dtype=float32, place=Place(cpu), stop_gradient=True,
[0.40000001, 0.20000000, 0.10000000, 0.30000001])
""",
)
......@@ -162,14 +168,14 @@ add_sample_code(
Examples:
.. code-block:: python
import paddle
import paddle.nn.functional as F
x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3])
out = F.softplus(x)
print(out)
# [0.513015, 0.598139, 0.744397, 0.854355]
>>> import paddle
>>> import paddle.nn.functional as F
>>> x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3])
>>> out = F.softplus(x)
>>> print(out)
Tensor(shape=[4], dtype=float32, place=Place(cpu), stop_gradient=True,
[0.51301527, 0.59813893, 0.74439669, 0.85435522])
""",
)
......@@ -179,14 +185,14 @@ add_sample_code(
Examples:
.. code-block:: python
import paddle
import paddle.nn.functional as F
x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3])
out = F.softsign(x)
print(out)
# [-0.285714, -0.166667, 0.0909091, 0.230769]
>>> import paddle
>>> import paddle.nn.functional as F
>>> x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3])
>>> out = F.softsign(x)
>>> print(out)
Tensor(shape=[4], dtype=float32, place=Place(cpu), stop_gradient=True,
[-0.28571430, -0.16666666, 0.09090909, 0.23076925])
""",
)
......@@ -208,13 +214,13 @@ def acos(x, name=None):
Examples:
.. code-block:: python
import paddle
x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3])
out = paddle.acos(x)
print(out)
# [1.98231317 1.77215425 1.47062891 1.26610367]
>>> import paddle
>>> x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3])
>>> out = paddle.acos(x)
>>> print(out)
Tensor(shape=[4], dtype=float32, place=Place(cpu), stop_gradient=True,
[1.98231316, 1.77215421, 1.47062886, 1.26610363])
"""
if in_dynamic_mode():
return _C_ops.acos(x)
......@@ -245,13 +251,13 @@ def acosh(x, name=None):
Examples:
.. code-block:: python
import paddle
x = paddle.to_tensor([1., 3., 4., 5.])
out = paddle.acosh(x)
print(out)
# [0. , 1.76274729, 2.06343699, 2.29243159]
>>> import paddle
>>> x = paddle.to_tensor([1., 3., 4., 5.])
>>> out = paddle.acosh(x)
>>> print(out)
Tensor(shape=[4], dtype=float32, place=Place(cpu), stop_gradient=True,
[0. , 1.76274717, 2.06343699, 2.29243159])
"""
if in_dynamic_mode():
return _C_ops.acosh(x)
......@@ -282,13 +288,13 @@ def asin(x, name=None):
Examples:
.. code-block:: python
import paddle
x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3])
out = paddle.asin(x)
print(out)
# [-0.41151685 -0.20135792 0.10016742 0.30469265]
>>> import paddle
>>> x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3])
>>> out = paddle.asin(x)
>>> print(out)
Tensor(shape=[4], dtype=float32, place=Place(cpu), stop_gradient=True,
[-0.41151685, -0.20135793, 0.10016742, 0.30469266])
"""
if in_dynamic_mode():
return _C_ops.asin(x)
......@@ -319,13 +325,13 @@ def asinh(x, name=None):
Examples:
.. code-block:: python
import paddle
x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3])
out = paddle.asinh(x)
print(out)
# [-0.39003533, -0.19869010, 0.09983408, 0.29567307]
>>> import paddle
>>> x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3])
>>> out = paddle.asinh(x)
>>> print(out)
Tensor(shape=[4], dtype=float32, place=Place(cpu), stop_gradient=True,
[-0.39003533, -0.19869010, 0.09983408, 0.29567307])
"""
if in_dynamic_mode():
return _C_ops.asinh(x)
......@@ -356,13 +362,13 @@ def atan(x, name=None):
Examples:
.. code-block:: python
import paddle
x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3])
out = paddle.atan(x)
print(out)
# [-0.38050638 -0.19739556 0.09966865 0.29145679]
>>> import paddle
>>> x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3])
>>> out = paddle.atan(x)
>>> print(out)
Tensor(shape=[4], dtype=float32, place=Place(cpu), stop_gradient=True,
[-0.38050640, -0.19739556, 0.09966865, 0.29145682])
"""
if in_dynamic_mode():
return _C_ops.atan(x)
......@@ -393,13 +399,13 @@ def atanh(x, name=None):
Examples:
.. code-block:: python
import paddle
x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3])
out = paddle.atanh(x)
print(out)
# [-0.42364895, -0.20273256, 0.10033535, 0.30951962]
>>> import paddle
>>> x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3])
>>> out = paddle.atanh(x)
>>> print(out)
Tensor(shape=[4], dtype=float32, place=Place(cpu), stop_gradient=True,
[-0.42364895, -0.20273255, 0.10033534, 0.30951962])
"""
if in_dynamic_mode():
return _C_ops.atanh(x)
......@@ -431,13 +437,13 @@ def ceil(x, name=None):
Examples:
.. code-block:: python
import paddle
x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3])
out = paddle.ceil(x)
print(out)
# [-0. -0. 1. 1.]
>>> import paddle
>>> x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3])
>>> out = paddle.ceil(x)
>>> print(out)
Tensor(shape=[4], dtype=float32, place=Place(cpu), stop_gradient=True,
[-0., -0., 1. , 1. ])
"""
if in_dynamic_mode():
return _C_ops.ceil(x)
......@@ -470,13 +476,13 @@ def cos(x, name=None):
Examples:
.. code-block:: python
import paddle
x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3])
out = paddle.cos(x)
print(out)
# [0.92106099 0.98006658 0.99500417 0.95533649]
>>> import paddle
>>> x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3])
>>> out = paddle.cos(x)
>>> print(out)
Tensor(shape=[4], dtype=float32, place=Place(cpu), stop_gradient=True,
[0.92106098, 0.98006660, 0.99500418, 0.95533651])
"""
if in_dynamic_mode():
return _C_ops.cos(x)
......@@ -509,13 +515,13 @@ def cosh(x, name=None):
Examples:
.. code-block:: python
import paddle
x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3])
out = paddle.cosh(x)
print(out)
# [1.08107237 1.02006676 1.00500417 1.04533851]
>>> import paddle
>>> x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3])
>>> out = paddle.cosh(x)
>>> print(out)
Tensor(shape=[4], dtype=float32, place=Place(cpu), stop_gradient=True,
[1.08107233, 1.02006674, 1.00500417, 1.04533851])
"""
if in_dynamic_mode():
return _C_ops.cosh(x)
......@@ -547,13 +553,13 @@ def exp(x, name=None):
Examples:
.. code-block:: python
import paddle
x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3])
out = paddle.exp(x)
print(out)
# [0.67032005 0.81873075 1.10517092 1.34985881]
>>> import paddle
>>> x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3])
>>> out = paddle.exp(x)
>>> print(out)
Tensor(shape=[4], dtype=float32, place=Place(cpu), stop_gradient=True,
[0.67032003, 0.81873077, 1.10517097, 1.34985888])
"""
if in_dynamic_mode():
return _C_ops.exp(x)
......@@ -597,13 +603,13 @@ def expm1(x, name=None):
Examples:
.. code-block:: python
import paddle
x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3])
out = paddle.expm1(x)
print(out)
# [-0.32967997, -0.18126924, 0.10517092, 0.34985882]
>>> import paddle
>>> x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3])
>>> out = paddle.expm1(x)
>>> print(out)
Tensor(shape=[4], dtype=float32, place=Place(cpu), stop_gradient=True,
[-0.32967997, -0.18126924, 0.10517092, 0.34985882])
"""
if in_dynamic_mode():
return _C_ops.expm1(x)
......@@ -638,13 +644,13 @@ def floor(x, name=None):
Examples:
.. code-block:: python
import paddle
x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3])
out = paddle.floor(x)
print(out)
# [-1. -1. 0. 0.]
>>> import paddle
>>> x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3])
>>> out = paddle.floor(x)
>>> print(out)
Tensor(shape=[4], dtype=float32, place=Place(cpu), stop_gradient=True,
[-1., -1., 0., 0.])
"""
if in_dynamic_mode():
return _C_ops.floor(x)
......@@ -676,13 +682,13 @@ def reciprocal(x, name=None):
Examples:
.. code-block:: python
import paddle
x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3])
out = paddle.reciprocal(x)
print(out)
# [-2.5 -5. 10. 3.33333333]
>>> import paddle
>>> x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3])
>>> out = paddle.reciprocal(x)
>>> print(out)
Tensor(shape=[4], dtype=float32, place=Place(cpu), stop_gradient=True,
[-2.50000000, -5. , 10. , 3.33333325])
"""
if in_dynamic_mode():
return _C_ops.reciprocal(x)
......@@ -723,13 +729,13 @@ def round(x, name=None):
Examples:
.. code-block:: python
import paddle
x = paddle.to_tensor([-0.5, -0.2, 0.6, 1.5])
out = paddle.round(x)
print(out)
# [-1. -0. 1. 2.]
>>> import paddle
>>> x = paddle.to_tensor([-0.5, -0.2, 0.6, 1.5])
>>> out = paddle.round(x)
>>> print(out)
Tensor(shape=[4], dtype=float32, place=Place(cpu), stop_gradient=True,
[-1., -0., 1., 2.])
"""
if in_dynamic_mode():
return _C_ops.round(x)
......@@ -762,13 +768,13 @@ def rsqrt(x, name=None):
Examples:
.. code-block:: python
import paddle
x = paddle.to_tensor([0.1, 0.2, 0.3, 0.4])
out = paddle.rsqrt(x)
print(out)
# [3.16227766 2.23606798 1.82574186 1.58113883]
>>> import paddle
>>> x = paddle.to_tensor([0.1, 0.2, 0.3, 0.4])
>>> out = paddle.rsqrt(x)
>>> print(out)
Tensor(shape=[4], dtype=float32, place=Place(cpu), stop_gradient=True,
[3.16227770, 2.23606801, 1.82574177, 1.58113885])
"""
if in_dynamic_mode():
return _C_ops.rsqrt(x)
......@@ -799,14 +805,14 @@ def sigmoid(x, name=None):
Examples:
.. code-block:: python
import paddle
import paddle.nn.functional as F
x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3])
out = F.sigmoid(x)
print(out)
# [0.40131234 0.450166 0.52497919 0.57444252]
>>> import paddle
>>> import paddle.nn.functional as F
>>> x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3])
>>> out = F.sigmoid(x)
>>> print(out)
Tensor(shape=[4], dtype=float32, place=Place(cpu), stop_gradient=True,
[0.40131235, 0.45016602, 0.52497917, 0.57444251])
"""
if in_dynamic_mode():
return _C_ops.sigmoid(x)
......@@ -837,13 +843,13 @@ def sin(x, name=None):
Examples:
.. code-block:: python
import paddle
x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3])
out = paddle.sin(x)
print(out)
# [-0.38941834 -0.19866933 0.09983342 0.29552021]
>>> import paddle
>>> x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3])
>>> out = paddle.sin(x)
>>> print(out)
Tensor(shape=[4], dtype=float32, place=Place(cpu), stop_gradient=True,
[-0.38941833, -0.19866933, 0.09983342, 0.29552022])
"""
if in_dynamic_mode():
return _C_ops.sin(x)
......@@ -874,13 +880,13 @@ def sinh(x, name=None):
Examples:
.. code-block:: python
import paddle
x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3])
out = paddle.sinh(x)
print(out)
# [-0.41075233 -0.201336 0.10016675 0.30452029]
>>> import paddle
>>> x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3])
>>> out = paddle.sinh(x)
>>> print(out)
Tensor(shape=[4], dtype=float32, place=Place(cpu), stop_gradient=True,
[-0.41075233, -0.20133601, 0.10016675, 0.30452031])
"""
if in_dynamic_mode():
return _C_ops.sinh(x)
......@@ -911,12 +917,13 @@ def sqrt(x, name=None):
Examples:
.. code-block:: python
import paddle
>>> import paddle
x = paddle.to_tensor([0.1, 0.2, 0.3, 0.4])
out = paddle.sqrt(x)
print(out)
# [0.31622777 0.4472136 0.54772256 0.63245553]
>>> x = paddle.to_tensor([0.1, 0.2, 0.3, 0.4])
>>> out = paddle.sqrt(x)
>>> print(out)
Tensor(shape=[4], dtype=float32, place=Place(cpu), stop_gradient=True,
[0.31622776, 0.44721359, 0.54772258, 0.63245553])
"""
if in_dynamic_mode():
return _C_ops.sqrt(x)
......@@ -950,12 +957,13 @@ def square(x, name=None):
Examples:
.. code-block:: python
import paddle
>>> import paddle
x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3])
out = paddle.square(x)
print(out)
# [0.16 0.04 0.01 0.09]
>>> x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3])
>>> out = paddle.square(x)
>>> print(out)
Tensor(shape=[4], dtype=float32, place=Place(cpu), stop_gradient=True,
[0.16000001, 0.04000000, 0.01000000, 0.09000000])
"""
if in_dynamic_mode():
return _C_ops.square(x)
......@@ -999,13 +1007,13 @@ def tan(x, name=None):
Examples:
.. code-block:: python
import paddle
x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3])
out = paddle.tan(x)
print(out)
# [-0.42279324, -0.20271005, 0.10033467, 0.30933627]
>>> import paddle
>>> x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3])
>>> out = paddle.tan(x)
>>> print(out)
Tensor(shape=[4], dtype=float32, place=Place(cpu), stop_gradient=True,
[-0.42279324, -0.20271003, 0.10033467, 0.30933627])
"""
if in_dynamic_mode():
return _C_ops.tan(x)
......@@ -1055,10 +1063,11 @@ Examples:
.. code-block:: python
import paddle
>>> import paddle
x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3])
out = paddle.erf(x)
print(out)
# [-0.42839236 -0.22270259 0.11246292 0.32862676]
>>> x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3])
>>> out = paddle.erf(x)
>>> print(out)
Tensor(shape=[4], dtype=float32, place=Place(cpu), stop_gradient=True,
[-0.42839241, -0.22270259, 0.11246292, 0.32862678])
"""
......@@ -17,3 +17,4 @@ librosa==0.8.1
parameterized
wandb>=0.13
xlsxwriter==3.0.9
xdoctest
......@@ -21,8 +21,6 @@ for example, you can run cpu version testing like this:
python sampcd_processor.py cpu
"""
import argparse
import inspect
import logging
import multiprocessing
import os
......@@ -33,6 +31,17 @@ import subprocess
import sys
import time
from sampcd_processor_utils import ENV_KEY_TEST_CAPACITY # noqa: F401
from sampcd_processor_utils import (
API_DIFF_SPEC_FN,
extract_code_blocks_from_docstr,
get_full_api_from_pr_spec,
get_incrementapi,
parse_args,
run_doctest,
)
from sampcd_processor_xdoctest import Xdoctester
logger = logging.getLogger()
if logger.handlers:
console = logger.handlers[
......@@ -47,12 +56,8 @@ RUN_ON_DEVICE = 'cpu'
SAMPLE_CODE_TEST_CAPACITY = set()
GPU_ID = 0
whl_error = []
API_DEV_SPEC_FN = 'paddle/fluid/API_DEV.spec'
API_PR_SPEC_FN = 'paddle/fluid/API_PR.spec'
API_DIFF_SPEC_FN = 'dev_pr_diff_api.spec'
SAMPLECODE_TEMPDIR = 'samplecode_temp'
ENV_KEY_CODES_FRONTEND = 'CODES_INSERTED_INTO_FRONTEND'
ENV_KEY_TEST_CAPACITY = 'SAMPLE_CODE_TEST_CAPACITY'
SUMMARY_INFO = {
'success': [],
'failed': [],
......@@ -106,117 +111,6 @@ def find_last_future_line_end(cbstr):
return None
def extract_code_blocks_from_docstr(docstr):
"""
extract code-blocks from the given docstring.
DON'T include the multiline-string definition in code-blocks.
The *Examples* section must be the last.
Args:
docstr(str): docstring
Return:
code_blocks: A list of code-blocks, indent removed.
element {'name': the code-block's name, 'id': sequence id.
'codes': codes, 'required': 'gpu'}
"""
code_blocks = []
mo = re.search(r"Examples:", docstr)
if mo is None:
return code_blocks
ds_list = docstr[mo.start() :].replace("\t", ' ').split("\n")
lastlineindex = len(ds_list) - 1
cb_start_pat = re.compile(r"code-block::\s*python")
cb_param_pat = re.compile(r"^\s*:(\w+):\s*(\S*)\s*$")
cb_required_pat = re.compile(r"^\s*#\s*require[s|d]\s*:\s*(\S+)\s*$")
cb_info = {}
cb_info['cb_started'] = False
cb_info['cb_cur'] = []
cb_info['cb_cur_indent'] = -1
cb_info['cb_cur_name'] = None
cb_info['cb_cur_seq_id'] = 0
cb_info['cb_required'] = None
def _cb_started():
# nonlocal cb_started, cb_cur_name, cb_required, cb_cur_seq_id
cb_info['cb_started'] = True
cb_info['cb_cur_seq_id'] += 1
cb_info['cb_cur_name'] = None
cb_info['cb_required'] = None
def _append_code_block():
# nonlocal code_blocks, cb_cur, cb_cur_name, cb_cur_seq_id, cb_required
code_blocks.append(
{
'codes': inspect.cleandoc("\n" + "\n".join(cb_info['cb_cur'])),
'name': cb_info['cb_cur_name'],
'id': cb_info['cb_cur_seq_id'],
'required': cb_info['cb_required'],
}
)
for lineno, linecont in enumerate(ds_list):
if re.search(cb_start_pat, linecont):
if not cb_info['cb_started']:
_cb_started()
continue
else:
# cur block end
if len(cb_info['cb_cur']):
_append_code_block()
_cb_started() # another block started
cb_info['cb_cur_indent'] = -1
cb_info['cb_cur'] = []
else:
if cb_info['cb_started']:
# handle the code-block directive's options
mo_p = cb_param_pat.match(linecont)
if mo_p:
if mo_p.group(1) == 'name':
cb_info['cb_cur_name'] = mo_p.group(2)
continue
# read the required directive
mo_r = cb_required_pat.match(linecont)
if mo_r:
cb_info['cb_required'] = mo_r.group(1)
# docstring end
if lineno == lastlineindex:
mo = re.search(r"\S", linecont)
if (
mo is not None
and cb_info['cb_cur_indent'] <= mo.start()
):
cb_info['cb_cur'].append(linecont)
if len(cb_info['cb_cur']):
_append_code_block()
break
# check indent for cur block start and end.
mo = re.search(r"\S", linecont)
if mo is None:
continue
if cb_info['cb_cur_indent'] < 0:
# find the first non empty line
cb_info['cb_cur_indent'] = mo.start()
cb_info['cb_cur'].append(linecont)
else:
if cb_info['cb_cur_indent'] <= mo.start():
cb_info['cb_cur'].append(linecont)
else:
if linecont[mo.start()] == '#':
continue
else:
# block end
if len(cb_info['cb_cur']):
_append_code_block()
cb_info['cb_started'] = False
cb_info['cb_cur_indent'] = -1
cb_info['cb_cur'] = []
return code_blocks
def get_test_capacity():
"""
collect capacities and set to SAMPLE_CODE_TEST_CAPACITY
......@@ -325,6 +219,15 @@ def insert_codes_into_codeblock(codeblock, apiname='not-specified'):
return inserted_codes_f + cb + inserted_codes_b
def is_ps_wrapped_codeblock(codeblock):
"""If the codeblock is wrapped by PS1(>>> ),
we skip test and use xdoctest instead.
"""
codes = codeblock['codes']
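    # prepend '\n' so a PS1 prompt on the very first line also matches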
match_obj = re.search(r"\n>>>\s?", "\n" + codes)
return match_obj is not None
def sampcd_extract_to_file(srccom, name, htype="def", hname=""):
"""
Extract sample codes from __doc__, and write them to files.
......@@ -368,6 +271,15 @@ Please use '.. code-block:: python' to format the sample code."""
sample_code_filenames = []
for y, cb in enumerate(codeblocks):
if is_ps_wrapped_codeblock(cb):
SUMMARY_INFO['skiptest'].append("{}-{}".format(name, cb['id']))
logger.info(
'{}\' code block (name:{}, id:{}) is wrapped by PS1(>>> ), which will be tested by xdoctest.'.format(
name, cb['name'], cb['id']
)
)
continue
matched = is_required_match(cb['required'], name)
# matched has three states:
# True - please execute it;
......@@ -513,175 +425,6 @@ def get_filenames(full_test=False):
return all_sample_code_filenames
def get_api_md5(path):
"""
read the api spec file, and scratch the md5sum value of every api's docstring.
Args:
path: the api spec file. ATTENTION the path relative
Returns:
api_md5(dict): key is the api's real fullname, value is the md5sum.
"""
api_md5 = {}
API_spec = os.path.abspath(os.path.join(os.getcwd(), "..", path))
if not os.path.isfile(API_spec):
return api_md5
pat = re.compile(r'\((paddle[^,]+)\W*document\W*([0-9a-z]{32})')
patArgSpec = re.compile(
r'^(paddle[^,]+)\s+\(ArgSpec.*document\W*([0-9a-z]{32})'
)
with open(API_spec) as f:
for line in f.readlines():
mo = pat.search(line)
if not mo:
mo = patArgSpec.search(line)
if mo:
api_md5[mo.group(1)] = mo.group(2)
return api_md5
def get_full_api():
"""
get all the apis
"""
global API_DIFF_SPEC_FN # readonly
from print_signatures import get_all_api_from_modulelist
member_dict = get_all_api_from_modulelist()
with open(API_DIFF_SPEC_FN, 'w') as f:
f.write("\n".join(member_dict.keys()))
def get_full_api_by_walk():
"""
get all the apis
"""
global API_DIFF_SPEC_FN # readonly
from print_signatures import get_all_api
apilist = get_all_api()
with open(API_DIFF_SPEC_FN, 'w') as f:
f.write("\n".join([ai[0] for ai in apilist]))
def get_full_api_from_pr_spec():
"""
get all the apis
"""
global API_PR_SPEC_FN, API_DIFF_SPEC_FN # readonly
pr_api = get_api_md5(API_PR_SPEC_FN)
if len(pr_api):
with open(API_DIFF_SPEC_FN, 'w') as f:
f.write("\n".join(pr_api.keys()))
else:
get_full_api_by_walk()
def get_incrementapi():
'''
this function will get the apis that difference between API_DEV.spec and API_PR.spec.
'''
global API_DEV_SPEC_FN, API_PR_SPEC_FN, API_DIFF_SPEC_FN # readonly
dev_api = get_api_md5(API_DEV_SPEC_FN)
pr_api = get_api_md5(API_PR_SPEC_FN)
with open(API_DIFF_SPEC_FN, 'w') as f:
for key in pr_api:
if key in dev_api:
if dev_api[key] != pr_api[key]:
logger.debug(
"%s in dev is %s, different from pr's %s",
key,
dev_api[key],
pr_api[key],
)
f.write(key)
f.write('\n')
else:
logger.debug("%s is not in dev", key)
f.write(key)
f.write('\n')
def exec_gen_doc():
result = True
cmd = ["bash", "document_preview.sh"]
logger.info("----exec gen_doc----")
start_time = time.time()
subprc = subprocess.Popen(
cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE
)
output, error = subprc.communicate()
msg = "".join(output.decode(encoding='utf-8'))
err = "".join(error.decode(encoding='utf-8'))
end_time = time.time()
if subprc.returncode != 0:
logger.info("----gen_doc msg----")
logger.info(msg)
logger.error("----gen_doc error msg----")
logger.error(err)
logger.error("----exec gen_doc failed----")
result = False
else:
logger.info("----gen_doc msg----")
logger.info(msg)
logger.info("----exec gen_doc success----")
for fn in [
'/docs/en/develop/index_en.html',
'/docs/zh/develop/index_cn.html',
]:
if os.path.exists(fn):
logger.info('%s exists.', fn)
else:
logger.error('%s not exists.', fn)
# msg is the returned code execution report
return result, msg, end_time - start_time
arguments = [
# flags, dest, type, default, help
['--gpu_id', 'gpu_id', int, 0, 'GPU device id to use [0]'],
['--logf', 'logf', str, None, 'file for logging'],
['--threads', 'threads', int, 0, 'sub processes number'],
]
def parse_args():
"""
Parse input arguments
"""
global arguments
parser = argparse.ArgumentParser(description='run Sample Code Test')
# parser.add_argument('--cpu', dest='cpu_mode', action="store_true",
# help='Use CPU mode (overrides --gpu)')
# parser.add_argument('--gpu', dest='gpu_mode', action="store_true")
parser.add_argument('--debug', dest='debug', action="store_true")
parser.add_argument('--full-test', dest='full_test', action="store_true")
parser.add_argument('mode', type=str, help='run on device', default='cpu')
parser.add_argument(
'--build-doc',
dest='build_doc',
action='store_true',
help='build doc if need.',
)
for item in arguments:
parser.add_argument(
item[0], dest=item[1], help=item[4], type=item[2], default=item[3]
)
if len(sys.argv) == 1:
args = parser.parse_args(['cpu'])
return args
# parser.print_help()
# sys.exit(1)
args = parser.parse_args()
return args
if __name__ == '__main__':
args = parse_args()
if args.debug:
......@@ -722,96 +465,107 @@ if __name__ == '__main__':
filenames = get_filenames(args.full_test)
if len(filenames) == 0 and len(whl_error) == 0:
logger.info("-----API_PR.spec is the same as API_DEV.spec-----")
sys.exit(0)
logger.info("API_PR is diff from API_DEV: %s", filenames)
threads = multiprocessing.cpu_count()
if args.threads:
threads = args.threads
po = multiprocessing.Pool(threads)
results = po.map_async(execute_samplecode, filenames.keys())
po.close()
po.join()
result = results.get()
# delete temp files
if not args.debug:
shutil.rmtree(SAMPLECODE_TEMPDIR)
stdout_handler = logging.StreamHandler(stream=sys.stdout)
logger.addHandler(stdout_handler)
logger.info("----------------End of the Check--------------------")
if len(whl_error) != 0:
logger.info("%s is not in whl.", whl_error)
logger.info("")
logger.info("Please check the whl package and API_PR.spec!")
logger.info("You can follow these steps in order to generate API.spec:")
logger.info("1. cd ${paddle_path}, compile paddle;")
logger.info("2. pip install build/python/dist/(build whl package);")
logger.info(
"3. run 'python tools/print_signatures.py paddle > paddle/fluid/API.spec'."
)
for temp in result:
if not temp[0]:
logger.info(
"In addition, mistakes found in sample codes: %s", temp[1]
)
logger.info("----------------------------------------------------")
sys.exit(1)
else:
timeovered_test = {}
for temp in result:
if not temp[0]:
logger.info(
"In addition, mistakes found in sample codes: %s", temp[1]
)
SUMMARY_INFO['failed'].append(temp[1])
else:
SUMMARY_INFO['success'].append(temp[1])
if temp[3] > 10:
timeovered_test[temp[1]] = temp[3]
# not exit if no filenames, we should do xdoctest later.
# sys.exit(0)
# delete temp files
if not args.debug:
shutil.rmtree(SAMPLECODE_TEMPDIR)
if len(timeovered_test):
else:
logger.info("API_PR is diff from API_DEV: %s", filenames)
threads = multiprocessing.cpu_count()
if args.threads:
threads = args.threads
po = multiprocessing.Pool(threads)
results = po.map_async(execute_samplecode, filenames.keys())
po.close()
po.join()
result = results.get()
# delete temp files
if not args.debug:
shutil.rmtree(SAMPLECODE_TEMPDIR)
stdout_handler = logging.StreamHandler(stream=sys.stdout)
logger.addHandler(stdout_handler)
logger.info("----------------End of the Check--------------------")
if len(whl_error) != 0:
logger.info("%s is not in whl.", whl_error)
logger.info("")
logger.info("Please check the whl package and API_PR.spec!")
logger.info(
"%d sample codes ran time over 10s", len(timeovered_test)
"You can follow these steps in order to generate API.spec:"
)
if args.debug:
for k, v in timeovered_test.items():
logger.info(f'{k} - {v}s')
if len(SUMMARY_INFO['success']):
logger.info("1. cd ${paddle_path}, compile paddle;")
logger.info("2. pip install build/python/dist/(build whl package);")
logger.info(
"%d sample codes ran success", len(SUMMARY_INFO['success'])
"3. run 'python tools/print_signatures.py paddle > paddle/fluid/API.spec'."
)
for k, v in SUMMARY_INFO.items():
if k not in ['success', 'failed', 'skiptest', 'nocodes']:
for temp in result:
if not temp[0]:
logger.info(
"In addition, mistakes found in sample codes: %s",
temp[1],
)
logger.info("----------------------------------------------------")
sys.exit(1)
else:
timeovered_test = {}
for temp in result:
if not temp[0]:
logger.info(
"In addition, mistakes found in sample codes: %s",
temp[1],
)
SUMMARY_INFO['failed'].append(temp[1])
else:
SUMMARY_INFO['success'].append(temp[1])
if temp[3] > 10:
timeovered_test[temp[1]] = temp[3]
if len(timeovered_test):
logger.info(
"%d sample codes required not match for %s", len(v), k
"%d sample codes ran time over 10s", len(timeovered_test)
)
if len(SUMMARY_INFO['skiptest']):
logger.info(
"%d sample codes skipped", len(SUMMARY_INFO['skiptest'])
)
if args.debug:
logger.info('\n'.join(SUMMARY_INFO['skiptest']))
if len(SUMMARY_INFO['nocodes']):
logger.info(
"%d apis don't have sample codes", len(SUMMARY_INFO['nocodes'])
)
if args.debug:
logger.info('\n'.join(SUMMARY_INFO['nocodes']))
if len(SUMMARY_INFO['failed']):
logger.info(
"%d sample codes ran failed", len(SUMMARY_INFO['failed'])
)
logger.info('\n'.join(SUMMARY_INFO['failed']))
logger.info(
"Mistakes found in sample codes. Please recheck the sample codes."
)
sys.exit(1)
if args.debug:
for k, v in timeovered_test.items():
logger.info(f'{k} - {v}s')
if len(SUMMARY_INFO['success']):
logger.info(
"%d sample codes ran success", len(SUMMARY_INFO['success'])
)
for k, v in SUMMARY_INFO.items():
if k not in ['success', 'failed', 'skiptest', 'nocodes']:
logger.info(
"%d sample codes required not match for %s", len(v), k
)
if len(SUMMARY_INFO['skiptest']):
logger.info(
"%d sample codes skipped", len(SUMMARY_INFO['skiptest'])
)
if args.debug:
logger.info('\n'.join(SUMMARY_INFO['skiptest']))
if len(SUMMARY_INFO['nocodes']):
logger.info(
"%d apis don't have sample codes",
len(SUMMARY_INFO['nocodes']),
)
if args.debug:
logger.info('\n'.join(SUMMARY_INFO['nocodes']))
if len(SUMMARY_INFO['failed']):
logger.info(
"%d sample codes ran failed", len(SUMMARY_INFO['failed'])
)
logger.info('\n'.join(SUMMARY_INFO['failed']))
logger.info(
"Mistakes found in sample codes. Please recheck the sample codes."
)
sys.exit(1)
logger.info("Sample code check is successful!")
logger.info("Sample code check is successful!")
if args.mode == "cpu":
# As cpu mode is also run with the GPU whl, so skip it in gpu mode.
exec_gen_doc()
# run xdoctest
run_doctest(args, doctester=Xdoctester(debug=args.debug))
# Introducing xdoctest into the PaddlePaddle workflow (supplement) - detailed design
|Area | Introducing xdoctest into the PaddlePaddle workflow |
|---|--------------------------------|
|Author | megemini (Liu Shun) |
|Submitted | 2023-07-16 |
|Version | V1.1 |
|Target Paddle version | develop branch |
|Filename | sampcd_processor_readme.md |
# Overview
This document supplements [*Introducing xdoctest into the PaddlePaddle workflow*](https://github.com/PaddlePaddle/community/pull/547). It mainly describes the detailed design of `Doctester` and `Xdoctester` after `xdoctest` is introduced, as well as the refactoring of the original sample-code checker `sampcd_processor.py`.
The following files are involved:
- `sampcd_processor_utils.py`: utilities for code checking
- `sampcd_processor_xdoctest.py`: the `Xdoctester` implementation
- `sampcd_processor.py`: the original code-checking tool
- `test_sampcd_processor.py`: unit tests for the original code-checking tool
- `test_sampcd_processor_xdoctest.py`: unit tests for `Xdoctester`
# Overall design
[*Introducing xdoctest into the PaddlePaddle workflow*](https://github.com/PaddlePaddle/community/pull/547) divides code checking into three main stages:
- API extraction
- example execution
- result comparison
With `xdoctest` introduced, the three stages are divided up as follows:
- API extraction: keeps the original flow -> `sampcd_processor_utils.py`
- example execution: uses `xdoctest` -> `sampcd_processor_xdoctest.py`
- result comparison: uses `xdoctest` -> `sampcd_processor_xdoctest.py`
The concrete implementation steps are (see the `run_doctest` function in `sampcd_processor_utils.py`):
1. `init_logger(debug=args.debug, log_file=args.logf)`
   Initialize logging.
2. `run_on_device = check_test_mode(mode=args.mode, gpu_id=args.gpu_id)`
   Check the test mode.
3. `sample_code_test_capacity = get_test_capacity(run_on_device)`
   Get the test-environment capacity.
4. `docstrings_to_test, whl_error = get_docstring(full_test=args.full_test)`
   Extract the docstrings to test.
5. `doctester.prepare(sample_code_test_capacity)`
   Prepare the doctester.
6. `test_results = get_test_results(doctester, docstrings_to_test)`
   Run the code checks.
7. `doctester.print_summary(test_results, whl_error)`
   Print the check results.
8. `exec_gen_doc()` (optional)
   Generate the documentation.
Steps `1` `2` `3` `4` follow the original checking logic, steps `5` `6` use `Xdoctester` for code checking and result comparison, and steps `7` `8` again follow the original logic, as sketched below.
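As a rough illustration, the flow composes as follows; the helper names are taken from the step list above, and the exact signatures in `sampcd_processor_utils.py` may differ.
``` python
# Minimal sketch of the run_doctest flow; step numbers refer to the list
# above. Helper signatures are assumptions, not the exact implementation.
def run_doctest(args, doctester):
    init_logger(debug=args.debug, log_file=args.logf)                        # 1
    run_on_device = check_test_mode(mode=args.mode, gpu_id=args.gpu_id)      # 2
    sample_code_test_capacity = get_test_capacity(run_on_device)             # 3
    docstrings_to_test, whl_error = get_docstring(full_test=args.full_test)  # 4
    doctester.prepare(sample_code_test_capacity)                             # 5
    test_results = get_test_results(doctester, docstrings_to_test)           # 6
    doctester.print_summary(test_results, whl_error)                         # 7
    exec_gen_doc()                                                           # 8, optional
```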
To stay compatible with the current code checking, the original tool is refactored:
- Changes to `sampcd_processor.py`:
  - Extract the docstring-extraction step and the functions that precede it into shared helpers, moved to `sampcd_processor_utils.py`.
  - Re-import these shared helpers from `sampcd_processor_utils.py`.
  - Add an `is_ps_wrapped_codeblock` function to detect sample code written with `>>> `.
  - Change `sampcd_extract_to_file` so that code for which `is_ps_wrapped_codeblock` holds is not checked.
  - In the final execution part under `if __name__ == "__main__"`, do not `sys.exit(0)` when no code is extracted, because the `xdoctest` check still needs to run.
  - In the final execution part under `if __name__ == "__main__"`, add the `xdoctest` check.
  - In the final execution part under `if __name__ == "__main__"`, remove the `exec_gen_doc` call and invoke it once at the end, together with `xdoctest`.
- Additions in `sampcd_processor_utils.py`:
  - Add the docstring-extraction functions and those preceding them in the flow, plus args and some constants. Mutable `global`s are removed and some functions are slightly modified, but the overall logic is unchanged.
  - Add the base classes `TestResult` and `Doctester`.
  - Add the `run_doctest` function and the helpers it calls internally, as the overall entry point of the doctest check.
- Additions in `sampcd_processor_xdoctest.py`:
  - Add `Xdoctester`, the `Doctester` implementation based on `xdoctest`.
  - Add an `if __name__ == "__main__"` block so it can be run standalone.
# Code checking with `Doctester`
This design introduces `Doctester` as the base class for code checking, mainly for the following reasons:
- The Python code of the original checking tool is heavily coupled internally, e.g.:
  - Logic is tightly bound: `get_filenames` can only be used for the original code extraction.
  - Mutable `global` variables make state hard to track.
  - The checking flow follows the original logic, so inserting new methods would break it.
  As a result, bolting `xdoctest` onto it would further degrade maintainability.
- Introducing `Doctester` separates docstring extraction from the checking logic, which makes it easy to plug in Python's built-in `doctest`, `xdoctest`, or other checking tools in the future.
## `Doctester` 的属性与方法
具体请参考代码中的注释,这里简单说明。
### 属性
#### `style`
代码检查服从的样式,如 `google`, `freeform`
注意,Paddle 目前的代码块是在 `.. code-block:: python` 中,而 `doctest``xdoctest` 只关心是否有 PS1 (>>> ) 的包裹,`google` 样式则是只检查 `Examples:` 中的代码。这是目前主流的代码检查工具与 Paddle 不同的地方,所以,需要沿用 Paddle 目前的 `codeblock` 抽取过程。
#### `target`

Whether the unit of checking is a `codeblock` or a whole `docstring`; Paddle currently checks per `codeblock`.

Combined with `style`, the appropriate configuration today is:

- `style = freeform`
- `target = codeblock`

That is, each `codeblock` is extracted as a unit of checking, and within it only the parts wrapped with `>>> ` or `... ` count as code.

Some clarifications (the first point is illustrated by the sketch after this list):

- Why not `style = freeform` with `target = docstring`?

  Paddle docstrings contain sections like `.. code-block:: text` whose content is merely descriptive and need not be correct; if such content happens to be wrapped with `>>> `, `xdoctest` would capture it and report failures.

- Why not `style = google` with `target = docstring`?

  Paddle also has `.. code-block:: python` code outside `Examples:` that needs checking.

- Why not `style = google` with `target = codeblock`?

  It would work: the `ensemble_docstring` method of `Doctester` can wrap a `codeblock` into a `docstring` carrying `Examples:`, but that is an unnecessary detour.

- If only one configuration fits, why offer so many choices?

  In short, for future extension and maintenance, e.g. if `.. code-block::` is dropped some day.
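A hypothetical docstring illustrating the first point: with `style = freeform` and `target = docstring`, the prompt-like line inside `.. code-block:: text` below would be captured by `xdoctest` and executed, even though it is only descriptive.

```python
def something():
    """ Function summary ...

    .. code-block:: text

        A text block that merely illustrates console interaction:

        >>> this is pseudo output, not runnable Python
        so checking the whole docstring would raise a failure here
    """
```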
#### `directives`

The directives supported by a `Doctester` can be stored in this attribute. For now it mainly enumerates the supported directives and helps with directive conversion; later it could be used for directive checking, directive mapping, and so on.
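A plausible shape for this attribute, purely illustrative (the env names follow the directive grammar suggested later in this document):

```python
# Hypothetical contents of `directives`; the real list lives in the
# `Doctester` implementation and may differ.
directives = [
    '# doctest: +SKIP',
    '# doctest: +REQUIRES(env:CPU)',
    '# doctest: +REQUIRES(env:GPU)',
    '# doctest: +REQUIRES(env:XPU)',
    '# doctest: +REQUIRES(env:DISTRIBUTED)',
]
```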
Here is the recommended format for writing sample code from now on:

- Write sample code inside `.. code-block:: python`.
- `>>> ` marks the start of a statement; `... ` marks its continuation.
- A line directly following `>>> ` or `... ` without either prompt is the expected output.
- Inside the code, `# doctest:` marks a test directive.
- At least one blank line ends a code segment.
- Anything without a prompt is plain description.

Note in particular that all code must be indented consistently.
A correct code segment, for example:

``` python
def something():
    """ Function summary ...

    Some description ...

    .. code-block:: python
        :name: code-example-0

        this is some blabla...

        >>> # doctest: +SKIP
        >>> print(1+1)
        2

    Examples:

        .. code-block:: python
            :name: code-example-1

            this is some blabla...

            >>> # doctest: +REQUIRES(env:GPU, env:XPU)
            >>> for i in range(2):
            ...     print(i)
            0
            1
    """
```
An incorrect code segment, e.g. `.. code-block:: python` is not used correctly:

``` python
def something():
    """ Function summary ...

    Some description ...

    >>> # doctest: +SKIP
    >>> print(1+1)
    2

    Examples:

        .. code-block:: python
            :name: code-example-1

            this is some blabla...

            >>> # doctest: +REQUIRES(env:GPU, env:XPU)
            >>> for i in range(2):
            ...     print(i)
            0
            1
    """
```
An incorrect code segment, e.g. the indentation is wrong (the code is not indented under its `.. code-block::`):

``` python
def something():
    """ Function summary ...

    Some description ...

    .. code-block:: python
        :name: code-example-0

    this is some blabla...

    >>> # doctest: +SKIP
    >>> print(1+1)
    2

    Examples:

        .. code-block:: python
            :name: code-example-1

        this is some blabla...

        >>> # doctest: +REQUIRES(env:GPU, env:XPU)
        >>> for i in range(2):
        ...     print(i)
        0
        1
    """
```
An incorrect code segment, e.g. directives of a specific checking tool are used:

``` python
def something():
    """ Function summary ...

    Some description ...

    .. code-block:: python
        :name: code-example-0

        this is some blabla...

        >>> # xdoctest: +SKIP
        >>> print(1+1)
        2

    Examples:

        .. code-block:: python
            :name: code-example-1

            this is some blabla...

            >>> # xdoctest: +REQUIRES(env:GPU, env:XPU)
            >>> for i in range(2):
            ...     print(i)
            0
            1
    """
```
A few special notes:

- Do not use directives tied to a specific checking tool, such as `# xdoctest: +SKIP`.

Tool-specific directives bind the sample code to that tool. Since revising sample code is a large effort, abandoning the tool later could force another sweeping rewrite of the examples.

The suggestion is therefore that Paddle define a single, unified set of check directives, and use the `convert_directive` method of `Doctester` to rewrite them dynamically, at check time, into the style the current tool expects.

Combining the directive styles of Python's native `doctest` and of `xdoctest`, the suggested grammar is:

```
directive             ::= "#" "doctest:" directive_option
directive_option      ::= on_or_off directive_option_name [env_option]
on_or_off             ::= "+" | "-"
directive_option_name ::= "SKIP" | "REQUIRES" | ...
env_option            ::= "(" env_entity ("," env_entity)* ")"
env_entity            ::= "env:" env
env                   ::= "CPU" | "GPU" | "XPU" | "DISTRIBUTED" | ...
```
The only major difference from the `xdoctest` directive style is that the prefix `xdoctest` is replaced by `doctest`.

Pay special attention to letter case. Correct directives:

- `# doctest: +SKIP`
- `# doctest: +REQUIRES(env:GPU)`
- `# doctest: +REQUIRES(env:GPU, env:XPU)`

Incorrect directives:

- `# xdoctest: +SKIP` : wrong prefix
- `# doctest: +REQUIRES(env:gpu)` : wrong lower case
- `# doctest: + REQUIRES(env:GPU)` : wrong whitespace

The relation between the `doctest`, `xdoctest`, and Paddle directives:

- `doctest` is the minimal subset.
- `xdoctest` is a superset of `doctest`, with the directive prefix changed from `doctest` to `xdoctest`.
- Paddle is essentially identical to `xdoctest`, with the prefix changed from `xdoctest` back to `doctest`.

That is, stay as compatible as possible with Python's native directive style, and extend it.
>
> **Reference**
> The directive grammar of `doctest` is defined [as follows](https://docs.python.org/3/library/doctest.html#directives):
> ```
> directive             ::= "#" "doctest:" directive_options
> directive_options     ::= directive_option ("," directive_option)*
> directive_option      ::= on_or_off directive_option_name
> on_or_off             ::= "+" | "-"
> directive_option_name ::= "DONT_ACCEPT_BLANKLINE" | "NORMALIZE_WHITESPACE" | ...
> ```
>
- Write the sample code in a Python console and copy it from there.

  The Python console uses `>>> ` as the default PS1, which maximizes compatibility. `ipython` also works, but the prompts have to be fixed up manually after copying.

- `>>> paddle.device.set_device('cpu')` is executed before the sample code; the checker already runs this command by default.

  This pins the `place` of every `tensor` to `Place(cpu)`. If `gpu` or another device is needed, set it explicitly in the sample code and add the matching directive, e.g.:
```python
>>> import paddle
>>> a = paddle.to_tensor(0.1)
>>> print(a)
Tensor(shape=[1], dtype=float32, place=Place(cpu), stop_gradient=True,
[0.10000000])
>>> # doctest: +REQUIRES(env:GPU)
>>> paddle.device.set_device('gpu')
>>> a = paddle.to_tensor(0.1)
>>> print(a)
Tensor(shape=[1], dtype=float32, place=Place(gpu:0), stop_gradient=True,
[0.10000000])
```
Finally, with the code format and directive format above, changing the example style later stays simple: e.g., to move to prompt-free examples, just strip the PS1/PS2 prompts and comment out the remaining parts.
### Methods

#### `ensemble_docstring`

Wraps a `codeblock` into a `docstring`, e.g. by prepending `Examples:` and indenting every line. Its main purpose is to turn non-`google`-style code segments into the `google` style.
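A minimal sketch of this behaviour, assuming a fixed four-space indent; the actual implementation in `sampcd_processor_utils.py` may differ in detail.

```python
def ensemble_docstring(codeblock: str) -> str:
    # Prepend `Examples:` and indent every line, so that a bare code block
    # parses as a google-style docstring. (Illustrative sketch.)
    indent = ' ' * 4
    body = '\n'.join(indent + line for line in codeblock.splitlines())
    return 'Examples:\n\n' + body + '\n'
```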
#### `convert_directive`

Converts the check directives in a docstring into the style of the current tool, e.g. `# doctest: +SKIP` into `# xdoctest: +SKIP`.
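For example, with the `Xdoctester` implementation quoted later in this document (note the trailing newline, on which the directive pattern anchors):

```python
>>> doctester = Xdoctester()
>>> doctester.convert_directive("# doctest: +REQUIRES(env:GPU)\n")
'# xdoctest: +REQUIRES(env:GPU)\n'
```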
#### `prepare`

Performs setup according to the current test environment; e.g., `xdoctest` consults `os.environ` to evaluate `REQUIRES`, so the environment variables can be set in this method.

For capacities such as `gpu`, this simply sets `os.environ['GPU'] = "True"`. If this ever clashes with existing environment variables, the scheme needs to be redesigned. Also, the variable names must match the case used in the directives, e.g. `# doctest: +REQUIRES(env:GPU)`.
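A usage sketch matching the `Xdoctester.prepare` quoted later in this document:

```python
>>> import os
>>> doctester = Xdoctester()
>>> doctester.prepare({'cpu', 'gpu'})  # log lines omitted
>>> os.environ['CPU'], os.environ['GPU']
('True', 'True')
```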
#### `run`

Runs the sample-code check.

#### `print_summary`

Prints the check results. `xdoctest` reports results in a style different from the current one; e.g., an unmet `REQUIRES` simply becomes a skip, without recording why it was skipped. That is why `print_summary` is a method of `Doctester` rather than a standalone function.
# Other classes and functions

## TestResult

A simple wrapper around a test result; it can be extended later as new needs arise.
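An illustrative sketch of its shape, inferred from how it is constructed in `Xdoctester._execute_xdoctest` below; the real class lives in `sampcd_processor_utils.py` and may carry more logic.

```python
class TestResult:
    # Fields inferred from usage below; illustrative only.
    def __init__(self, name, passed=False, skipped=False, failed=False,
                 nocode=False, test_msg='', time=0.0):
        self.name = name
        self.passed = passed
        self.skipped = skipped
        self.failed = failed
        self.nocode = nocode
        self.test_msg = test_msg
        self.time = time
```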
## Xdoctester

The `Doctester` implementation backed by `xdoctest`. It follows the `Doctester` contract; only two parameters deserve a brief note:

- `mode='native'`

  The check mode of `xdoctest`. `pytest` is also possible but unused here; the parameter only keeps an entry point for passing it in.

- `verbose=2`

  `0` prints almost nothing, `1` prints whether a check passed, and `2` also prints where exactly it failed. It is set to `2` for now and can be lowered step by step once the pipeline runs stably.
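Standalone usage mirrors the `__main__` block of `sampcd_processor_xdoctest.py` quoted below, here with the two parameters passed explicitly at their defaults:

```python
# e.g. `python sampcd_processor_xdoctest.py cpu` effectively runs:
args = parse_args()
run_doctest(
    args, doctester=Xdoctester(debug=args.debug, mode='native', verbose=2)
)
```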
## Retained functions

- `get_api_md5`
- `get_incrementapi`
- `get_full_api_by_walk`
- `get_full_api_from_pr_spec`
- `get_full_api`
- `extract_code_blocks_from_docstr`
- `get_test_capacity`
- `exec_gen_doc`
- `parse_args`
- `get_filenames` -> `get_docstring`
# Final notes

## Removing the current checker

When the original sample-code check is eventually removed:

- Remove `sampcd_processor.py`.
- Rename `sampcd_processor_xdoctest.py` to `sampcd_processor.py`.
- Remove `test_sampcd_processor.py`; some of its test functions may be worth keeping.

## Notes for Paddle docs

Paddle docs currently handles `>>> ` code by stripping the prompt and handing the result to the original checker. This works in most cases, but code with a `requires` item may fail the check. The check logic in Paddle docs should therefore be changed to skip `>>> ` code outright, consistent with Paddle's current `sampcd_processor.py`; as the final cleanup, the code check in Paddle docs can be removed altogether.
# References

- doctest — Test interactive Python examples, https://docs.python.org/3/library/doctest.html#module-doctest
- Xdoctest - Execute Doctests, https://xdoctest.readthedocs.io/en/latest/index.html
# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
please make sure to run in the tools path
usage: python sampcd_processor_xdoctest.py {cpu or gpu}
{cpu or gpu}: running in cpu version or gpu version
for example, you can run cpu version testing like this:
python sampcd_processor_xdoctest.py cpu
"""
import functools
import logging
import os
import platform
import re
import sys
import time
import typing
import xdoctest
from sampcd_processor_utils import (
TEST_TIMEOUT,
DocTester,
TestResult,
logger,
parse_args,
run_doctest,
)
XDOCTEST_CONFIG = {
"global_exec": r"\n".join(
[
"import paddle",
"paddle.device.set_device('cpu')",
]
),
"analysis": "auto",
"options": "+IGNORE_WHITESPACE",
}
class Xdoctester(DocTester):
"""A Xdoctest doctester."""
def __init__(
self,
debug=False,
style='freeform',
target='codeblock',
mode='native',
verbose=2,
**config,
):
self.debug = debug
self.style = style
self.target = target
self.mode = mode
self.verbose = verbose
self.config = {**XDOCTEST_CONFIG, **(config or {})}
# patch xdoctest global_state
from xdoctest import global_state
_debug_xdoctest = debug and verbose > 2
global_state.DEBUG = _debug_xdoctest
global_state.DEBUG_PARSER = (
global_state.DEBUG_PARSER and _debug_xdoctest
)
global_state.DEBUG_CORE = global_state.DEBUG_CORE and _debug_xdoctest
global_state.DEBUG_RUNNER = (
global_state.DEBUG_RUNNER and _debug_xdoctest
)
global_state.DEBUG_DOCTEST = (
global_state.DEBUG_DOCTEST and _debug_xdoctest
)
self.docstring_parser = functools.partial(
xdoctest.core.parse_docstr_examples, style=self.style
)
self.directive_pattern = re.compile(
r"""
(?<=(\#\s{1})) # positive lookbehind, directive begins
(doctest) # directive prefix, which should be replaced
(?= # positive lookahead, directive content
(
:\s+
[\+\-]
(REQUIRES|SKIP)
(\((env\s*:\s*(CPU|GPU|XPU|DISTRIBUTED)\s*,?\s*)+\))?
)
\s*\n+
)""",
re.X,
)
self.directive_prefix = 'xdoctest'
def convert_directive(self, docstring: str) -> str:
"""Replace directive prefix with xdoctest"""
return self.directive_pattern.sub(self.directive_prefix, docstring)
def prepare(self, test_capacity: set):
"""Set environs for xdoctest directive.
The keys in environs, which are also used in `# xdoctest: +REQUIRES(env:XX)`, should be UPPER case.
If `test_capacity = {"cpu"}`, then we set:
- `os.environ["CPU"] = "True"`
which makes this SKIPPED:
- # xdoctest: +REQUIRES(env:GPU)
If `test_capacity = {"cpu", "gpu"}`, then we set:
- `os.environ["CPU"] = "True"`
- `os.environ["GPU"] = "True"`
which makes this SUCCESS:
- # xdoctest: +REQUIRES(env:GPU)
"""
logger.info("Set xdoctest environ ...")
for capacity in test_capacity:
key = capacity.upper()
os.environ[key] = "True"
logger.info("Environ: %s , set to True.", key)
logger.info("API check using Xdoctest prepared!-- Example Code")
logger.info("running under python %s", platform.python_version())
logger.info("running under xdoctest %s", xdoctest.__version__)
def run(self, api_name: str, docstring: str) -> typing.List[TestResult]:
"""Run the xdoctest with a docstring."""
examples_to_test, examples_nocode = self._extract_examples(
api_name, docstring
)
return self._execute_xdoctest(examples_to_test, examples_nocode)
def _extract_examples(self, api_name, docstring):
"""Extract code block examples from docstring."""
examples_to_test = {}
examples_nocode = {}
for example_idx, example in enumerate(
self.docstring_parser(docstr=docstring, callname=api_name)
):
example.mode = self.mode
example.config.update(self.config)
example_key = f"{api_name}_{example_idx}"
# check whether there are some parts parsed by xdoctest
if not example._parts:
examples_nocode[example_key] = example
continue
examples_to_test[example_key] = example
if not examples_nocode and not examples_to_test:
examples_nocode[api_name] = api_name
return examples_to_test, examples_nocode
def _execute_xdoctest(self, examples_to_test, examples_nocode):
"""Run xdoctest for each example"""
test_results = []
for _, example in examples_to_test.items():
start_time = time.time()
result = example.run(verbose=self.verbose, on_error='return')
end_time = time.time()
test_results.append(
TestResult(
name=str(example),
passed=result['passed'],
skipped=result['skipped'],
failed=result['failed'],
test_msg=result['exc_info'],
time=end_time - start_time,
)
)
for _, example in examples_nocode.items():
test_results.append(TestResult(name=str(example), nocode=True))
return test_results
def print_summary(self, test_results, whl_error):
summary_success = []
summary_failed = []
summary_skiptest = []
summary_nocodes = []
stdout_handler = logging.StreamHandler(stream=sys.stdout)
logger.addHandler(stdout_handler)
logger.info("----------------End of the Check--------------------")
if len(whl_error) != 0:
logger.info("%s is not in whl.", whl_error)
logger.info("")
logger.info("Please check the whl package and API_PR.spec!")
logger.info(
"You can follow these steps in order to generate API.spec:"
)
logger.info("1. cd ${paddle_path}, compile paddle;")
logger.info("2. pip install build/python/dist/(build whl package);")
logger.info(
"3. run 'python tools/print_signatures.py paddle > paddle/fluid/API.spec'."
)
for test_result in test_results:
if test_result.failed:
logger.info(
"In addition, mistakes found in sample codes: %s",
test_result.name,
)
logger.info("----------------------------------------------------")
sys.exit(1)
else:
timeovered_test = {}
for test_result in test_results:
if not test_result.nocode:
if test_result.passed:
summary_success.append(test_result.name)
if test_result.skipped:
summary_skiptest.append(test_result.name)
if test_result.failed:
logger.info(
"In addition, mistakes found in sample codes: %s",
test_result.name,
)
summary_failed.append(test_result.name)
if test_result.time > TEST_TIMEOUT:
timeovered_test[test_result.name] = test_result.time
else:
summary_nocodes.append(test_result.name)
if len(timeovered_test):
logger.info(
"%d sample codes ran time over 10s", len(timeovered_test)
)
if self.debug:
for k, v in timeovered_test.items():
logger.info(f'{k} - {v}s')
if len(summary_success):
logger.info("%d sample codes ran success", len(summary_success))
if len(summary_skiptest):
logger.info("%d sample codes skipped", len(summary_skiptest))
if self.debug:
logger.info('\n'.join(summary_skiptest))
if len(summary_nocodes):
logger.info(
"%d apis don't have sample codes", len(summary_nocodes)
)
if self.debug:
logger.info('\n'.join(summary_nocodes))
if len(summary_failed):
logger.info("%d sample codes ran failed", len(summary_failed))
logger.info('\n'.join(summary_failed))
logger.info(
"Mistakes found in sample codes. Please recheck the sample codes."
)
sys.exit(1)
logger.info("Sample code check is successful!")
if __name__ == '__main__':
args = parse_args()
run_doctest(args, doctester=Xdoctester(debug=args.debug))
......@@ -25,13 +25,12 @@ from sampcd_processor import (
extract_code_blocks_from_docstr,
find_all,
find_last_future_line_end,
get_api_md5,
get_incrementapi,
get_test_capacity,
insert_codes_into_codeblock,
is_required_match,
sampcd_extract_to_file,
)
from sampcd_processor_utils import get_api_md5, get_incrementapi
class Test_find_all(unittest.TestCase):
......@@ -114,6 +113,7 @@ class Test_extract_code_blocks_from_docstr(unittest.TestCase):
'name': None,
'id': 1,
'required': None,
'in_examples': True,
}
],
)
......@@ -142,6 +142,7 @@ class Test_extract_code_blocks_from_docstr(unittest.TestCase):
'name': None,
'id': 1,
'required': None,
'in_examples': True,
},
{
'codes': """# required: gpu
......@@ -149,6 +150,7 @@ print(1+1)""",
'name': 'one_plus_one',
'id': 2,
'required': 'gpu',
'in_examples': True,
},
],
)
......@@ -479,6 +481,20 @@ class Test_sampcd_extract_to_file(unittest.TestCase):
sampcd_processor.SUMMARY_INFO['distributed'], [funcname + '-5']
)
def test_skip_ps_wrapped_code(self):
comments = """
placeholder
Examples:
.. code-block:: python
>>> print(1 + 1)
2
"""
funcname = 'one_plus_one'
sample_code_filenames = sampcd_extract_to_file(comments, funcname)
self.assertCountEqual([], sample_code_filenames)
class Test_get_api_md5(unittest.TestCase):
def setUp(self):
......