Unverified commit 2dbd47b2 authored by megemini, committed by GitHub

Fix convert directive (#55811)

* [Change] directive_pattern to all patterns

* [Change] patch tensor place

* [Change] patch float precision

* [Change] print_summary

* [Add] change uniform.py docstring

* [Fix] fix print_summary whl_error

* [Change] catch except
Parent 859fc01b
@@ -62,33 +62,42 @@ class Uniform(distribution.Distribution):
Examples:
.. code-block:: python
import paddle
from paddle.distribution import Uniform
# Without broadcasting, a single uniform distribution [3, 4]:
u1 = Uniform(low=3.0, high=4.0)
# 2 distributions [1, 3], [2, 4]
u2 = Uniform(low=[1.0, 2.0], high=[3.0, 4.0])
# 4 distributions
u3 = Uniform(low=[[1.0, 2.0], [3.0, 4.0]],
high=[[1.5, 2.5], [3.5, 4.5]])
# With broadcasting:
u4 = Uniform(low=3.0, high=[5.0, 6.0, 7.0])
# Complete example
value_tensor = paddle.to_tensor([0.8], dtype="float32")
uniform = Uniform([0.], [2.])
sample = uniform.sample([2])
# a random tensor created by uniform distribution with shape: [2, 1]
entropy = uniform.entropy()
# [0.6931472] with shape: [1]
lp = uniform.log_prob(value_tensor)
# [-0.6931472] with shape: [1]
p = uniform.probs(value_tensor)
# [0.5] with shape: [1]
>>> import paddle
>>> from paddle.distribution import Uniform
>>> paddle.seed(2023)
>>> # Without broadcasting, a single uniform distribution [3, 4]:
>>> u1 = Uniform(low=3.0, high=4.0)
>>> # 2 distributions [1, 3], [2, 4]
>>> u2 = Uniform(low=[1.0, 2.0], high=[3.0, 4.0])
>>> # 4 distributions
>>> u3 = Uniform(low=[[1.0, 2.0], [3.0, 4.0]],
... high=[[1.5, 2.5], [3.5, 4.5]])
...
>>> # With broadcasting:
>>> u4 = Uniform(low=3.0, high=[5.0, 6.0, 7.0])
>>> # Complete example
>>> value_tensor = paddle.to_tensor([0.8], dtype="float32")
>>> uniform = Uniform([0.], [2.])
>>> sample = uniform.sample([2])
>>> # a random tensor created by uniform distribution with shape: [2, 1]
>>> entropy = uniform.entropy()
>>> print(entropy)
Tensor(shape=[1], dtype=float32, place=Place(cpu), stop_gradient=True,
[0.69314718])
>>> lp = uniform.log_prob(value_tensor)
>>> print(lp)
Tensor(shape=[1], dtype=float32, place=Place(cpu), stop_gradient=True,
[-0.69314718])
>>> p = uniform.probs(value_tensor)
>>> print(p)
Tensor(shape=[1], dtype=float32, place=Place(cpu), stop_gradient=True,
[0.50000000])
"""
def __init__(self, low, high, name=None):
......
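The Uniform docstring above is migrated from plain comments that describe expected values to xdoctest-style `>>>` examples whose printed Tensor output is checked directly. Below is a minimal sketch (not part of this PR) of how such an example is discovered and executed; the tiny docstring is hypothetical and only illustrates the prompt/expected-output format, and it assumes xdoctest's `parse_docstr_examples` and `DocTest.run` interfaces behave as described.

```python
# Sketch (not part of this PR): parse and run a ">>>"-style docstring example
# the way the sample-code checker does. The docstring here is hypothetical.
import xdoctest

docstring = """
Examples:
    .. code-block:: python

        >>> print(1 + 1)
        2
"""

# parse_docstr_examples yields one DocTest object per example block
for example in xdoctest.core.parse_docstr_examples(docstring, style='freeform'):
    example.run(verbose=0)  # records a failure if "got" does not match "want"
```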
@@ -48,11 +48,124 @@ XDOCTEST_CONFIG = {
"paddle.device.set_device('cpu')",
]
),
"analysis": "auto",
"options": "+IGNORE_WHITESPACE",
"default_runtime_state": {"IGNORE_WHITESPACE": True},
}
def _patch_global_state(debug, verbose):
    # patch xdoctest global_state
    from xdoctest import global_state

    _debug_xdoctest = debug and verbose > 2
    global_state.DEBUG = _debug_xdoctest
    global_state.DEBUG_PARSER = global_state.DEBUG_PARSER and _debug_xdoctest
    global_state.DEBUG_CORE = global_state.DEBUG_CORE and _debug_xdoctest
    global_state.DEBUG_RUNNER = global_state.DEBUG_RUNNER and _debug_xdoctest
    global_state.DEBUG_DOCTEST = global_state.DEBUG_DOCTEST and _debug_xdoctest
def _patch_tensor_place():
    from xdoctest import checker

    pattern_tensor = re.compile(
        r"""
        (Tensor\(.*?place=)     # Tensor start
        (.*?)                   # Place=(XXX)
        (\,.*?\))
        """,
        re.X | re.S,
    )

    _check_output = checker.check_output

    def check_output(got, want, runstate=None):
        if not want:  # nocover
            return True

        return _check_output(
            got=pattern_tensor.sub(r'\1Place(cpu)\3', got),
            want=pattern_tensor.sub(r'\1Place(cpu)\3', want),
            runstate=runstate,
        )

    checker.check_output = check_output
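`_patch_tensor_place` wraps xdoctest's `checker.check_output` so that the device shown in a Tensor repr never causes a mismatch: both the actual and the expected output are rewritten to `Place(cpu)` before comparison. A small sketch (not part of this PR) of the substitution's effect, reusing the same regex:

```python
# Sketch (not part of this PR): the place-normalization regex rewrites any
# device in a Tensor repr to Place(cpu), so CPU and GPU runs compare equal.
import re

pattern_tensor = re.compile(
    r"""
    (Tensor\(.*?place=)  # Tensor repr up to "place="
    (.*?)                # the actual place, e.g. Place(gpu:0)
    (\,.*?\))            # the rest of the repr
    """,
    re.X | re.S,
)

got = "Tensor(shape=[1], dtype=float32, place=Place(gpu:0), stop_gradient=True,\n       [0.50000000])"
print(pattern_tensor.sub(r'\1Place(cpu)\3', got))
# Tensor(shape=[1], dtype=float32, place=Place(cpu), stop_gradient=True,
#        [0.50000000])
```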
def _patch_float_precision(digits):
    from xdoctest import checker

    pattern_number = re.compile(
        r"""
        (?:
            (?<=[\s*\[\(\'\"\:])                    # number starts
            (?:                                     # int/float or complex-real
                (?:
                    [+-]?
                    (?:
                        (?: \d*\.\d+) | (?: \d+\.?) # int/float
                    )
                )
                (?:[Ee][+-]?\d+)?
            )
            (?:                                     # complex-imag
                (?:
                    (?:
                        [+-]?
                        (?:
                            (?: \d*\.\d+) | (?: \d+\.?)
                        )
                    )
                    (?:[Ee][+-]?\d+)?
                )
                (?:[Jj])
            )?
        )
        """,
        re.X | re.S,
    )

    _check_output = checker.check_output

    def _sub_number(match_obj, digits):
        match_str = match_obj.group()

        if 'j' in match_str or 'J' in match_str:
            try:
                match_num = complex(match_str)
            except ValueError:
                return match_str

            return (
                str(
                    complex(
                        round(match_num.real, digits),
                        round(match_num.imag, digits),
                    )
                )
                .strip('(')
                .strip(')')
            )
        else:
            try:
                return str(round(float(match_str), digits))
            except ValueError:
                return match_str

    sub_number = functools.partial(_sub_number, digits=digits)

    def check_output(got, want, runstate=None):
        if not want:  # nocover
            return True

        return _check_output(
            got=pattern_number.sub(sub_number, got),
            want=pattern_number.sub(sub_number, want),
            runstate=runstate,
        )

    checker.check_output = check_output
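`_patch_float_precision` applies the same wrapping idea to numeric literals: every number found in both the actual and the expected output is rounded to `digits` decimal places before `check_output` compares them, so small print-precision differences do not fail a sample. A tiny sketch (not part of this PR) of the rounding rule used by `_sub_number`:

```python
# Sketch (not part of this PR): with digits=5, both strings normalize to the
# same value, so an example printing 0.69314718 still matches an expected
# output written as 0.6931472.
print(str(round(float("0.69314718"), 5)))  # 0.69315
print(str(round(float("0.6931472"), 5)))   # 0.69315
```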
class Xdoctester(DocTester):
"""A Xdoctest doctester."""
@@ -63,6 +176,10 @@ class Xdoctester(DocTester):
target='codeblock',
mode='native',
verbose=2,
patch_global_state=True,
patch_tensor_place=True,
patch_float_precision=True,
patch_float_digits=5,
**config,
):
self.debug = debug
@@ -73,21 +190,14 @@ class Xdoctester(DocTester):
self.verbose = verbose
self.config = {**XDOCTEST_CONFIG, **(config or {})}
# patch xdoctest global_state
from xdoctest import global_state
if patch_global_state:
_patch_global_state(self.debug, self.verbose)
_debug_xdoctest = debug and verbose > 2
global_state.DEBUG = _debug_xdoctest
global_state.DEBUG_PARSER = (
global_state.DEBUG_PARSER and _debug_xdoctest
)
global_state.DEBUG_CORE = global_state.DEBUG_CORE and _debug_xdoctest
global_state.DEBUG_RUNNER = (
global_state.DEBUG_RUNNER and _debug_xdoctest
)
global_state.DEBUG_DOCTEST = (
global_state.DEBUG_DOCTEST and _debug_xdoctest
)
if patch_tensor_place:
_patch_tensor_place()
if patch_float_precision:
_patch_float_precision(patch_float_digits)
self.docstring_parser = functools.partial(
xdoctest.core.parse_docstr_examples, style=self.style
@@ -95,19 +205,13 @@ class Xdoctester(DocTester):
self.directive_pattern = re.compile(
r"""
(?<=(\#\s{1})) # positive lookbehind, directive begins
(?<=(\#\s)) # positive lookbehind, directive begins
(doctest) # directive prefix, which should be replaced
(?= # positive lookahead, directive content
(
:\s+
[\+\-]
(REQUIRES|SKIP)
(\((env\s*:\s*(CPU|GPU|XPU|DISTRIBUTED)\s*,?\s*)+\))?
)
\s*\n+
)""",
(?=(:\s*.*\n)) # positive lookahead, directive content
""",
re.X,
)
self.directive_prefix = 'xdoctest'
def convert_directive(self, docstring: str) -> str:
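The simplified directive pattern above is the core of this commit's "directive_pattern to all patterns" change: instead of matching only `+REQUIRES(env: ...)` and `+SKIP` directives, any `# doctest: ...` comment now has its `doctest` prefix rewritten to the `xdoctest` prefix by `convert_directive`. A small sketch (not part of this PR) of the broadened substitution:

```python
# Sketch (not part of this PR): any "# doctest: ..." directive is rewritten
# to "# xdoctest: ...", regardless of the directive that follows the colon.
import re

directive_pattern = re.compile(
    r"""
    (?<=(\#\s))     # positive lookbehind, directive begins
    (doctest)       # directive prefix, which should be replaced
    (?=(:\s*.*\n))  # positive lookahead, directive content
    """,
    re.X,
)

docstring = ">>> # doctest: +SKIP('reason')\n"
print(directive_pattern.sub('xdoctest', docstring))
# >>> # xdoctest: +SKIP('reason')
```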
@@ -199,7 +303,7 @@ class Xdoctester(DocTester):
return test_results
def print_summary(self, test_results, whl_error):
def print_summary(self, test_results, whl_error=None):
summary_success = []
summary_failed = []
summary_skiptest = []
@@ -208,7 +312,7 @@ class Xdoctester(DocTester):
stdout_handler = logging.StreamHandler(stream=sys.stdout)
logger.addHandler(stdout_handler)
logger.info("----------------End of the Check--------------------")
if len(whl_error) != 0:
if whl_error is not None and whl_error:
logger.info("%s is not in whl.", whl_error)
logger.info("")
logger.info("Please check the whl package and API_PR.spec!")
@@ -239,10 +343,6 @@ class Xdoctester(DocTester):
summary_skiptest.append(test_result.name)
if test_result.failed:
logger.info(
"In addition, mistakes found in sample codes: %s",
test_result.name,
)
summary_failed.append(test_result.name)
if test_result.time > TEST_TIMEOUT:
@@ -259,16 +359,19 @@ class Xdoctester(DocTester):
logger.info(f'{k} - {v}s')
if len(summary_success):
logger.info("%d sample codes ran success", len(summary_success))
logger.info('\n'.join(summary_success))
if len(summary_skiptest):
logger.info("%d sample codes skipped", len(summary_skiptest))
if self.debug:
logger.info('\n'.join(summary_skiptest))
if len(summary_nocodes):
logger.info(
"%d apis don't have sample codes", len(summary_nocodes)
"%d apis could not run test or don't have sample codes",
len(summary_nocodes),
)
if self.debug:
logger.info('\n'.join(summary_nocodes))
if len(summary_failed):
logger.info("%d sample codes ran failed", len(summary_failed))
logger.info('\n'.join(summary_failed))
......