Unverified commit 2dbd47b2, authored by megemini, committed by GitHub

Fix convert directive (#55811)

* [Change] directive_pattern to all patterns

* [Change] patch tensor place

* [Change] patch float precision

* [Change] print_summary

* [Add] change uniform.py docstring

* [Fix] fix print_summary whl_error

* [Change] catch except
Parent: 859fc01b
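The core of this change is the directive conversion: `# doctest: ...` directives in example code are rewritten to the `# xdoctest: ...` prefix, and the matching pattern is relaxed to cover any directive content rather than only the `+REQUIRES`/`+SKIP` forms. Below is a minimal sketch of that conversion using the simplified pattern from the diff that follows; the body of `convert_directive` itself is not shown in this commit, so the `sub` call here is an assumed stand-in for how the prefix replacement is applied.

    import re

    directive_pattern = re.compile(
        r"""
        (?<=(\#\s))      # positive lookbehind, directive begins
        (doctest)        # directive prefix, which should be replaced
        (?=(:\s*.*\n))   # positive lookahead, directive content
        """,
        re.X,
    )

    docstring = '# doctest: +SKIP("skip this test...")\n'
    print(directive_pattern.sub('xdoctest', docstring))
    # -> # xdoctest: +SKIP("skip this test...")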
@@ -62,33 +62,42 @@ class Uniform(distribution.Distribution):
    Examples:
        .. code-block:: python

            >>> import paddle
            >>> from paddle.distribution import Uniform
            >>> paddle.seed(2023)

            >>> # Without broadcasting, a single uniform distribution [3, 4]:
            >>> u1 = Uniform(low=3.0, high=4.0)
            >>> # 2 distributions [1, 3], [2, 4]
            >>> u2 = Uniform(low=[1.0, 2.0], high=[3.0, 4.0])
            >>> # 4 distributions
            >>> u3 = Uniform(low=[[1.0, 2.0], [3.0, 4.0]],
            ...              high=[[1.5, 2.5], [3.5, 4.5]])

            >>> # With broadcasting:
            >>> u4 = Uniform(low=3.0, high=[5.0, 6.0, 7.0])

            >>> # Complete example
            >>> value_tensor = paddle.to_tensor([0.8], dtype="float32")

            >>> uniform = Uniform([0.], [2.])

            >>> sample = uniform.sample([2])
            >>> # a random tensor created by uniform distribution with shape: [2, 1]
            >>> entropy = uniform.entropy()
            >>> print(entropy)
            Tensor(shape=[1], dtype=float32, place=Place(cpu), stop_gradient=True,
            [0.69314718])

            >>> lp = uniform.log_prob(value_tensor)
            >>> print(lp)
            Tensor(shape=[1], dtype=float32, place=Place(cpu), stop_gradient=True,
            [-0.69314718])

            >>> p = uniform.probs(value_tensor)
            >>> print(p)
            Tensor(shape=[1], dtype=float32, place=Place(cpu), stop_gradient=True,
            [0.50000000])
    """

    def __init__(self, low, high, name=None):
......
@@ -48,11 +48,124 @@ XDOCTEST_CONFIG = {
            "paddle.device.set_device('cpu')",
        ]
    ),
    "default_runtime_state": {"IGNORE_WHITESPACE": True},
}
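# A minimal usage sketch (not part of this module; assumes it runs where
# sampcd_processor_xdoctest is importable, as the unit tests below do):
# keyword arguments passed to Xdoctester merge over these defaults via
# `{**XDOCTEST_CONFIG, **(config or {})}`.
from sampcd_processor_xdoctest import Xdoctester

doctester = Xdoctester(analysis='static')
assert doctester.config['analysis'] == 'static'
assert doctester.config['default_runtime_state'] == {"IGNORE_WHITESPACE": True}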
def _patch_global_state(debug, verbose):
    # patch xdoctest global_state
    from xdoctest import global_state

    _debug_xdoctest = debug and verbose > 2
    global_state.DEBUG = _debug_xdoctest
    global_state.DEBUG_PARSER = global_state.DEBUG_PARSER and _debug_xdoctest
    global_state.DEBUG_CORE = global_state.DEBUG_CORE and _debug_xdoctest
    global_state.DEBUG_RUNNER = global_state.DEBUG_RUNNER and _debug_xdoctest
    global_state.DEBUG_DOCTEST = global_state.DEBUG_DOCTEST and _debug_xdoctest


def _patch_tensor_place():
    from xdoctest import checker

    pattern_tensor = re.compile(
        r"""
        (Tensor\(.*?place=)     # Tensor start
        (.*?)                   # Place=(XXX)
        (\,.*?\))
        """,
        re.X | re.S,
    )

    _check_output = checker.check_output

    def check_output(got, want, runstate=None):
        if not want:  # nocover
            return True

        # normalize any device place to Place(cpu) on both sides before
        # delegating to xdoctest's original checker
        return _check_output(
            got=pattern_tensor.sub(r'\1Place(cpu)\3', got),
            want=pattern_tensor.sub(r'\1Place(cpu)\3', want),
            runstate=runstate,
        )

    checker.check_output = check_output
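# A standalone sketch of the normalization above (the compact pattern below is
# equivalent to pattern_tensor once re.X formatting is stripped): any device
# place printed in a Tensor repr is rewritten to Place(cpu) on both sides
# before xdoctest compares them.
import re

place_pattern = re.compile(r"(Tensor\(.*?place=)(.*?)(\,.*?\))", re.S)
want = "Tensor(shape=[1], dtype=float32, place=Place(gpu:0), stop_gradient=True,\n[0.20000000])"
print(place_pattern.sub(r'\1Place(cpu)\3', want))
# Tensor(shape=[1], dtype=float32, place=Place(cpu), stop_gradient=True,
# [0.20000000])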
def _patch_float_precision(digits):
    from xdoctest import checker

    pattern_number = re.compile(
        r"""
        (?:
            (?<=[\s*\[\(\'\"\:])                # number starts
            (?:                                 # int/float or complex-real
                (?:
                    [+-]?
                    (?:
                        (?: \d*\.\d+) | (?: \d+\.?)     # int/float
                    )
                )
                (?:[Ee][+-]?\d+)?
            )
            (?:                                 # complex-imag
                (?:
                    (?:
                        [+-]?
                        (?:
                            (?: \d*\.\d+) | (?: \d+\.?)
                        )
                    )
                    (?:[Ee][+-]?\d+)?
                )
                (?:[Jj])
            )?
        )
        """,
        re.X | re.S,
    )

    _check_output = checker.check_output

    def _sub_number(match_obj, digits):
        match_str = match_obj.group()

        # complex numbers are rounded component-wise; anything that fails to
        # parse is left untouched
        if 'j' in match_str or 'J' in match_str:
            try:
                match_num = complex(match_str)
            except ValueError:
                return match_str

            return (
                str(
                    complex(
                        round(match_num.real, digits),
                        round(match_num.imag, digits),
                    )
                )
                .strip('(')
                .strip(')')
            )
        else:
            try:
                return str(round(float(match_str), digits))
            except ValueError:
                return match_str

    sub_number = functools.partial(_sub_number, digits=digits)

    def check_output(got, want, runstate=None):
        if not want:  # nocover
            return True

        # round every numeric literal in got/want to `digits` before comparison
        return _check_output(
            got=pattern_number.sub(sub_number, got),
            want=pattern_number.sub(sub_number, want),
            runstate=runstate,
        )

    checker.check_output = check_output
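# A minimal end-to-end sketch (assumes xdoctest is installed and the helpers
# above are in scope): once both patches are applied, checker.check_output
# treats outputs as equal when they differ only in device place or in float
# digits beyond the rounding precision.
from xdoctest import checker

_patch_tensor_place()
_patch_float_precision(digits=5)

got = "Tensor(shape=[1], dtype=float32, place=Place(gpu:0), stop_gradient=True,\n[0.12345678])"
want = "Tensor(shape=[1], dtype=float32, place=Place(cpu), stop_gradient=True,\n[0.12345681])"
print(checker.check_output(got, want))  # expected: True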
class Xdoctester(DocTester):
    """A Xdoctest doctester."""

@@ -63,6 +176,10 @@ class Xdoctester(DocTester):
        target='codeblock',
        mode='native',
        verbose=2,
        patch_global_state=True,
        patch_tensor_place=True,
        patch_float_precision=True,
        patch_float_digits=5,
        **config,
    ):
        self.debug = debug

@@ -73,21 +190,14 @@ class Xdoctester(DocTester):
        self.verbose = verbose
        self.config = {**XDOCTEST_CONFIG, **(config or {})}

        if patch_global_state:
            _patch_global_state(self.debug, self.verbose)

        if patch_tensor_place:
            _patch_tensor_place()

        if patch_float_precision:
            _patch_float_precision(patch_float_digits)

        self.docstring_parser = functools.partial(
            xdoctest.core.parse_docstr_examples, style=self.style
@@ -95,19 +205,13 @@ class Xdoctester(DocTester):
        self.directive_pattern = re.compile(
            r"""
            (?<=(\#\s))     # positive lookbehind, directive begins
            (doctest)       # directive prefix, which should be replaced
            (?=(:\s*.*\n))  # positive lookahead, directive content
            """,
            re.X,
        )

        self.directive_prefix = 'xdoctest'

    def convert_directive(self, docstring: str) -> str:
@@ -199,7 +303,7 @@ class Xdoctester(DocTester):
        return test_results

    def print_summary(self, test_results, whl_error=None):
        summary_success = []
        summary_failed = []
        summary_skiptest = []

@@ -208,7 +312,7 @@ class Xdoctester(DocTester):
        stdout_handler = logging.StreamHandler(stream=sys.stdout)
        logger.addHandler(stdout_handler)
        logger.info("----------------End of the Check--------------------")
        if whl_error is not None and whl_error:
            logger.info("%s is not in whl.", whl_error)
            logger.info("")
            logger.info("Please check the whl package and API_PR.spec!")
@@ -239,10 +343,6 @@ class Xdoctester(DocTester):
                summary_skiptest.append(test_result.name)

            if test_result.failed:
                summary_failed.append(test_result.name)

            if test_result.time > TEST_TIMEOUT:

@@ -259,16 +359,19 @@ class Xdoctester(DocTester):
            logger.info(f'{k} - {v}s')

        if len(summary_success):
            logger.info("%d sample codes ran success", len(summary_success))
            logger.info('\n'.join(summary_success))

        if len(summary_skiptest):
            logger.info("%d sample codes skipped", len(summary_skiptest))
            if self.debug:
                logger.info('\n'.join(summary_skiptest))

        if len(summary_nocodes):
            logger.info(
                "%d apis could not run test or don't have sample codes",
                len(summary_nocodes),
            )
            if self.debug:
                logger.info('\n'.join(summary_nocodes))

        if len(summary_failed):
            logger.info("%d sample codes ran failed", len(summary_failed))
            logger.info('\n'.join(summary_failed))
......
@@ -14,9 +14,11 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import importlib
import os
import unittest

import xdoctest

from sampcd_processor_utils import get_test_results
from sampcd_processor_xdoctest import Xdoctester
@@ -37,7 +39,6 @@ class TestXdoctester(unittest.TestCase):
        self.assertEqual(doctester.style, 'freeform')
        self.assertEqual(doctester.target, 'codeblock')
        self.assertEqual(doctester.mode, 'native')

        doctester = Xdoctester(analysis='static')
        self.assertEqual(doctester.config['analysis'], 'static')
@@ -49,6 +50,11 @@ class TestXdoctester(unittest.TestCase):
        docstring_target = "# xdoctest: -SKIP\n"
        self.assertEqual(docstring_output, docstring_target)

        docstring_input = '# doctest: +SKIP("skip this test...")\n'
        docstring_output = doctester.convert_directive(docstring_input)
        docstring_target = '# xdoctest: +SKIP("skip this test...")\n'
        self.assertEqual(docstring_output, docstring_target)

        docstring_input = """
        placeholder
@@ -138,6 +144,674 @@ class TestGetTestResults(unittest.TestCase):
class TestGetTestResults(unittest.TestCase):
def test_patch_xdoctest(self):
# test patch tensor place
_clear_environ()
test_capacity = {'cpu'}
doctester = Xdoctester(style='freeform', target='codeblock')
doctester.prepare(test_capacity)
docstrings_to_test = {
'gpu_to_gpu': """
placeholder
Examples:
.. code-block:: python
:name: code-example-1
this is some blabla...
>>> import paddle
>>> paddle.device.set_device('gpu')
>>> a = paddle.to_tensor(.2)
>>> # Tensor(shape=[1], dtype=float32, place=Place(gpu:0), stop_gradient=True, [0.20000000])
>>> print(a)
Tensor(shape=[1], dtype=float32, place=Place(gpu:0), stop_gradient=True,
[0.20000000])
""",
'cpu_to_cpu': """
placeholder
Examples:
.. code-block:: python
:name: code-example-1
this is some blabla...
>>> import paddle
>>> paddle.device.set_device('cpu')
>>> a = paddle.to_tensor(.2)
>>> # Tensor(shape=[1], dtype=float32, place=Place(cpu), stop_gradient=True, [0.20000000])
>>> print(a)
Tensor(shape=[1], dtype=float32, place=Place(cpu), stop_gradient=True,
[0.20000000])
""",
'gpu_to_cpu': """
placeholder
Examples:
.. code-block:: python
:name: code-example-1
this is some blabla...
>>> import paddle
>>> paddle.device.set_device('gpu')
>>> a = paddle.to_tensor(.2)
>>> # Tensor(shape=[1], dtype=float32, place=Place(gpu:0), stop_gradient=True, [0.20000000])
>>> print(a)
Tensor(shape=[1], dtype=float32, place=Place(cpu), stop_gradient=True,
[0.20000000])
""",
'cpu_to_gpu': """
placeholder
Examples:
.. code-block:: python
:name: code-example-1
this is some blabla...
>>> import paddle
>>> paddle.device.set_device('cpu')
>>> a = paddle.to_tensor(.2)
>>> # Tensor(shape=[1], dtype=float32, place=Place(cpu), stop_gradient=True, [0.20000000])
>>> print(a)
Tensor(shape=[1], dtype=float32, place=Place(gpu:0), stop_gradient=True,
[0.20000000])
""",
'gpu_to_cpu_array': """
placeholder
Examples:
.. code-block:: python
:name: code-example-1
this is some blabla...
>>> import paddle
>>> paddle.device.set_device('gpu')
>>> a = paddle.to_tensor([[1,2,3], [2,3,4], [3,4,5]])
>>> # Tensor(shape=[3, 3], dtype=int64, place=Place(gpu:0), stop_gradient=True,
>>> # [[1, 2, 3],
>>> # [2, 3, 4],
>>> # [3, 4, 5]])
>>> print(a)
Tensor(shape=[3, 3], dtype=int64, place=Place(cpu), stop_gradient=True,
[[1, 2, 3],
[2, 3, 4],
[3, 4, 5]])
""",
'cpu_to_gpu_array': """
placeholder
Examples:
.. code-block:: python
:name: code-example-1
this is some blabla...
>>> import paddle
>>> paddle.device.set_device('cpu')
>>> a = paddle.to_tensor([[1,2,3], [2,3,4], [3,4,5]])
>>> # Tensor(shape=[3, 3], dtype=int64, place=Place(cpu), stop_gradient=True,
>>> # [[1, 2, 3],
>>> # [2, 3, 4],
>>> # [3, 4, 5]])
>>> print(a)
Tensor(shape=[3, 3], dtype=int64, place=Place(gpu:0), stop_gradient=True,
[[1, 2, 3],
[2, 3, 4],
[3, 4, 5]])
""",
}
test_results = get_test_results(doctester, docstrings_to_test)
self.assertEqual(len(test_results), 6)
tr_0, tr_1, tr_2, tr_3, tr_4, tr_5 = test_results
self.assertIn('gpu_to_gpu', tr_0.name)
self.assertTrue(tr_0.passed)
self.assertIn('cpu_to_cpu', tr_1.name)
self.assertTrue(tr_1.passed)
self.assertIn('gpu_to_cpu', tr_2.name)
self.assertTrue(tr_2.passed)
self.assertIn('cpu_to_gpu', tr_3.name)
self.assertTrue(tr_3.passed)
self.assertIn('gpu_to_cpu_array', tr_4.name)
self.assertTrue(tr_4.passed)
self.assertIn('cpu_to_gpu_array', tr_5.name)
self.assertTrue(tr_5.passed)
# reload xdoctest.checker
importlib.reload(xdoctest.checker)
_clear_environ()
test_capacity = {'cpu'}
doctester = Xdoctester(
style='freeform', target='codeblock', patch_tensor_place=False
)
doctester.prepare(test_capacity)
test_results = get_test_results(doctester, docstrings_to_test)
self.assertEqual(len(test_results), 6)
tr_0, tr_1, tr_2, tr_3, tr_4, tr_5 = test_results
self.assertIn('gpu_to_gpu', tr_0.name)
self.assertTrue(tr_0.passed)
self.assertIn('cpu_to_cpu', tr_1.name)
self.assertTrue(tr_1.passed)
self.assertIn('gpu_to_cpu', tr_2.name)
self.assertFalse(tr_2.passed)
self.assertIn('cpu_to_gpu', tr_3.name)
self.assertFalse(tr_3.passed)
self.assertIn('gpu_to_cpu_array', tr_4.name)
self.assertFalse(tr_4.passed)
self.assertIn('cpu_to_gpu_array', tr_5.name)
self.assertFalse(tr_5.passed)
# test patch float precision
# reload xdoctest.checker
importlib.reload(xdoctest.checker)
_clear_environ()
test_capacity = {'cpu'}
doctester = Xdoctester(style='freeform', target='codeblock')
doctester.prepare(test_capacity)
docstrings_to_test = {
'gpu_to_gpu': """
placeholder
Examples:
.. code-block:: python
:name: code-example-1
this is some blabla...
>>> import paddle
>>> paddle.device.set_device('gpu')
>>> a = paddle.to_tensor(.123456789)
>>> print(a)
Tensor(shape=[1], dtype=float32, place=Place(gpu:0), stop_gradient=True,
[0.123456780])
""",
'cpu_to_cpu': """
placeholder
Examples:
.. code-block:: python
:name: code-example-1
this is some blabla...
>>> import paddle
>>> paddle.device.set_device('cpu')
>>> a = paddle.to_tensor(.123456789)
>>> print(a)
Tensor(shape=[1], dtype=float32, place=Place(cpu), stop_gradient=True,
[0.123456780])
""",
'gpu_to_cpu': """
placeholder
Examples:
.. code-block:: python
:name: code-example-1
this is some blabla...
>>> import paddle
>>> paddle.device.set_device('gpu')
>>> a = paddle.to_tensor(.123456789)
>>> print(a)
Tensor(shape=[1], dtype=float32, place=Place(cpu), stop_gradient=True,
[0.123456780])
""",
'cpu_to_gpu': """
placeholder
Examples:
.. code-block:: python
:name: code-example-1
this is some blabla...
>>> import paddle
>>> paddle.device.set_device('cpu')
>>> a = paddle.to_tensor(.123456789)
>>> print(a)
Tensor(shape=[1], dtype=float32, place=Place(gpu:0), stop_gradient=True,
[0.123456780])
""",
'gpu_to_cpu_array': """
placeholder
Examples:
.. code-block:: python
:name: code-example-1
this is some blabla...
>>> import paddle
>>> paddle.device.set_device('gpu')
>>> a = paddle.to_tensor([[1.123456789 ,2,3], [2,3,4], [3,4,5]])
>>> print(a)
Tensor(shape=[3, 3], dtype=float32, place=Place(cpu), stop_gradient=True,
[[1.123456780, 2., 3.],
[2., 3., 4.],
[3., 4., 5.]])
""",
'cpu_to_gpu_array': """
placeholder
Examples:
.. code-block:: python
:name: code-example-1
this is some blabla...
>>> import paddle
>>> paddle.device.set_device('cpu')
>>> a = paddle.to_tensor([[1.123456789,2,3], [2,3,4], [3,4,5]])
>>> print(a)
Tensor(shape=[3, 3], dtype=float32, place=Place(gpu:0), stop_gradient=True,
[[1.123456780, 2., 3.],
[2., 3., 4.],
[3., 4., 5.]])
""",
'mass_array': """
placeholder
Examples:
.. code-block:: python
:name: code-example-1
this is some blabla...
>>> import paddle
>>> paddle.device.set_device('gpu')
>>> a = paddle.to_tensor(
... [[1.123456780, 2., -3, .3],
... [2, 3, +4., 1.2+10.34e-5j],
... [3, 5.e-3, 1e2, 3e-8]]
... )
>>> # Tensor(shape=[3, 4], dtype=complex64, place=Place(gpu:0), stop_gradient=True,
>>> # [[ (1.1234568357467651+0j) ,
>>> # (2+0j) ,
>>> # (-3+0j) ,
>>> # (0.30000001192092896+0j) ],
>>> # [ (2+0j) ,
>>> # (3+0j) ,
>>> # (4+0j) ,
>>> # (1.2000000476837158+0.00010340000153519213j)],
>>> # [ (3+0j) ,
>>> # (0.004999999888241291+0j) ,
>>> # (100+0j) ,
>>> # (2.999999892949745e-08+0j) ]])
>>> print(a)
Tensor(shape=[3, 4], dtype=complex64, place=Place(AAA), stop_gradient=True,
[[ (1.123456+0j),
(2+0j),
(-3+0j),
(0.3+0j)],
[ (2+0j),
(3+0j),
(4+0j),
(1.2+0.00010340j)],
[ (3+0j),
(0.00499999+0j),
(100+0j),
(2.999999e-08+0j)]])
""",
'float_array': """
placeholder
Examples:
.. code-block:: python
:name: code-example-1
this is some blabla...
>>> import paddle
>>> paddle.device.set_device('cpu')
>>> x = [[2, 3, 4], [7, 8, 9]]
>>> x = paddle.to_tensor(x, dtype='float32')
>>> print(paddle.log(x))
Tensor(shape=[2, 3], dtype=float32, place=Place(cpu), stop_gradient=True,
[[0.69314718, 1.09861231, 1.38629436],
[1.94591010, 2.07944155, 2.19722462]])
""",
'float_array_diff': """
placeholder
Examples:
.. code-block:: python
:name: code-example-1
this is some blabla...
>>> import paddle
>>> paddle.device.set_device('cpu')
>>> x = [[2, 3, 4], [7, 8, 9]]
>>> x = paddle.to_tensor(x, dtype='float32')
>>> print(paddle.log(x))
Tensor(shape=[2, 3], dtype=float32, place=Place(cpu), stop_gradient=True,
[[0.69314712, 1.09861221, 1.386294],
[1.94591032, 2.07944156, 2.1972246]])
""",
}
test_results = get_test_results(doctester, docstrings_to_test)
self.assertEqual(len(test_results), 9)
tr_0, tr_1, tr_2, tr_3, tr_4, tr_5, tr_6, tr_7, tr_8 = test_results
self.assertIn('gpu_to_gpu', tr_0.name)
self.assertTrue(tr_0.passed)
self.assertIn('cpu_to_cpu', tr_1.name)
self.assertTrue(tr_1.passed)
self.assertIn('gpu_to_cpu', tr_2.name)
self.assertTrue(tr_2.passed)
self.assertIn('cpu_to_gpu', tr_3.name)
self.assertTrue(tr_3.passed)
self.assertIn('gpu_to_cpu_array', tr_4.name)
self.assertTrue(tr_4.passed)
self.assertIn('cpu_to_gpu_array', tr_5.name)
self.assertTrue(tr_5.passed)
self.assertIn('mass_array', tr_6.name)
self.assertTrue(tr_6.passed)
self.assertIn('float_array', tr_7.name)
self.assertTrue(tr_7.passed)
self.assertIn('float_array_diff', tr_8.name)
self.assertTrue(tr_8.passed)
# reload xdoctest.checker
importlib.reload(xdoctest.checker)
_clear_environ()
test_capacity = {'cpu'}
doctester = Xdoctester(
style='freeform', target='codeblock', patch_float_precision=False
)
doctester.prepare(test_capacity)
docstrings_to_test = {
'gpu_to_gpu': """
placeholder
Examples:
.. code-block:: python
:name: code-example-1
this is some blabla...
>>> import paddle
>>> paddle.device.set_device('gpu')
>>> a = paddle.to_tensor(.123456789)
>>> print(a)
Tensor(shape=[1], dtype=float32, place=Place(gpu:0), stop_gradient=True,
[0.123456780])
""",
'cpu_to_cpu': """
placeholder
Examples:
.. code-block:: python
:name: code-example-1
this is some blabla...
>>> import paddle
>>> paddle.device.set_device('cpu')
>>> a = paddle.to_tensor(.123456789)
>>> print(a)
Tensor(shape=[1], dtype=float32, place=Place(cpu), stop_gradient=True,
[0.123456780])
""",
'gpu_to_cpu': """
placeholder
Examples:
.. code-block:: python
:name: code-example-1
this is some blabla...
>>> import paddle
>>> paddle.device.set_device('gpu')
>>> a = paddle.to_tensor(.123456789)
>>> print(a)
Tensor(shape=[1], dtype=float32, place=Place(cpu), stop_gradient=True,
[0.123456780])
""",
'cpu_to_gpu': """
placeholder
Examples:
.. code-block:: python
:name: code-example-1
this is some blabla...
>>> import paddle
>>> paddle.device.set_device('cpu')
>>> a = paddle.to_tensor(.123456789)
>>> print(a)
Tensor(shape=[1], dtype=float32, place=Place(gpu:0), stop_gradient=True,
[0.123456780])
""",
'gpu_to_cpu_array': """
placeholder
Examples:
.. code-block:: python
:name: code-example-1
this is some blabla...
>>> import paddle
>>> paddle.device.set_device('gpu')
>>> a = paddle.to_tensor([[1.123456789 ,2,3], [2,3,4], [3,4,5]])
>>> print(a)
Tensor(shape=[3, 3], dtype=float32, place=Place(cpu), stop_gradient=True,
[[1.123456780, 2., 3.],
[2., 3., 4.],
[3., 4., 5.]])
""",
'cpu_to_gpu_array': """
placeholder
Examples:
.. code-block:: python
:name: code-example-1
this is some blabla...
>>> import paddle
>>> paddle.device.set_device('cpu')
>>> a = paddle.to_tensor([[1.123456789,2,3], [2,3,4], [3,4,5]])
>>> print(a)
Tensor(shape=[3, 3], dtype=float32, place=Place(gpu:0), stop_gradient=True,
[[1.123456780, 2., 3.],
[2., 3., 4.],
[3., 4., 5.]])
""",
'mass_array': """
placeholder
Examples:
.. code-block:: python
:name: code-example-1
this is some blabla...
>>> import paddle
>>> paddle.device.set_device('gpu')
>>> a = paddle.to_tensor(
... [[1.123456780, 2., -3, .3],
... [2, 3, +4., 1.2+10.34e-5j],
... [3, 5.e-3, 1e2, 3e-8]]
... )
>>> # Tensor(shape=[3, 4], dtype=complex64, place=Place(gpu:0), stop_gradient=True,
>>> # [[ (1.1234568357467651+0j) ,
>>> # (2+0j) ,
>>> # (-3+0j) ,
>>> # (0.30000001192092896+0j) ],
>>> # [ (2+0j) ,
>>> # (3+0j) ,
>>> # (4+0j) ,
>>> # (1.2000000476837158+0.00010340000153519213j)],
>>> # [ (3+0j) ,
>>> # (0.004999999888241291+0j) ,
>>> # (100+0j) ,
>>> # (2.999999892949745e-08+0j) ]])
>>> print(a)
Tensor(shape=[3, 4], dtype=complex64, place=Place(AAA), stop_gradient=True,
[[ (1.123456+0j),
(2+0j),
(-3+0j),
(0.3+0j)],
[ (2+0j),
(3+0j),
(4+0j),
(1.2+0.00010340j)],
[ (3+0j),
(0.00499999+0j),
(100+0j),
(2.999999e-08+0j)]])
""",
'float_array': """
placeholder
Examples:
.. code-block:: python
:name: code-example-1
this is some blabla...
>>> import paddle
>>> paddle.device.set_device('cpu')
>>> x = [[2, 3, 4], [7, 8, 9]]
>>> x = paddle.to_tensor(x, dtype='float32')
>>> print(paddle.log(x))
Tensor(shape=[2, 3], dtype=float32, place=Place(cpu), stop_gradient=True,
[[0.69314718, 1.09861231, 1.38629436],
[1.94591010, 2.07944155, 2.19722462]])
""",
'float_array_diff': """
placeholder
Examples:
.. code-block:: python
:name: code-example-1
this is some blabla...
>>> import paddle
>>> paddle.device.set_device('cpu')
>>> x = [[2, 3, 4], [7, 8, 9]]
>>> x = paddle.to_tensor(x, dtype='float32')
>>> print(paddle.log(x))
Tensor(shape=[2, 3], dtype=float32, place=Place(cpu), stop_gradient=True,
[[0.69314712, 1.09861221, 1.386294],
[1.94591032, 2.07944156, 2.1972246]])
""",
}
test_results = get_test_results(doctester, docstrings_to_test)
self.assertEqual(len(test_results), 9)
tr_0, tr_1, tr_2, tr_3, tr_4, tr_5, tr_6, tr_7, tr_8 = test_results
self.assertIn('gpu_to_gpu', tr_0.name)
self.assertFalse(tr_0.passed)
self.assertIn('cpu_to_cpu', tr_1.name)
self.assertFalse(tr_1.passed)
self.assertIn('gpu_to_cpu', tr_2.name)
self.assertFalse(tr_2.passed)
self.assertIn('cpu_to_gpu', tr_3.name)
self.assertFalse(tr_3.passed)
self.assertIn('gpu_to_cpu_array', tr_4.name)
self.assertFalse(tr_4.passed)
self.assertIn('cpu_to_gpu_array', tr_5.name)
self.assertFalse(tr_5.passed)
self.assertIn('mass_array', tr_6.name)
self.assertFalse(tr_6.passed)
self.assertIn('float_array', tr_7.name)
self.assertTrue(tr_7.passed)
self.assertIn('float_array_diff', tr_8.name)
self.assertFalse(tr_8.passed)
def test_run_cpu(self):
    _clear_environ()
......