未验证 提交 42aad304 编写于 作者: R Ren Wei (任卫) 提交者: GitHub

use the `required` instruction to determine if the environment fits the sample...

use the `required` instruction to determine if the environment meets the sample code's requirements. (#32766)

* add unittests

* add find_last_future_line_end

* extract_code_blocks_from_docstr and its testcases

* test_codeblock_before_examples_is_ignored

* sampcd_extract_to_file 拆为两步

* update the codeblock element's format

* code-block directive has no value options

* insert the CODES_INSERTED_INTO_FRONTEND

* using the new func insert_codes_into_codeblock

* get_test_capacity and is_required_match

* using the new functions in sampcd_extract_to_file

* add some comments and refactor functions

* using logger instead of all the print

* remove wlist

* collect summary info, and print them

* call get capacity

* update summary format

* print the apis that don't have sample codes.

* print the samples that consumed more than 10s.

print time

* update unittest testcases

* solve ResourceWarning: unclosed file

* run tools test separately

* python2 does not have nonlocal keyword, using dict variable instead

* remove unused import, rearrange a series of conditional statements.

* remove wlist.json and its check approval

* remove wlist.json and its check approval
上级 ed9e7723
......@@ -52,7 +52,7 @@ API_FILES=("CMakeLists.txt"
"python/paddle/fluid/tests/unittests/white_list/op_threshold_white_list.py"
"python/paddle/fluid/tests/unittests/white_list/check_op_sequence_batch_1_input_white_list.py"
"python/paddle/fluid/tests/unittests/white_list/no_grad_set_white_list.py"
"tools/wlist.json"
"tools/print_signatures.py"
"tools/sampcd_processor.py"
"paddle/scripts/paddle_build.bat"
"tools/windows/run_unittests.sh"
......@@ -80,11 +80,10 @@ function add_failed(){
echo_list="${echo_list[@]}$1"
}
function run_test_sampcd_processor() {
function run_tools_test() {
CUR_PWD=$(pwd)
cd ${PADDLE_ROOT}/tools
python test_sampcd_processor.py
python test_print_signatures.py
python $1
cd ${CUR_PWD}
}
......@@ -141,12 +140,12 @@ for API_FILE in ${API_FILES[*]}; do
elif [ "${API_FILE}" == "python/paddle/fluid/tests/unittests/white_list/no_grad_set_white_list.py" ];then
echo_line="You must have one RD (Shixiaowei02 (Recommend), luotao1 or phlrain) approval for the python/paddle/fluid/tests/unittests/white_list/no_grad_set_white_list.py, which manages the white list of no_grad_set without value in operators. For more information, please refer to[https://github.com/PaddlePaddle/Paddle/wiki/It's-recommend-to-set-no_grad_set-to-be-None].\n"
check_approval 1 39303645 6836917 43953930
elif [ "${API_FILE}" == "tools/wlist.json" ];then
echo_line="You must have one TPM (jzhang533) approval for the api whitelist for the tools/wlist.json.\n"
check_approval 1 29231
elif [ "${API_FILE}" == "tools/sampcd_processor.py" ];then
echo_line="test_sampcd_processor.py will be executed for changed sampcd_processor.py.\n"
run_test_sampcd_processor
run_tools_test test_sampcd_processor.py
elif [ "${API_FILE}" == "tools/print_signatures.py" ];then
echo_line="test_print_signatures.py will be executed for changed print_signatures.py.\n"
run_tools_test test_print_signatures.py
elif [ "${API_FILE}" == "python/paddle/distributed/fleet/__init__.py" ]; then
echo_line="You must have (fuyinno4 (Recommend), raindrops2sea) approval for ${API_FILE} changes"
check_approval 1 35824027 38231817
......
......@@ -11,12 +11,20 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
please make sure to run in the tools path
usage: python sample_test.py {cpu or gpu}
{cpu or gpu}: running in cpu version or gpu version
for example, you can run cpu version python2 testing like this:
python sampcd_processor.py cpu
"""
import os
import sys
import subprocess
import multiprocessing
import math
import platform
import inspect
import json
......@@ -24,16 +32,7 @@ import argparse
import shutil
import re
import logging
"""
please make sure to run in the tools path
usage: python sample_test.py {cpu or gpu}
{cpu or gpu}: running in cpu version or gpu version
for example, you can run cpu version python2 testing like this:
python sampcd_processor.py cpu
"""
import time
logger = logging.getLogger()
if logger.handlers:
......@@ -45,6 +44,7 @@ else:
console.setFormatter(logging.Formatter("%(message)s"))
RUN_ON_DEVICE = 'cpu'
SAMPLE_CODE_TEST_CAPACITY = set()
GPU_ID = 0
methods = []
whl_error = []
......@@ -52,6 +52,15 @@ API_DEV_SPEC_FN = 'paddle/fluid/API_DEV.spec'
API_PR_SPEC_FN = 'paddle/fluid/API_PR.spec'
API_DIFF_SPEC_FN = 'dev_pr_diff_api.spec'
SAMPLECODE_TEMPDIR = 'samplecode_temp'
ENV_KEY_CODES_FRONTEND = 'CODES_INSERTED_INTO_FRONTEND'
ENV_KEY_TEST_CAPACITY = 'SAMPLE_CODE_TEST_CAPACITY'
SUMMARY_INFO = {
'success': [],
'failed': [],
'skiptest': [],
'nocodes': [],
# ... required not-match
}
def find_all(srcstr, substr):
......@@ -75,32 +84,225 @@ def find_all(srcstr, substr):
return indices
def find_last_future_line_end(cbstr):
    """
    Find the position just past the last `__future__` import line.

    Args:
        cbstr(str): the code-block string to scan.

    Return:
        int: index one past the line end ('\\n') of the last line that
             mentions `__future__`, or None if no such line exists.
    """
    # Scan every "__future__ ... \n" line and keep only the last match.
    # (re.finditer yields matches left-to-right, so the final one wins.)
    pat = re.compile('__future__.*\n')
    lastmo = None
    for mo in re.finditer(pat, cbstr):
        lastmo = mo
    if lastmo:
        return lastmo.end()
    return None
def extract_code_blocks_from_docstr(docstr):
    """
    extract code-blocks from the given docstring.

    DON'T include the multiline-string definition in code-blocks.
    The *Examples* section must be the last.

    Args:
        docstr(str): docstring
    Return:
        code_blocks: A list of code-blocks, indent removed.
                     element {'name': the code-block's name, 'id': sequence id.
                     'codes': codes, 'required': 'gpu'}
    """
    code_blocks = []

    # only scan from the Examples section onwards
    mo = re.search(r"Examples:", docstr)
    if mo is None:
        return code_blocks
    ds_list = docstr[mo.start():].replace("\t", ' ').split("\n")
    lastlineindex = len(ds_list) - 1

    cb_start_pat = re.compile(r"code-block::\s*python")
    # directive options like ":name: xxx" / ":linenos:"
    cb_param_pat = re.compile(r"^\s*:(\w+):\s*(\S*)\s*$")
    # "# required: gpu" style comment inside the block
    # NOTE(review): [s|d] is a character class that also matches '|';
    # "(s|d)" was probably intended — confirm before changing.
    cb_required_pat = re.compile(r"^\s*#\s*require[s|d]\s*:\s*(\S+)\s*$")

    # parser state shared with the nested helpers; a dict is used because
    # python2 has no `nonlocal` keyword.
    cb_info = {}
    cb_info['cb_started'] = False        # inside a code-block?
    cb_info['cb_cur'] = []               # lines of the current block
    cb_info['cb_cur_indent'] = -1        # indent of the block's first line
    cb_info['cb_cur_name'] = None        # value of the :name: option
    cb_info['cb_cur_seq_id'] = 0         # 1-based block counter
    cb_info['cb_required'] = None        # value of the "# required:" line

    def _cb_started():
        # begin a new block: bump the id, reset per-block metadata
        # nonlocal cb_started, cb_cur_name, cb_required, cb_cur_seq_id
        cb_info['cb_started'] = True
        cb_info['cb_cur_seq_id'] += 1
        cb_info['cb_cur_name'] = None
        cb_info['cb_required'] = None

    def _append_code_block():
        # flush the accumulated lines as one finished code-block element
        # nonlocal code_blocks, cb_cur, cb_cur_name, cb_cur_seq_id, cb_required
        code_blocks.append({
            'codes': inspect.cleandoc("\n".join(cb_info['cb_cur'])),
            'name': cb_info['cb_cur_name'],
            'id': cb_info['cb_cur_seq_id'],
            'required': cb_info['cb_required'],
        })

    for lineno, linecont in enumerate(ds_list):
        if re.search(cb_start_pat, linecont):
            if not cb_info['cb_started']:
                _cb_started()
                continue
            else:
                # cur block end
                if len(cb_info['cb_cur']):
                    _append_code_block()
                _cb_started()  # another block started
                cb_info['cb_cur_indent'] = -1
                cb_info['cb_cur'] = []
        else:
            if cb_info['cb_started']:
                # handle the code-block directive's options
                mo_p = cb_param_pat.match(linecont)
                if mo_p:
                    if mo_p.group(1) == 'name':
                        cb_info['cb_cur_name'] = mo_p.group(2)
                    continue
                # read the required directive
                mo_r = cb_required_pat.match(linecont)
                if mo_r:
                    cb_info['cb_required'] = mo_r.group(1)
                # docstring end
                if lineno == lastlineindex:
                    mo = re.search(r"\S", linecont)
                    if mo is not None and cb_info['cb_cur_indent'] <= mo.start():
                        cb_info['cb_cur'].append(linecont)
                    if len(cb_info['cb_cur']):
                        _append_code_block()
                    break
                # check indent for cur block start and end.
                mo = re.search(r"\S", linecont)
                if mo is None:
                    # blank lines never terminate nor join a block
                    continue
                if cb_info['cb_cur_indent'] < 0:
                    # find the first non empty line
                    cb_info['cb_cur_indent'] = mo.start()
                    cb_info['cb_cur'].append(linecont)
                else:
                    if cb_info['cb_cur_indent'] <= mo.start():
                        cb_info['cb_cur'].append(linecont)
                    else:
                        # dedented comment lines are tolerated, anything
                        # else dedented ends the block
                        if linecont[mo.start()] == '#':
                            continue
                        else:
                            # block end
                            if len(cb_info['cb_cur']):
                                _append_code_block()
                            cb_info['cb_started'] = False
                            cb_info['cb_cur_indent'] = -1
                            cb_info['cb_cur'] = []
    return code_blocks
def get_test_capacity():
    """
    Collect the test capacities into SAMPLE_CODE_TEST_CAPACITY.

    Capacities come from the SAMPLE_CODE_TEST_CAPACITY environment
    variable (comma separated, e.g. "gpu,distributed") plus the current
    RUN_ON_DEVICE value; 'cpu' is always included.
    """
    global SAMPLE_CODE_TEST_CAPACITY  # write
    global ENV_KEY_TEST_CAPACITY, RUN_ON_DEVICE  # readonly
    if ENV_KEY_TEST_CAPACITY in os.environ:
        for r in os.environ[ENV_KEY_TEST_CAPACITY].split(','):
            rr = r.strip().lower()
            # BUGFIX: test the normalized token `rr` (was `if r:`), so a
            # whitespace-only item no longer adds '' to the capacity set.
            if rr:
                SAMPLE_CODE_TEST_CAPACITY.add(rr)
    if 'cpu' not in SAMPLE_CODE_TEST_CAPACITY:
        SAMPLE_CODE_TEST_CAPACITY.add('cpu')

    if RUN_ON_DEVICE:
        SAMPLE_CODE_TEST_CAPACITY.add(RUN_ON_DEVICE)
def is_required_match(requirestr, cbtitle='not-specified'):
    """
    search the required instruction in the code-block, and check it match the current running environment.

    environment values of equipped: cpu, gpu, xpu, distributed, skip
    the 'skip' is the special flag to skip the test, so is_required_match
    returns None for it directly.

    Args:
        requirestr(str): the required string (comma separated).
        cbtitle(str): the title of the code-block, used for logging only.
    returns:
        True - yes, matched
        False - not match
        None - skipped  # trick
    """
    global SAMPLE_CODE_TEST_CAPACITY  # readonly
    # every sample implicitly requires 'cpu'
    requires = set(['cpu'])
    if requirestr:
        for r in requirestr.split(','):
            rr = r.strip().lower()
            if rr:
                requires.add(rr)
    # 'skip'/'skiptest' short-circuits everything else
    if 'skip' in requires or 'skiptest' in requires:
        logger.info('%s: skipped', cbtitle)
        return None

    # matched only when every (non-skip) requirement is an equipped capacity
    if all([
            k in SAMPLE_CODE_TEST_CAPACITY for k in requires
            if k not in ['skip', 'skiptest']
    ]):
        return True

    logger.info('%s: the equipments [%s] not match the required [%s].', cbtitle,
                ','.join(SAMPLE_CODE_TEST_CAPACITY), ','.join(requires))
    return False
def insert_codes_into_codeblock(codeblock, apiname='not-specified'):
    """
    insert some codes in the frontend and backend into the code-block.

    The frontend part sets CUDA_VISIBLE_DEVICES (or is taken verbatim from
    the CODES_INSERTED_INTO_FRONTEND env var); the backend part appends a
    success print. `__future__` imports are kept ahead of the insertion.
    """
    global ENV_KEY_CODES_FRONTEND, GPU_ID, RUN_ON_DEVICE  # readonly
    # frontend: prefer the externally supplied snippet when non-empty
    frontend = os.environ.get(ENV_KEY_CODES_FRONTEND, '')
    if not frontend:
        cpu_str = '\nimport os\nos.environ["CUDA_VISIBLE_DEVICES"] = ""\n'
        gpu_str = '\nimport os\nos.environ["CUDA_VISIBLE_DEVICES"] = "{}"\n'.format(
            GPU_ID)
        if 'required' in codeblock:
            req = codeblock['required']
            if req is None or req == 'cpu':
                frontend = cpu_str
            elif req == 'gpu':
                frontend = gpu_str
            # other requirements (xpu, distributed, ...) get no insertion
        elif RUN_ON_DEVICE == "cpu":
            frontend = cpu_str
        elif RUN_ON_DEVICE == "gpu":
            frontend = gpu_str
    backend = '\nprint("{}\'s sample code (name:{}, id:{}) is executed successfully!")'.format(
        apiname, codeblock['name'], codeblock['id'])

    codes = codeblock['codes']
    cut_at = find_last_future_line_end(codes)
    if cut_at:
        # keep `from __future__ import ...` lines at the very top
        return codes[:cut_at] + frontend + codes[cut_at:] + backend
    return frontend + codes + backend
def sampcd_extract_to_file(srccom, name, htype="def", hname=""):
......@@ -117,122 +319,111 @@ def sampcd_extract_to_file(srccom, name, htype="def", hname=""):
Returns:
sample_code_filenames(list of str)
"""
global GPU_ID, RUN_ON_DEVICE, SAMPLECODE_TEMPDIR
CODE_BLOCK_INTERDUCTORY = "code-block:: python"
global GPU_ID, RUN_ON_DEVICE, SAMPLECODE_TEMPDIR # readonly
global SUMMARY_INFO # update
sampcd_begins = find_all(srccom, CODE_BLOCK_INTERDUCTORY)
if len(sampcd_begins) == 0:
codeblocks = extract_code_blocks_from_docstr(srccom)
if len(codeblocks) == 0:
SUMMARY_INFO['nocodes'].append(name)
# detect sample codes using >>> to format and consider this situation as wrong
print(htype, " name:", hname)
print("-----------------------")
logger.info(htype + " name:" + name)
logger.info("-----------------------")
if srccom.find("Examples:") != -1:
print("----example code check----\n")
logger.info("----example code check----")
if srccom.find(">>>") != -1:
print(
"Deprecated sample code style:\n\n Examples:\n\n >>>codeline\n >>>codeline\n\n\n ",
"Please use '.. code-block:: python' to ",
"format sample code.\n")
logger.warning(r"""Deprecated sample code style:
Examples:
>>>codeline
>>>codeline
Please use '.. code-block:: python' to format the sample code.""")
return []
else:
print("Error: No sample code!\n")
logger.warning("Error: No sample code!")
return []
sample_code_filenames = []
for y in range(1, len(sampcd_begins) + 1):
sampcd_begin = sampcd_begins[y - 1]
sampcd = srccom[sampcd_begin + len(CODE_BLOCK_INTERDUCTORY) + 1:]
sampcd = sampcd.split("\n")
# remove starting empty lines
while sampcd[0].replace(' ', '').replace('\t', '') == '':
sampcd.pop(0)
# the minimum indent, which is the indent of the first
# non-empty line
min_indent = check_indent(sampcd[0])
sampcd_to_write = []
for i in range(0, len(sampcd)):
cdline = sampcd[i]
# handle empty lines or those only with spaces/tabs
if cdline.strip() == '':
continue
this_indent = check_indent(cdline)
if this_indent < min_indent:
break
else:
cdline = cdline.replace('\t', ' ')
sampcd_to_write.append(cdline[min_indent:])
sampcd = '\n'.join(sampcd_to_write)
if RUN_ON_DEVICE == "cpu":
sampcd = '\nimport os\nos.environ["CUDA_VISIBLE_DEVICES"] = ""\n' + sampcd
if RUN_ON_DEVICE == "gpu":
sampcd = '\nimport os\nos.environ["CUDA_VISIBLE_DEVICES"] = "{}"\n'.format(
GPU_ID) + sampcd
sampcd += '\nprint(' + '\"' + name + ' sample code is executed successfully!\")'
tfname = os.path.join(SAMPLECODE_TEMPDIR, '{}_example{}'.format(
name, '.py' if len(sampcd_begins) == 1 else '_{}.py'.format(y)))
with open(tfname, 'w') as tempf:
tempf.write(sampcd)
sample_code_filenames.append(tfname)
for y, cb in enumerate(codeblocks):
matched = is_required_match(cb['required'], name)
# matched has three states:
# True - please execute it;
# None - no sample code found;
# False - it need other special equipment or environment.
# so, the following conditional statements are intentionally arranged.
if matched == True:
tfname = os.path.join(SAMPLECODE_TEMPDIR, '{}_example{}'.format(
name, '.py'
if len(codeblocks) == 1 else '_{}.py'.format(y + 1)))
with open(tfname, 'w') as tempf:
sampcd = insert_codes_into_codeblock(cb, name)
tempf.write(sampcd)
sample_code_filenames.append(tfname)
elif matched is None:
logger.info('{}\' code block (name:{}, id:{}) is skipped.'.format(
name, cb['name'], cb['id']))
SUMMARY_INFO['skiptest'].append("{}-{}".format(name, cb['id']))
elif matched == False:
logger.info(
'{}\' code block (name:{}, id:{}) required({}) not match capacity({}).'.
format(name, cb['name'], cb['id'], cb['required'],
SAMPLE_CODE_TEST_CAPACITY))
if cb['required'] not in SUMMARY_INFO:
SUMMARY_INFO[cb['required']] = []
SUMMARY_INFO[cb['required']].append("{}-{}".format(name, cb['id']))
return sample_code_filenames
def execute_samplecode(tfname):
    """
    Execute a sample-code test.

    Runs the file in a subprocess with the current interpreter and
    collects its stdout/stderr.

    Args:
        tfname: the filename of the sample code.

    Returns:
        result: success or not
        tfname: same as the input argument
        msg: the stdout output of the sample code executing
        time: time consumed by sample code
    """
    # NOTE: this block carried interleaved diff residue (old print-based
    # reporting, a removed "# required" file scan, duplicate returns);
    # reconstructed here as the coherent logger-based version.
    result = True
    msg = None
    if platform.python_version()[0] in ["2", "3"]:
        cmd = [sys.executable, tfname]
    else:
        logger.error("Error: fail to parse python version!")
        result = False
        exit(1)

    logger.info("----example code check----")
    logger.info("executing sample code: %s", tfname)
    start_time = time.time()
    subprc = subprocess.Popen(
        cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    output, error = subprc.communicate()
    msg = "".join(output.decode(encoding='utf-8'))
    err = "".join(error.decode(encoding='utf-8'))
    end_time = time.time()

    if subprc.returncode != 0:
        # dump the failing sample's source alongside its output
        with open(tfname, 'r') as f:
            logger.warning("""Sample code error found in %s:
-----------------------
%s
-----------------------
subprocess return code: %d
Error Raised from Sample Code:
stderr: %s
stdout: %s
""", tfname, f.read(), subprc.returncode, err, msg)
        logger.info("----example code check failed----")
        result = False
    else:
        logger.info("----example code check success----")

    # msg is the returned code execution report
    return result, tfname, msg, end_time - start_time
def get_filenames():
......@@ -317,35 +508,6 @@ def get_incrementapi():
f.write('\n')
def get_wlist(fn="wlist.json"):
'''
this function will get the white list of API.
Returns:
wlist: a list of API that should not trigger the example check .
'''
wlist = []
wlist_file = []
# only white on CPU
gpu_not_white = []
with open(fn, 'r') as load_f:
load_dict = json.load(load_f)
for key in load_dict:
if key == 'wlist_dir':
for item in load_dict[key]:
wlist_file.append(item["name"])
elif key == "gpu_not_white":
gpu_not_white = load_dict[key]
elif key == "wlist_api":
for item in load_dict[key]:
wlist.append(item["name"])
else:
wlist = wlist + load_dict[key]
return wlist, wlist_file, gpu_not_white
arguments = [
# flags, dest, type, default, help
['--gpu_id', 'gpu_id', int, 0, 'GPU device id to use [0]'],
......@@ -391,18 +553,15 @@ if __name__ == '__main__':
))
logger.addHandler(logfHandler)
wlist, wlist_file, gpu_not_white = get_wlist()
if args.mode == "gpu":
GPU_ID = args.gpu_id
logger.info("using GPU_ID %d", GPU_ID)
for _gnw in gpu_not_white:
wlist.remove(_gnw)
elif args.mode != "cpu":
logger.error("Unrecognized argument:%s, 'cpu' or 'gpu' is desired.",
args.mode)
sys.exit("Invalid arguments")
RUN_ON_DEVICE = args.mode
get_test_capacity()
logger.info("API check -- Example Code")
logger.info("sample_test running under python %s",
platform.python_version())
......@@ -449,19 +608,50 @@ if __name__ == '__main__':
if not temp[0]:
logger.info("In addition, mistakes found in sample codes: %s",
temp[1])
logger.info("error_methods: %s", str(temp[2]))
logger.info("----------------------------------------------------")
exit(1)
else:
has_error = False
timeovered_test = {}
for temp in result:
if not temp[0]:
logger.info("In addition, mistakes found in sample codes: %s",
temp[1])
logger.info("error_methods: %s", str(temp[2]))
has_error = True
if has_error:
logger.info("Mistakes found in sample codes.")
logger.info("Please check sample codes.")
SUMMARY_INFO['failed'].append(temp[1])
else:
SUMMARY_INFO['success'].append(temp[1])
if temp[3] > 10:
timeovered_test[temp[1]] = temp[3]
if len(timeovered_test):
logger.info("%d sample codes ran time over 10s",
len(timeovered_test))
if args.debug:
for k, v in timeovered_test.items():
logger.info('{} - {}s'.format(k, v))
if len(SUMMARY_INFO['success']):
logger.info("%d sample codes ran success",
len(SUMMARY_INFO['success']))
for k, v in SUMMARY_INFO.items():
if k not in ['success', 'failed', 'skiptest', 'nocodes']:
logger.info("%d sample codes required not match for %s",
len(v), k)
if len(SUMMARY_INFO['skiptest']):
logger.info("%d sample codes skipped",
len(SUMMARY_INFO['skiptest']))
if args.debug:
logger.info('\n'.join(SUMMARY_INFO['skiptest']))
if len(SUMMARY_INFO['nocodes']):
logger.info("%d apis don't have sample codes",
len(SUMMARY_INFO['nocodes']))
if args.debug:
logger.info('\n'.join(SUMMARY_INFO['nocodes']))
if len(SUMMARY_INFO['failed']):
logger.info("%d sample codes ran failed",
len(SUMMARY_INFO['failed']))
logger.info('\n'.join(SUMMARY_INFO['failed']))
logger.info(
"Mistakes found in sample codes. Please recheck the sample codes."
)
exit(1)
logger.info("Sample code check is successful!")
......@@ -20,15 +20,18 @@ import tempfile
import shutil
import sys
import importlib
import re
import sampcd_processor
from sampcd_processor import find_all
from sampcd_processor import check_indent
from sampcd_processor import get_api_md5
from sampcd_processor import get_incrementapi
from sampcd_processor import get_wlist
from sampcd_processor import sampcd_extract_to_file
from sampcd_processor import extract_code_blocks_from_docstr
from sampcd_processor import execute_samplecode
SAMPLECODE_TEMP_DIR = 'samplecode_temp'
from sampcd_processor import find_last_future_line_end
from sampcd_processor import insert_codes_into_codeblock
from sampcd_processor import get_test_capacity
from sampcd_processor import is_required_match
class Test_find_all(unittest.TestCase):
......@@ -43,27 +46,246 @@ class Test_find_all(unittest.TestCase):
find_all(' hello, world; hello paddle!', 'hello'))
class Test_check_indent(unittest.TestCase):
def test_no_indent(self):
self.assertEqual(0, check_indent('hello paddle'))
class Test_find_last_future_line_end(unittest.TestCase):
    """Tests for find_last_future_line_end."""

    def test_no_instant(self):
        # no __future__ import at all -> None
        samplecodes = """
print(10//3)
"""
        self.assertIsNone(find_last_future_line_end(samplecodes))

    def test_1_instant(self):
        # one __future__ import: cut point is at/after its line end
        samplecodes = """
from __future__ import print_function
print(10//3)
"""
        mo = re.search("print_function\n", samplecodes)
        self.assertIsNotNone(mo)
        self.assertGreaterEqual(
            find_last_future_line_end(samplecodes), mo.end())

    def test_2_instant(self):
        # two __future__ imports: the LAST one decides the cut point
        samplecodes = """
from __future__ import print_function
from __future__ import division
print(10//3)
"""
        mo = re.search("division\n", samplecodes)
        self.assertIsNotNone(mo)
        self.assertGreaterEqual(
            find_last_future_line_end(samplecodes), mo.end())
class Test_extract_code_blocks_from_docstr(unittest.TestCase):
    """Tests for extract_code_blocks_from_docstr."""

    def test_no_samplecode(self):
        # no Examples section -> no code blocks
        docstr = """
placeholder
"""
        codeblocks = extract_code_blocks_from_docstr(docstr)
        self.assertListEqual([], codeblocks)

    def test_codeblock_before_examples_is_ignored(self):
        # only code-blocks AFTER "Examples:" are collected
        docstr = """
.. code-block:: python
print(1+1)
Examples:
"""
        codeblocks = extract_code_blocks_from_docstr(docstr)
        self.assertListEqual(codeblocks, [])

    def test_1_samplecode(self):
        docstr = """
Examples:
.. code-block:: python
print(1+1)
"""
        codeblocks = extract_code_blocks_from_docstr(docstr)
        self.assertListEqual(codeblocks, [{
            'codes': """print(1+1)""",
            'name': None,
            'id': 1,
            'required': None,
        }])

    def test_2_samplecodes(self):
        # second block carries :name:/:linenos: options and a
        # "# required: gpu" marker; ids are 1-based and sequential
        docstr = """
placeholder
Examples:
.. code-block:: python
print(1/0)
.. code-block:: python
:name: one_plus_one
:linenos:
# required: gpu
print(1+1)
"""
        codeblocks = extract_code_blocks_from_docstr(docstr)
        self.assertListEqual(codeblocks, [{
            'codes': """print(1/0)""",
            'name': None,
            'id': 1,
            'required': None,
        }, {
            'codes': """# required: gpu
print(1+1)""",
            'name': 'one_plus_one',
            'id': 2,
            'required': 'gpu',
        }])
class Test_insert_codes_into_codeblock(unittest.TestCase):
    """Tests for insert_codes_into_codeblock."""

    def test_required_None(self):
        # no requirement -> cpu frontend (empty CUDA_VISIBLE_DEVICES)
        codeblock = {
            'codes': """print(1/0)""",
            'name': None,
            'id': 1,
            'required': None,
        }
        self.assertEqual("""
import os
os.environ["CUDA_VISIBLE_DEVICES"] = ""
print(1/0)
print("not-specified's sample code (name:None, id:1) is executed successfully!")""",
                         insert_codes_into_codeblock(codeblock))

    def test_required_gpu(self):
        # required: gpu -> frontend exposes GPU_ID (default 0)
        codeblock = {
            'codes': """# required: gpu
print(1+1)""",
            'name': None,
            'id': 1,
            'required': 'gpu',
        }
        self.assertEqual("""
import os
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
# required: gpu
print(1+1)
print("not-specified's sample code (name:None, id:1) is executed successfully!")""",
                         insert_codes_into_codeblock(codeblock))

    def test_from_future(self):
        # __future__ imports must stay first; frontend codes go after them
        codeblock = {
            'codes': """
from __future__ import print_function
from __future__ import division
print(10//3)""",
            'name': 'future',
            'id': 1,
            'required': None,
        }
        self.assertEqual("""
from __future__ import print_function
from __future__ import division

import os
os.environ["CUDA_VISIBLE_DEVICES"] = ""
print(10//3)
print("not-specified's sample code (name:future, id:1) is executed successfully!")""",
                         insert_codes_into_codeblock(codeblock))
def clear_capacity():
    """Reset sampcd_processor's capacity globals and drop the env var."""
    sampcd_processor.SAMPLE_CODE_TEST_CAPACITY = set()
    sampcd_processor.RUN_ON_DEVICE = 'cpu'
    # pop with a default so a missing env var is not an error
    os.environ.pop(sampcd_processor.ENV_KEY_TEST_CAPACITY, None)
def test_indent_4_spaces(self):
self.assertEqual(4, check_indent(' hello paddle'))
def test_indent_1_tab(self):
self.assertEqual(4, check_indent("\thello paddle"))
class Test_get_test_capacity(unittest.TestCase):
    """Tests for get_test_capacity and SAMPLE_CODE_TEST_CAPACITY."""

    def setUp(self):
        clear_capacity()
        get_test_capacity()

    def tearDown(self):
        clear_capacity()
        get_test_capacity()

    def test_NoEnvVar(self):
        # default capacity is cpu only
        clear_capacity()
        get_test_capacity()
        self.assertCountEqual(['cpu', ],
                              sampcd_processor.SAMPLE_CODE_TEST_CAPACITY)

    def test_NoEnvVar_RUN_ON_DEVICE_gpu(self):
        # RUN_ON_DEVICE contributes its device to the capacity set
        clear_capacity()
        sampcd_processor.RUN_ON_DEVICE = 'gpu'
        get_test_capacity()
        self.assertCountEqual(['cpu', 'gpu'],
                              sampcd_processor.SAMPLE_CODE_TEST_CAPACITY)

    def test_EnvVar_gpu(self):
        # the env var adds capacities; cpu is always kept
        clear_capacity()
        os.environ[sampcd_processor.ENV_KEY_TEST_CAPACITY] = 'gpu'
        get_test_capacity()
        self.assertCountEqual(['cpu', 'gpu'],
                              sampcd_processor.SAMPLE_CODE_TEST_CAPACITY)

    def test_EnvVar_gpu_and_distributed(self):
        # comma separated env var yields multiple capacities
        clear_capacity()
        os.environ[sampcd_processor.ENV_KEY_TEST_CAPACITY] = 'gpu,distributed'
        get_test_capacity()
        self.assertCountEqual(['cpu', 'gpu', 'distributed'],
                              sampcd_processor.SAMPLE_CODE_TEST_CAPACITY)
class Test_is_required_match(unittest.TestCase):
    """Tests for is_required_match against various capacity sets."""

    def setUp(self):
        clear_capacity()

    def tearDown(self):
        clear_capacity()
        get_test_capacity()

    def test_alldefault(self):
        # capacity = {cpu}: only cpu/empty match; skip markers -> None
        clear_capacity()
        get_test_capacity()
        self.assertTrue(is_required_match(''))
        self.assertTrue(is_required_match(None))
        self.assertTrue(is_required_match('cpu'))
        self.assertFalse(is_required_match('gpu'))
        self.assertIsNone(is_required_match('skiptest'))
        self.assertIsNone(is_required_match('skip'))
        self.assertIsNone(is_required_match('cpu,skiptest'))

    def test_gpu_equipped(self):
        # capacity = {cpu, gpu}
        clear_capacity()
        os.environ[sampcd_processor.ENV_KEY_TEST_CAPACITY] = 'gpu'
        get_test_capacity()
        self.assertTrue(is_required_match('cpu'))
        self.assertTrue(is_required_match('gpu'))
        self.assertTrue(is_required_match('gpu,cpu'))
        self.assertIsNone(is_required_match('skiptest'))
        self.assertFalse(is_required_match('distributed'))

    def test_gpu_distributed_equipped(self):
        # capacity = {cpu, gpu, distributed}; xpu still unmet
        clear_capacity()
        os.environ[sampcd_processor.ENV_KEY_TEST_CAPACITY] = 'gpu,distributed'
        get_test_capacity()
        self.assertTrue(is_required_match('cpu'))
        self.assertTrue(is_required_match('gpu'))
        self.assertTrue(is_required_match('distributed'))
        self.assertFalse(is_required_match('xpu'))
        self.assertIsNone(is_required_match('skiptest'))
class Test_execute_samplecode(unittest.TestCase):
def setUp(self):
if not os.path.exists(SAMPLECODE_TEMP_DIR):
os.mkdir(SAMPLECODE_TEMP_DIR)
self.successSampleCodeFile = os.path.join(SAMPLECODE_TEMP_DIR,
'samplecode_success.py')
if not os.path.exists(sampcd_processor.SAMPLECODE_TEMPDIR):
os.mkdir(sampcd_processor.SAMPLECODE_TEMPDIR)
self.successSampleCodeFile = os.path.join(
sampcd_processor.SAMPLECODE_TEMPDIR, 'samplecode_success.py')
with open(self.successSampleCodeFile, 'w') as f:
f.write('print(1+1)')
self.failedSampleCodeFile = os.path.join(SAMPLECODE_TEMP_DIR,
'samplecode_failed.py')
self.failedSampleCodeFile = os.path.join(
sampcd_processor.SAMPLECODE_TEMPDIR, 'samplecode_failed.py')
with open(self.failedSampleCodeFile, 'w') as f:
f.write('print(1/0)')
......@@ -72,37 +294,41 @@ class Test_execute_samplecode(unittest.TestCase):
os.remove(self.failedSampleCodeFile)
def test_run_success(self):
result, tfname, msg = execute_samplecode(self.successSampleCodeFile)
result, tfname, msg, exec_time = execute_samplecode(
self.successSampleCodeFile)
self.assertTrue(result)
self.assertEqual(self.successSampleCodeFile, tfname)
self.assertIsNotNone(msg)
self.assertLess(msg.find('skipped'), 0)
self.assertLess(exec_time, 10)
def test_run_failed(self):
result, tfname, msg = execute_samplecode(self.failedSampleCodeFile)
result, tfname, msg, exec_time = execute_samplecode(
self.failedSampleCodeFile)
self.assertFalse(result)
self.assertEqual(self.failedSampleCodeFile, tfname)
self.assertIsNotNone(msg)
self.assertLess(msg.find('skipped'), 0)
self.assertLess(exec_time, 10)
def test_testcases_skipped(self):
...
tfname = os.path.join(SAMPLECODE_TEMP_DIR, 'samplecode_skipped.py')
with open(tfname, 'w') as f:
f.write("# required: distributed\nprint(1/0)")
result, _, msg = execute_samplecode(tfname)
self.assertTrue(result)
self.assertGreaterEqual(msg.find('skipped'), 0)
os.remove(tfname)
def clear_summary_info():
    """Empty every per-category record list in SUMMARY_INFO (keys kept)."""
    for records in sampcd_processor.SUMMARY_INFO.values():
        records.clear()
class Test_sampcd_extract_to_file(unittest.TestCase):
def setUp(self):
if not os.path.exists(SAMPLECODE_TEMP_DIR):
os.mkdir(SAMPLECODE_TEMP_DIR)
if not os.path.exists(sampcd_processor.SAMPLECODE_TEMPDIR):
os.mkdir(sampcd_processor.SAMPLECODE_TEMPDIR)
clear_capacity()
os.environ[sampcd_processor.ENV_KEY_TEST_CAPACITY] = 'gpu,distributed'
get_test_capacity()
def tearDown(self):
shutil.rmtree(SAMPLECODE_TEMP_DIR)
shutil.rmtree(sampcd_processor.SAMPLECODE_TEMPDIR)
clear_capacity()
get_test_capacity()
def test_1_samplecode(self):
comments = """
......@@ -113,9 +339,10 @@ class Test_sampcd_extract_to_file(unittest.TestCase):
"""
funcname = 'one_plus_one'
sample_code_filenames = sampcd_extract_to_file(comments, funcname)
self.assertCountEqual(
[os.path.join(SAMPLECODE_TEMP_DIR, funcname + '_example.py')],
sample_code_filenames)
self.assertCountEqual([
os.path.join(sampcd_processor.SAMPLECODE_TEMPDIR,
funcname + '_example.py')
], sample_code_filenames)
def test_no_samplecode(self):
comments = """
......@@ -140,10 +367,64 @@ class Test_sampcd_extract_to_file(unittest.TestCase):
funcname = 'one_plus_one'
sample_code_filenames = sampcd_extract_to_file(comments, funcname)
self.assertCountEqual([
os.path.join(SAMPLECODE_TEMP_DIR, funcname + '_example_1.py'),
os.path.join(SAMPLECODE_TEMP_DIR, funcname + '_example_2.py')
os.path.join(sampcd_processor.SAMPLECODE_TEMPDIR,
funcname + '_example_1.py'),
os.path.join(sampcd_processor.SAMPLECODE_TEMPDIR,
funcname + '_example_2.py')
], sample_code_filenames)
def test_2_samplecodes_has_skipped(self):
comments = """
placeholder
Examples:
.. code-block:: python
# required: skiptest
print(1/0)
.. code-block:: python
print(1+1)
.. code-block:: python
# required: gpu
print(1//1)
.. code-block:: python
# required: xpu
print(1//1)
.. code-block:: python
# required: distributed
print(1//1)
.. code-block:: python
# required: gpu
print(1//1)
"""
funcname = 'one_plus_one'
clear_summary_info()
clear_capacity()
get_test_capacity()
sample_code_filenames = sampcd_extract_to_file(comments, funcname)
self.assertCountEqual([
os.path.join(sampcd_processor.SAMPLECODE_TEMPDIR,
funcname + '_example_2.py')
], sample_code_filenames)
self.assertCountEqual(sampcd_processor.SUMMARY_INFO['skiptest'],
[funcname + '-1'])
self.assertCountEqual(sampcd_processor.SUMMARY_INFO['gpu'],
[funcname + '-3', funcname + '-6'])
self.assertCountEqual(sampcd_processor.SUMMARY_INFO['xpu'],
[funcname + '-4'])
self.assertCountEqual(sampcd_processor.SUMMARY_INFO['distributed'],
[funcname + '-5'])
class Test_get_api_md5(unittest.TestCase):
def setUp(self):
......@@ -208,55 +489,6 @@ class Test_get_incrementapi(unittest.TestCase):
], lines)
class Test_get_wlist(unittest.TestCase):
def setUp(self):
self.tmpDir = tempfile.mkdtemp()
self.wlist_filename = os.path.join(self.tmpDir, 'wlist.json')
with open(self.wlist_filename, 'w') as f:
f.write(r'''
{
"wlist_dir":[
{
"name":"../python/paddle/fluid/contrib",
"annotation":""
},
{
"name":"../python/paddle/verison.py",
"annotation":""
}
],
"wlist_api":[
{
"name":"xxxxx",
"annotation":"not a real api, just for example"
}
],
"wlist_temp_api":[
"to_tensor",
"save_persistables@dygraph/checkpoint.py"
],
"gpu_not_white":[
"deformable_conv"
]
}
''')
def tearDown(self):
os.remove(self.wlist_filename)
shutil.rmtree(self.tmpDir)
def test_get_wlist(self):
wlist, wlist_file, gpu_not_white = get_wlist(self.wlist_filename)
self.assertCountEqual(
["xxxxx", "to_tensor",
"save_persistables@dygraph/checkpoint.py"], wlist)
self.assertCountEqual([
"../python/paddle/fluid/contrib",
"../python/paddle/verison.py",
], wlist_file)
self.assertCountEqual(["deformable_conv"], gpu_not_white)
# https://github.com/PaddlePaddle/Paddle/blob/develop/python/paddle/fluid/layers/ops.py
# why? unable to use the ast module. emmmmm
......
{
"wlist_dir":[
{
"name":"../python/paddle/fluid/contrib",
"annotation":""
},
{
"name":"../python/paddle/verison.py",
"annotation":""
},
{
"name":"../python/paddle/fluid/core_avx.py",
"annotation":""
},
{
"name":"../python/paddle/distributed",
"annotation":""
}
],
"wlist_api":[
{
"name":"xxxxx",
"annotation":"not a real api, just for example"
},
{
"name":"squeeze_",
"annotation":"Inplace APIs don't need sample code. There is a special document introducing Inplace strategy"
},
{
"name":"unsqueeze_",
"annotation":"Inplace APIs don't need sample code. There is a special document introducing Inplace strategy"
},
{
"name":"reshape_",
"annotation":"Inplace APIs don't need sample code. There is a special document introducing Inplace strategy"
},
{
"name":"flatten_",
"annotation":"Inplace APIs don't need sample code. There is a special document introducing Inplace strategy"
},
{
"name":"scatter_",
"annotation":"Inplace APIs don't need sample code. There is a special document introducing Inplace strategy"
},
{
"name":"elu_",
"annotation":"Inplace APIs don't need sample code. There is a special document introducing Inplace strategy"
},
{
"name":"relu_",
"annotation":"Inplace APIs don't need sample code. There is a special document introducing Inplace strategy"
},
{
"name":"softmax_",
"annotation":"Inplace APIs don't need sample code. There is a special document introducing Inplace strategy"
},
{
"name":"tanh_",
"annotation":"Inplace APIs don't need sample code. There is a special document introducing Inplace strategy"
},
{
"name":"ceil_",
"annotation":"Inplace APIs don't need sample code. There is a special document introducing Inplace strategy"
},
{
"name":"floor_",
"annotation":"Inplace APIs don't need sample code. There is a special document introducing Inplace strategy"
},
{
"name":"exp_",
"annotation":"Inplace APIs don't need sample code. There is a special document introducing Inplace strategy"
},
{
"name":"reciprocal_",
"annotation":"Inplace APIs don't need sample code. There is a special document introducing Inplace strategy"
},
{
"name":"round_",
"annotation":"Inplace APIs don't need sample code. There is a special document introducing Inplace strategy"
},
{
"name":"sqrt_",
"annotation":"Inplace APIs don't need sample code. There is a special document introducing Inplace strategy"
},
{
"name":"rsqrt_",
"annotation":"Inplace APIs don't need sample code. There is a special document introducing Inplace strategy"
},
{
"name":"clip_",
"annotation":"Inplace APIs don't need sample code. There is a special document introducing Inplace strategy"
},
{
"name":"scale_",
"annotation":"Inplace APIs don't need sample code. There is a special document introducing Inplace strategy"
},
{
"name":"subtract_",
"annotation":"Inplace APIs don't need sample code. There is a special document introducing Inplace strategy"
},
{
"name":"add_",
"annotation":"Inplace APIs don't need sample code. There is a special document introducing Inplace strategy"
}
],
"wlist_temp_api":[
"to_tensor",
"LRScheduler",
"ReduceOnPlateau",
"append_LARS",
"BuildStrategy.debug_graphviz_path",
"BuildStrategy.enable_sequential_execution",
"BuildStrategy.fuse_elewise_add_act_ops",
"BuildStrategy.fuse_relu_depthwise_conv",
"BuildStrategy.gradient_scale_strategy",
"BuildStrategy.reduce_strategy",
"BuildStrategy.remove_unnecessary_lock",
"BuildStrategy.sync_batch_norm",
"DynamicRNN.step_input",
"DynamicRNN.static_input",
"DynamicRNN.block",
"DynamicRNN.update_memory",
"DynamicRNN.output",
"transpiler.DistributeTranspilerConfig",
"transpiler.DistributeTranspilerConfig.slice_var_up",
"transpiler.DistributeTranspilerConfig.split_method",
"transpiler.DistributeTranspilerConfig.min_block_size",
"DistributeTranspilerConfig.slice_var_up",
"DistributeTranspilerConfig.split_method",
"ModelAverage.apply",
"ModelAverage.restore",
"DistributeTranspilerConfig",
"DistributeTranspilerConfig.min_block_size",
"ExecutionStrategy.allow_op_delay",
"load",
"Accuracy.update",
"ChunkEvaluator.update",
"ExecutionStrategy.num_iteration_per_drop_scope",
"ExecutionStrategy.num_threads",
"CompiledProgram._with_inference_optimize",
"CompositeMetric.add_metric",
"CompositeMetric.update",
"CompositeMetric.eval",
"DetectionMAP.get_map_var",
"MetricBase",
"MetricBase.reset",
"MetricBase.get_config",
"MetricBase.update",
"MetricBase.eval",
"Accuracy.eval",
"Auc.update",
"Auc.eval",
"EditDistance.update",
"EditDistance.eval",
"ExponentialMovingAverage.apply",
"ExponentialMovingAverage.restore",
"ExponentialMovingAverage.update",
"StaticRNN.step",
"StaticRNN.step_input",
"StaticRNN.step_output",
"StaticRNN.update_memory",
"DetectionMAP.reset",
"StaticRNN.output",
"cuda_places",
"CUDAPinnedPlace",
"CUDAPlace",
"Program.parse_from_string",
"Compressor",
"Compressor.config",
"Compressor.run",
"HDFSClient.upload",
"HDFSClient.download",
"HDFSClient.is_exist",
"HDFSClient.is_dir",
"HDFSClient.delete",
"HDFSClient.rename",
"HDFSClient.makedirs",
"HDFSClient.ls",
"HDFSClient.lsr",
"multi_download",
"multi_upload",
"TrainingDecoder.block",
"QuantizeTranspiler.training_transpile",
"QuantizeTranspiler.freeze_program",
"AutoMixedPrecisionLists",
"Uniform.sample",
"Uniform.log_prob",
"Uniform.entropy",
"Categorical.kl_divergence",
"Categorical.entropy",
"MultivariateNormalDiag.entropy",
"MultivariateNormalDiag.kl_divergence",
"RNNCell",
"RNNCell.call",
"RNNCell.get_initial_states",
"GRUCell.call",
"LSTMCell.call",
"Decoder",
"Decoder.initialize",
"Decoder.step",
"Decoder.finalize",
"fused_elemwise_activation",
"search_pyramid_hash",
"convert_dist_to_sparse_program",
"load_persistables_for_increment",
"load_persistables_for_inference",
"xmap_readers",
"Metric.reset",
"Metric.update",
"Metric.accumulate",
"Metric.name",
"Metric.compute",
"Accuracy.reset",
"Accuracy.update",
"Accuracy.accumulate",
"Accuracy.name",
"Accuracy.compute",
"Precision.reset",
"Precision.update",
"Precision.accumulate",
"Precision.name",
"Precision.compute",
"Recall.reset",
"Recall.update",
"Recall.accumulate",
"Recall.name",
"Recall.compute",
"Auc.reset",
"Auc.update",
"Auc.accumulate",
"Auc.name",
"Auc.compute",
"Callback.set_params",
"Callback.on_train_begin",
"Callback.on_train_end",
"Callback.on_eval_begin",
"Callback.on_eval_end",
"Callback.on_test_begin",
"Callback.on_test_end",
"Callback.on_epoch_begin",
"Callback.on_epoch_end",
"Callback.on_train_batch_begin",
"Callback.on_train_batch_end",
"Callback.on_eval_batch_begin",
"Callback.on_eval_batch_end",
"Callback.on_test_batch_begin",
"Callback.on_test_batch_end",
"Model.prepare",
"SimpleRNNCell",
"SimpleRNNCell.forward",
"LSTMCell",
"LSTMCell.forward",
"GRUCell",
"GRUCell.forward",
"SimpleRNN",
"GRU",
"LSTM",
"RNN",
"BiRNN",
"RNNCellBase",
"RNNCellBase.get_initial_states",
"gelu",
"erf",
"DecodeHelper",
"DecodeHelper.initialize",
"DecodeHelper.sample",
"DecodeHelper.next_inputs",
"TrainingHelper.initialize",
"TrainingHelper.sample",
"TrainingHelper.next_inputs",
"GreedyEmbeddingHelper.initialize",
"GreedyEmbeddingHelper.sample",
"GreedyEmbeddingHelper.next_inputs",
"LayerList.append",
"HDFSClient",
"InitState",
"TracedLayer",
"SampleEmbeddingHelper.sample",
"BasicDecoder.initialize",
"BasicDecoder.step",
"ParameterList.append",
"GreedyEmbeddingHelper",
"SampleEmbeddingHelper",
"BasicDecoder",
"lstm",
"partial_sum",
"StateCell",
"StateCell.compute_state",
"TrainingDecoder",
"TrainingDecoder.step_input",
"TrainingDecoder.static_input",
"TrainingDecoder.output",
"BeamSearchDecoder",
"GradClipByValue",
"GradClipByNorm",
"Variable.detach",
"Variable.numpy",
"Variable.set_value",
"Variable.gradient",
"BeamSearchDecoder.decode",
"BeamSearchDecoder.read_array",
"CompiledProgram",
"CompiledProgram.with_data_parallel",
"append_backward",
"guard",
"to_variable",
"op_freq_statistic",
"save_dygraph",
"load_dygraph",
"ParallelExecutor",
"ParallelExecutor.run",
"ParallelExecutor.drop_local_exe_scopes",
"GradClipByGlobalNorm",
"extend_with_decoupled_weight_decay",
"switch",
"Normal",
"memory_usage",
"decorate",
"PiecewiseDecay",
"InverseTimeDecay",
"PolynomialDecay",
"NoamDecay",
"start_profiler",
"profiler",
"tree_conv",
"multiclass_nms2",
"DataFeedDesc",
"Conv2D",
"Conv3D",
"Conv3DTranspose",
"Embedding",
"NCE",
"PRelu",
"BilinearTensorProduct",
"GroupNorm",
"SpectralNorm",
"TreeConv",
"prroi_pool",
"ChunkEvaluator",
"EditDistance",
"ErrorClipByValue",
"Program.clone",
"cuda_pinned_places",
"DataFeeder",
"elementwise_floordiv",
"Layer",
"Layer.create_parameter",
"Layer.create_variable",
"Layer.sublayers",
"Layer.add_parameter",
"Layer.add_sublayer",
"Layer.parameters",
"Tracer",
"Layer.full_name",
"InMemoryDataset",
"layer_norm",
"bipartite_match",
"double_buffer",
"cumsum",
"thresholded_relu",
"group_norm",
"random_crop",
"row_conv",
"hard_shrink",
"ssd_loss",
"retinanet_target_assign",
"InMemoryDataset.global_shuffle",
"InMemoryDataset.get_memory_data_size",
"DetectionMAP",
"hash",
"InMemoryDataset.set_queue_num",
"LayerNorm",
"Preprocessor",
"chunk_eval",
"GRUUnit",
"ExponentialMovingAverage",
"QueueDataset.global_shuffle",
"NumpyArrayInitializer",
"create_py_reader_by_data",
"InMemoryDataset.local_shuffle",
"InMemoryDataset.get_shuffle_data_size",
"size",
"edit_distance",
"nce",
"BilinearInitializer",
"NaturalExpDecay",
"noam_decay",
"retinanet_detection_output",
"Pool2D",
"PipelineOptimizer",
"generate_mask_labels",
"isfinite",
"InMemoryDataset.set_fleet_send_batch_size",
"cuda_profiler",
"unfold",
"Executor",
"InMemoryDataset.load_into_memory",
"ExponentialDecay",
"BatchNorm",
"deformable_conv",
"InMemoryDataset.preload_into_memory",
"py_reader",
"linear_lr_warmup",
"InMemoryDataset.wait_preload_done",
"CosineDecay",
"roi_perspective_transform",
"unique",
"ones_like",
"LambOptimizer",
"InMemoryDataset.release_memory",
"Conv2DTranspose",
"QueueDataset.local_shuffle",
"save_persistables@dygraph/checkpoint.py",
"load_persistables@dygraph/checkpoint.py",
"elementwise_pow",
"WeightedAverage.reset",
"ChunkEvaluator.eval",
"NCE.forward",
"elementwise_div",
"BilinearTensorProduct.forward",
"NoamDecay.step",
"elementwise_min",
"PiecewiseDecay.step",
"Conv3DTranspose.forward",
"elementwise_add",
"IfElse.output",
"IfElse.true_block",
"InverseTimeDecay.step",
"PolynomialDecay.step",
"Precision.eval",
"enabled",
"elementwise_max",
"stop_gperf_profiler",
"IfElse.false_block",
"WeightedAverage.add",
"Auc.trapezoid_area",
"elementwise_mul",
"GroupNorm.forward",
"SpectralNorm.forward",
"elementwise_sub",
"Switch.case",
"IfElse.input",
"prepare_context",
"PRelu.forward",
"Recall.update",
"start_gperf_profiler",
"TreeConv.forward",
"Conv2D.forward",
"Switch.default",
"elementwise_mod",
"Precision.update",
"WeightedAverage.eval",
"Conv3D.forward",
"Embedding.forward",
"Recall.eval",
"FC.forward",
"While.block",
"DGCMomentumOptimizer",
"ParallelEnv",
"spawn",
"init_parallel_env",
"DataParallel",
"DataParallel.scale_loss",
"DataParallel.apply_collective_grads",
"BasicLSTMCell.forward",
"BasicGRUCell.forward",
"RNN.forward",
"StackedRNNCell.forward",
"StackedLSTMCell.forward",
"LSTM.forward",
"BidirectionalRNN.forward",
"BidirectionalLSTM.forward",
"StackedGRUCell.forward",
"GRU.forward",
"BidirectionalGRU.forward",
"DynamicDecode.forward",
"Conv1dPoolLayer.forward",
"CNNEncoder.forward",
"TransformerCell.forward",
"TransformerBeamSearchDecoder.step",
"MultiHeadAttention.forward",
"MultiHeadAttention.cal_kv",
"FFN.forward",
"TransformerEncoderLayer.forward",
"TransformerEncoder.forward",
"TransformerDecoderLayer.forward",
"TransformerDecoder.forward",
"TransformerDecoder.prepare_static_cache",
"TransformerDecoder.prepare_incremental_cache",
"LinearChainCRF.forward",
"CRFDecoding.forward",
"SequenceTagging.forward",
"XPUPlace",
"is_compiled_with_xpu",
"xpu_places"
],
"gpu_not_white":[
"deformable_conv",
"cuda_places",
"CUDAPinnedPlace",
"CUDAPlace",
"cuda_profiler",
"DGCMomentumOptimizer"
]
}
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册