Unverified Commit 61ef56a1 authored by heliqi, committed by GitHub

PassAutoScan: make the baseline use the same config as the test case (#38252)

* add timeout

* add timeout

* PassAutoScan: baseline uses the same config

* try running the baseline

* fix dropout Mask output attr error

* fix dropout Mask output attr error
Parent commit: 88c2cba1
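The core of the change: the baseline result is no longer produced once with a fixed CPU config (and the extra use_gpu_run_baseline switch); it is recomputed for every sampled predictor config with the same device settings (GPU / MKLDNN) but with IR optimization turned off. A minimal, condensed sketch of the resulting comparison loop inside run_test, using only calls that appear in the diff below:

    # Per-config baseline: same device settings as the tested config, but ir_optim off.
    for pred_config, op_list, (
            atol, rtol) in self.sample_predictor_configs(prog_config):
        base_config = self.create_inference_config(
            ir_optim=False,
            use_gpu=pred_config.use_gpu(),
            use_mkldnn=pred_config.mkldnn_enabled())

        # Run the un-optimized baseline, then the config under test.
        base_result = self.run_test_config(model, params, prog_config,
                                           base_config, feed_data)
        pred_result = self.run_test_config(model, params, prog_config,
                                           pred_config, feed_data)

        # The optimized run must match its own baseline within the per-config
        # tolerances, and the fused op list must be as expected.
        self.assert_tensors_near(atol, rtol, pred_result, base_result)
        self.assert_op_list(op_list)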
@@ -330,8 +330,7 @@ class PassAutoScanTest(AutoScanTest):
                        reproduce=None,
                        min_success_num=25,
                        max_duration=180,
-                       passes=None,
-                       use_gpu_run_baseline=False):
+                       passes=None):
         if os.getenv('HYPOTHESIS_TEST_PROFILE', 'ci') == "dev":
             max_examples *= 10
             min_success_num *= 10
@@ -356,10 +355,7 @@ class PassAutoScanTest(AutoScanTest):
             return self.sample_program_config(draw)
 
         def run_test(prog_config):
-            return self.run_test(
-                quant=quant,
-                prog_configs=[prog_config],
-                use_gpu_run_baseline=use_gpu_run_baseline)
+            return self.run_test(quant=quant, prog_configs=[prog_config])
 
         generator = st.composite(program_generator)
         loop_func = given(generator())(run_test)
@@ -396,10 +392,7 @@ class PassAutoScanTest(AutoScanTest):
                     format(max_duration))
                 assert False
 
-    def run_test(self,
-                 quant=False,
-                 prog_configs=None,
-                 use_gpu_run_baseline=False):
+    def run_test(self, quant=False, prog_configs=None):
         status = True
         for prog_config in prog_configs:
@@ -418,22 +411,13 @@ class PassAutoScanTest(AutoScanTest):
                     'data': tensor_config.data,
                     'lod': tensor_config.lod
                 }
-            results: List[Dict[str, np.ndarray]] = []
-
-            # baseline: cpu no ir_optim run
-            base_config = self.create_inference_config(
-                ir_optim=False, use_gpu=use_gpu_run_baseline)
             logging.info('RUN program_config: ' + str(prog_config))
-            results.append(
-                self.run_test_config(model, params, prog_config, base_config,
-                                     feed_data))
-            self.success_log('RUN_CPU_BASELINE done')
 
             self.num_predictor_kinds = 0
             for pred_config, op_list, (
                     atol, rtol) in self.sample_predictor_configs(prog_config):
                 self.num_predictor_kinds += 1
 
                 # skip info
                 ignore_flag = False
                 for ignore_info in self.ignore_cases:
@@ -454,12 +438,26 @@ class PassAutoScanTest(AutoScanTest):
                 if not os.path.exists(self.cache_dir):
                     os.mkdir(self.cache_dir)
 
+                # baseline: no ir_optim run
+                base_config = self.create_inference_config(
+                    ir_optim=False,
+                    use_gpu=pred_config.use_gpu(),
+                    use_mkldnn=pred_config.mkldnn_enabled(), )
+
                 try:
-                    results.append(
-                        self.run_test_config(model, params, prog_config,
-                                             pred_config, feed_data))
-                    self.assert_tensors_near(atol, rtol, results[-1],
-                                             results[0])
+                    # baseline
+                    base_result = self.run_test_config(
+                        model, params, prog_config, base_config, feed_data)
+                    self.success_log('RUN_BASELINE ' +
+                                     self.inference_config_str(
+                                         base_config) + ' done')
+
+                    if os.path.exists(self.cache_dir):
+                        shutil.rmtree(self.cache_dir)
+
+                    pred_result = self.run_test_config(
+                        model, params, prog_config, pred_config, feed_data)
+                    self.assert_tensors_near(atol, rtol, pred_result,
+                                             base_result)
                     if not ignore_flag:
                         self.assert_op_list(op_list)
...
@@ -127,8 +127,7 @@ class TestFCElementwiseLayerNormFusePass(PassAutoScanTest):
         self.run_and_statis(
             quant=False,
             max_examples=300,
-            passes=["fc_elementwise_layernorm_fuse_pass"],
-            use_gpu_run_baseline=True)
+            passes=["fc_elementwise_layernorm_fuse_pass"])
 
 
 if __name__ == "__main__":
...
@@ -46,7 +46,8 @@ class TestSimplifyWithBasicOpsPassUpscale(PassAutoScanTest):
         dropout_op = OpConfig(
             "dropout",
             inputs={"X": ["input_data"]},
-            outputs={"Out": ["dropout_output"]},
+            outputs={"Out": ["dropout_output"],
+                     "Mask": ["mask"]},
             fix_seed=fix_seed,
             dropout_implementation=dropout_implementation,
             dropout_prob=dropout_prob,
@@ -107,7 +108,8 @@ class TestSimplifyWithBasicOpsPassDowngrade(PassAutoScanTest):
         dropout_op = OpConfig(
             "dropout",
             inputs={"X": ["input_data"]},
-            outputs={"Out": ["dropout_output"]},
+            outputs={"Out": ["dropout_output"],
+                     "Mask": ["mask"]},
             fix_seed=fix_seed,
             dropout_implementation=dropout_implementation,
             dropout_prob=dropout_prob,
...
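Note on the last two hunks: the dropout operator definition also declares a Mask output, so an OpConfig that names only "Out" presumably tripped the output check that the commit message calls the "dropout Mask output attr error"; declaring "Mask": ["mask"] alongside "Out" makes the generated test program match the operator definition.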