From 6bed4dab8c28787ea64f602a2a451356ee7eda07 Mon Sep 17 00:00:00 2001
From: silingtong123 <35439432+silingtong123@users.noreply.github.com>
Date: Tue, 27 Aug 2019 16:12:50 +0800
Subject: [PATCH] remove the usage of InferenceTranspiler (#797)

---
 03.image_classification/README.cn.md  | 14 +-------------
 03.image_classification/README.md     | 13 +------------
 03.image_classification/index.cn.html | 14 +-------------
 03.image_classification/index.html    | 13 +------------
 03.image_classification/train.py      | 16 ----------------
 5 files changed, 4 insertions(+), 66 deletions(-)

diff --git a/03.image_classification/README.cn.md b/03.image_classification/README.cn.md
index d28269f..9b82a26 100644
--- a/03.image_classification/README.cn.md
+++ b/03.image_classification/README.cn.md
@@ -538,11 +538,7 @@ with fluid.scope_guard(inference_scope):
     [inference_program, feed_target_names,
      fetch_targets] = fluid.io.load_inference_model(params_dirname, exe)
 
-    # The input's dimension of conv should be 4-D or 5-D.
-    # Use inference_transpiler to speedup
-    inference_transpiler_program = inference_program.clone()
-    t = fluid.transpiler.InferenceTranspiler()
-    t.transpile(inference_transpiler_program, place)
+
 
     # Construct feed as a dictionary of {feed_target_name: feed_target_data}
     # and results will contain a list of data corresponding to fetch_targets.
@@ -550,14 +546,6 @@ with fluid.scope_guard(inference_scope):
                       feed={feed_target_names[0]: img},
                       fetch_list=fetch_targets)
 
-    transpiler_results = exe.run(inference_transpiler_program,
-                                 feed={feed_target_names[0]: img},
-                                 fetch_list=fetch_targets)
-
-    assert len(results[0]) == len(transpiler_results[0])
-    for i in range(len(results[0])):
-        numpy.testing.assert_almost_equal(
-            results[0][i], transpiler_results[0][i], decimal=5)
 
     # infer label
     label_list = [
diff --git a/03.image_classification/README.md b/03.image_classification/README.md
index 661ff53..7526cb1 100644
--- a/03.image_classification/README.md
+++ b/03.image_classification/README.md
@@ -541,11 +541,7 @@ with fluid.scope_guard(inference_scope):
     [inference_program, feed_target_names,
      fetch_targets] = fluid.io.load_inference_model(params_dirname, exe)
 
-    # The input's dimension of conv should be 4-D or 5-D.
-    # Use inference_transpiler to speedup
-    inference_transpiler_program = inference_program.clone()
-    t = fluid.transpiler.InferenceTranspiler()
-    t.transpile(inference_transpiler_program, place)
+
 
     # Construct feed as a dictionary of {feed_target_name: feed_target_data}
     # and results will contain a list of data corresponding to fetch_targets.
@@ -553,14 +549,7 @@ with fluid.scope_guard(inference_scope):
                       feed={feed_target_names[0]: img},
                       fetch_list=fetch_targets)
 
-    transpiler_results = exe.run(inference_transpiler_program,
-                                 feed={feed_target_names[0]: img},
-                                 fetch_list=fetch_targets)
-    assert len(results[0]) == len(transpiler_results[0])
-    for i in range(len(results[0])):
-        numpy.testing.assert_almost_equal(
-            results[0][i], transpiler_results[0][i], decimal=5)
 
 
     # infer label
     label_list = [
diff --git a/03.image_classification/index.cn.html b/03.image_classification/index.cn.html
index 376054e..3a66d5c 100644
--- a/03.image_classification/index.cn.html
+++ b/03.image_classification/index.cn.html
@@ -580,11 +580,7 @@ with fluid.scope_guard(inference_scope):
     [inference_program, feed_target_names,
      fetch_targets] = fluid.io.load_inference_model(params_dirname, exe)
 
-    # The input's dimension of conv should be 4-D or 5-D.
-    # Use inference_transpiler to speedup
-    inference_transpiler_program = inference_program.clone()
-    t = fluid.transpiler.InferenceTranspiler()
-    t.transpile(inference_transpiler_program, place)
+
 
     # Construct feed as a dictionary of {feed_target_name: feed_target_data}
     # and results will contain a list of data corresponding to fetch_targets.
@@ -592,14 +588,6 @@ with fluid.scope_guard(inference_scope):
                       feed={feed_target_names[0]: img},
                       fetch_list=fetch_targets)
 
-    transpiler_results = exe.run(inference_transpiler_program,
-                                 feed={feed_target_names[0]: img},
-                                 fetch_list=fetch_targets)
-
-    assert len(results[0]) == len(transpiler_results[0])
-    for i in range(len(results[0])):
-        numpy.testing.assert_almost_equal(
-            results[0][i], transpiler_results[0][i], decimal=5)
 
     # infer label
     label_list = [
diff --git a/03.image_classification/index.html b/03.image_classification/index.html
index fcf8c74..7eaca4f 100644
--- a/03.image_classification/index.html
+++ b/03.image_classification/index.html
@@ -583,11 +583,7 @@ with fluid.scope_guard(inference_scope):
     [inference_program, feed_target_names,
      fetch_targets] = fluid.io.load_inference_model(params_dirname, exe)
 
-    # The input's dimension of conv should be 4-D or 5-D.
-    # Use inference_transpiler to speedup
-    inference_transpiler_program = inference_program.clone()
-    t = fluid.transpiler.InferenceTranspiler()
-    t.transpile(inference_transpiler_program, place)
+
 
     # Construct feed as a dictionary of {feed_target_name: feed_target_data}
     # and results will contain a list of data corresponding to fetch_targets.
@@ -595,14 +591,7 @@ with fluid.scope_guard(inference_scope):
                       feed={feed_target_names[0]: img},
                       fetch_list=fetch_targets)
 
-    transpiler_results = exe.run(inference_transpiler_program,
-                                 feed={feed_target_names[0]: img},
-                                 fetch_list=fetch_targets)
-    assert len(results[0]) == len(transpiler_results[0])
-    for i in range(len(results[0])):
-        numpy.testing.assert_almost_equal(
-            results[0][i], transpiler_results[0][i], decimal=5)
 
 
     # infer label
     label_list = [
diff --git a/03.image_classification/train.py b/03.image_classification/train.py
index 78bf1ce..47df180 100644
--- a/03.image_classification/train.py
+++ b/03.image_classification/train.py
@@ -191,12 +191,6 @@ def infer(use_cuda, params_dirname=None):
     [inference_program, feed_target_names,
      fetch_targets] = fluid.io.load_inference_model(params_dirname, exe)
 
-    # The input's dimension of conv should be 4-D or 5-D.
-    # Use inference_transpiler to speedup
-    inference_transpiler_program = inference_program.clone()
-    t = fluid.transpiler.InferenceTranspiler()
-    t.transpile(inference_transpiler_program, place)
-
     # Construct feed as a dictionary of {feed_target_name: feed_target_data}
     # and results will contain a list of data corresponding to fetch_targets.
     results = exe.run(
@@ -204,16 +198,6 @@ def infer(use_cuda, params_dirname=None):
         feed={feed_target_names[0]: img},
         fetch_list=fetch_targets)
 
-    transpiler_results = exe.run(
-        inference_transpiler_program,
-        feed={feed_target_names[0]: img},
-        fetch_list=fetch_targets)
-
-    assert len(results[0]) == len(transpiler_results[0])
-    for i in range(len(results[0])):
-        numpy.testing.assert_almost_equal(
-            results[0][i], transpiler_results[0][i], decimal=5)
-
     # infer label
     label_list = [
        "airplane", "automobile", "bird", "cat", "deer", "dog", "frog",
--
GitLab
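
For context, after this patch the examples simply load the saved inference model and run it directly; the transpiled copy of the program and the result-comparison check are gone. Below is a minimal sketch of the inference path that remains, assuming a model has already been saved to params_dirname and that img is a preprocessed float32 image batch in the layout the saved network expects. The run_inference wrapper and the place/executor setup are illustrative additions for this sketch, not code from the patched files:

import numpy
import paddle.fluid as fluid

def run_inference(params_dirname, img, use_cuda=False):
    # Illustrative wrapper around the inference flow kept by this patch.
    place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
    exe = fluid.Executor(place)
    inference_scope = fluid.core.Scope()

    with fluid.scope_guard(inference_scope):
        # Load the program, its feed target names and fetch variables,
        # exactly as load_inference_model is called in the patched examples.
        [inference_program, feed_target_names,
         fetch_targets] = fluid.io.load_inference_model(params_dirname, exe)

        # Feed is a dictionary of {feed_target_name: feed_target_data};
        # results is a list of arrays, one per fetch target.
        results = exe.run(inference_program,
                          feed={feed_target_names[0]: img},
                          fetch_list=fetch_targets)

    # For the CIFAR-10 classifier in this chapter the first fetch target is
    # the softmax output, so argmax over it gives the predicted label index.
    return numpy.argmax(results[0])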