From 0f4edd78361e368beba28177008b950b0f6c6d1f Mon Sep 17 00:00:00 2001
From: luotao1
Date: Thu, 20 Sep 2018 10:18:43 +0800
Subject: [PATCH] use clone(for_test=True) replace get_inference_program

---
 fluid/chinese_ner/train.py                            |  5 +----
 fluid/mnist/model.py                                  | 11 +++++------
 fluid/neural_machine_translation/transformer/train.py |  4 +---
 3 files changed, 7 insertions(+), 13 deletions(-)

diff --git a/fluid/chinese_ner/train.py b/fluid/chinese_ner/train.py
index 82441f43..5ce04b8c 100644
--- a/fluid/chinese_ner/train.py
+++ b/fluid/chinese_ner/train.py
@@ -282,10 +282,7 @@ def main(args):
 
     chunk_evaluator = fluid.metrics.ChunkEvaluator()
 
-    inference_program = fluid.default_main_program().clone()
-    with fluid.program_guard(inference_program):
-        inference_program = fluid.io.get_inference_program(
-            [num_infer_chunks, num_label_chunks, num_correct_chunks])
+    inference_program = fluid.default_main_program().clone(for_test=True)
 
     train_reader = paddle.batch(
         paddle.reader.shuffle(
diff --git a/fluid/mnist/model.py b/fluid/mnist/model.py
index 9f830a9c..a66353c2 100644
--- a/fluid/mnist/model.py
+++ b/fluid/mnist/model.py
@@ -72,7 +72,8 @@ def cnn_model(data):
     # TODO(dzhwinter) : refine the initializer and random seed settting
     SIZE = 10
     input_shape = conv_pool_2.shape
-    param_shape = [six.moves.reduce(lambda a, b: a * b, input_shape[1:], 1)] + [SIZE]
+    param_shape = [six.moves.reduce(lambda a, b: a * b, input_shape[1:], 1)
+                   ] + [SIZE]
     scale = (2.0 / (param_shape[0]**2 * SIZE))**0.5
 
     predict = fluid.layers.fc(
@@ -90,7 +91,8 @@ def eval_test(exe, batch_acc, batch_size_tensor, inference_program):
         paddle.dataset.mnist.test(), batch_size=args.batch_size)
     test_pass_acc = fluid.average.WeightedAverage()
     for batch_id, data in enumerate(test_reader()):
-        img_data = np.array([x[0].reshape([1, 28, 28]) for x in data]).astype(DTYPE)
+        img_data = np.array(
+            [x[0].reshape([1, 28, 28]) for x in data]).astype(DTYPE)
         y_data = np.array([x[1] for x in data]).astype("int64")
         y_data = y_data.reshape([len(y_data), 1])
@@ -123,10 +125,7 @@ def run_benchmark(model, args):
         input=predict, label=label, total=batch_size_tensor)
 
     # inference program
-    inference_program = fluid.default_main_program().clone()
-    with fluid.program_guard(inference_program):
-        inference_program = fluid.io.get_inference_program(
-            target_vars=[batch_acc, batch_size_tensor])
+    inference_program = fluid.default_main_program().clone(for_test=True)
 
     # Optimization
     opt = fluid.optimizer.AdamOptimizer(
diff --git a/fluid/neural_machine_translation/transformer/train.py b/fluid/neural_machine_translation/transformer/train.py
index 69d2ce27..aee780ed 100644
--- a/fluid/neural_machine_translation/transformer/train.py
+++ b/fluid/neural_machine_translation/transformer/train.py
@@ -260,9 +260,7 @@ def split_data(data, num_part):
 def test_context(train_progm, avg_cost, train_exe, dev_count, data_input_names,
                  sum_cost, token_num):
     # Context to do validation.
-    test_program = train_progm.clone()
-    with fluid.program_guard(test_program):
-        test_program = fluid.io.get_inference_program([avg_cost])
+    test_program = train_progm.clone(for_test=True)
 
     val_data = reader.DataReader(
         src_vocab_fpath=args.src_vocab_fpath,
--
GitLab