From 0a0021f34e2fb6aa353cb196962bb4c5c738803e Mon Sep 17 00:00:00 2001
From: Yibing Liu
Date: Tue, 23 Apr 2019 20:59:26 +0800
Subject: [PATCH] Fix more core.xxx usages (#2134)

---
 .../dialogue_model_toolkit/auto_dialogue_evaluation/main.py  | 4 ++--
 PaddleNLP/language_model/train.py                            | 3 +--
 PaddleNLP/neural_machine_translation/transformer/infer.py    | 2 +-
 PaddleNLP/preprocess/tokenizer/tokenizer.py                  | 2 +-
 PaddleNLP/unarchived/chinese_ner/infer.py                    | 2 +-
 PaddleNLP/unarchived/language_model/gru/infer.py             | 2 +-
 PaddleNLP/unarchived/language_model/lstm/train.py            | 3 +--
 PaddleNLP/unarchived/machine_reading_comprehension/run.py    | 3 +--
 .../unarchived/neural_machine_translation/rnn_search/infer.py | 3 +--
 .../unarchived/neural_machine_translation/rnn_search/train.py | 3 +--
 PaddleNLP/unarchived/sequence_tagging_for_ner/infer.py       | 2 +-
 .../text_classification/clouds/scdb_parallel_executor.py     | 2 +-
 12 files changed, 13 insertions(+), 18 deletions(-)

diff --git a/PaddleNLP/dialogue_model_toolkit/auto_dialogue_evaluation/main.py b/PaddleNLP/dialogue_model_toolkit/auto_dialogue_evaluation/main.py
index ad04cd8b..d0107c83 100755
--- a/PaddleNLP/dialogue_model_toolkit/auto_dialogue_evaluation/main.py
+++ b/PaddleNLP/dialogue_model_toolkit/auto_dialogue_evaluation/main.py
@@ -352,7 +352,7 @@ def evaluate(args):
 
     t0 = time.time()
 
-    with fluid.scope_guard(fluid.core.Scope()):
+    with fluid.scope_guard(fluid.Scope()):
         infer_program, feed_target_names, fetch_vars = fluid.io.load_inference_model(
             args.init_model, exe)
 
@@ -402,7 +402,7 @@ def infer(args):
 
     t0 = time.time()
 
-    with fluid.scope_guard(fluid.core.Scope()):
+    with fluid.scope_guard(fluid.Scope()):
         infer_program, feed_target_names, fetch_vars = fluid.io.load_inference_model(
             args.init_model, exe)
 
diff --git a/PaddleNLP/language_model/train.py b/PaddleNLP/language_model/train.py
index c6e56467..115b803e 100644
--- a/PaddleNLP/language_model/train.py
+++ b/PaddleNLP/language_model/train.py
@@ -25,7 +25,6 @@ import math
 
 import paddle
 import paddle.fluid as fluid
-import paddle.fluid.core as core
 import paddle.fluid.framework as framework
 from paddle.fluid.executor import Executor
 
@@ -179,7 +178,7 @@ def train():
 
     optimizer.minimize(loss)
 
-    place = core.CUDAPlace(0) if args.use_gpu else core.CPUPlace()
+    place = fluid.CUDAPlace(0) if args.use_gpu else fluid.CPUPlace()
     exe = Executor(place)
     exe.run(framework.default_startup_program())
 
diff --git a/PaddleNLP/neural_machine_translation/transformer/infer.py b/PaddleNLP/neural_machine_translation/transformer/infer.py
index 858c8103..08543f01 100644
--- a/PaddleNLP/neural_machine_translation/transformer/infer.py
+++ b/PaddleNLP/neural_machine_translation/transformer/infer.py
@@ -285,7 +285,7 @@ def fast_infer(args):
             seq_ids_list, seq_scores_list = [seq_ids], [
                 seq_scores
             ] if isinstance(
-                seq_ids, paddle.fluid.core.LoDTensor) else (seq_ids, seq_scores)
+                seq_ids, paddle.fluid.LoDTensor) else (seq_ids, seq_scores)
             for seq_ids, seq_scores in zip(seq_ids_list, seq_scores_list):
                 # How to parse the results:
                 # Suppose the lod of seq_ids is:
diff --git a/PaddleNLP/preprocess/tokenizer/tokenizer.py b/PaddleNLP/preprocess/tokenizer/tokenizer.py
index d1b00d1c..910f45a7 100644
--- a/PaddleNLP/preprocess/tokenizer/tokenizer.py
+++ b/PaddleNLP/preprocess/tokenizer/tokenizer.py
@@ -119,7 +119,7 @@ def infer(args):
     #place = fluid.CUDAPlace(0)
     exe = fluid.Executor(place)
     #print("aaa")
-    inference_scope = fluid.core.Scope()
+    inference_scope = fluid.Scope()
     with fluid.scope_guard(inference_scope):
         [inference_program, feed_target_names,
          fetch_targets] = fluid.io.load_inference_model(args.model_path, exe)
diff --git a/PaddleNLP/unarchived/chinese_ner/infer.py b/PaddleNLP/unarchived/chinese_ner/infer.py
index dd0d156b..4bcb9e0b 100644
--- a/PaddleNLP/unarchived/chinese_ner/infer.py
+++ b/PaddleNLP/unarchived/chinese_ner/infer.py
@@ -92,7 +92,7 @@ def infer(args):
     feeder = fluid.DataFeeder(feed_list=[word, mention, target], place=place)
     exe = fluid.Executor(place)
 
-    inference_scope = fluid.core.Scope()
+    inference_scope = fluid.Scope()
     with fluid.scope_guard(inference_scope):
         [inference_program, feed_target_names,
          fetch_targets] = fluid.io.load_inference_model(args.model_path, exe)
diff --git a/PaddleNLP/unarchived/language_model/gru/infer.py b/PaddleNLP/unarchived/language_model/gru/infer.py
index ad03ef39..e7595d0b 100644
--- a/PaddleNLP/unarchived/language_model/gru/infer.py
+++ b/PaddleNLP/unarchived/language_model/gru/infer.py
@@ -17,7 +17,7 @@ def infer(test_reader, use_cuda, model_path):
     place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
     exe = fluid.Executor(place)
 
-    with fluid.scope_guard(fluid.core.Scope()):
+    with fluid.scope_guard(fluid.Scope()):
         infer_program, feed_target_names, fetch_vars = fluid.io.load_inference_model(
             model_path, exe)
 
diff --git a/PaddleNLP/unarchived/language_model/lstm/train.py b/PaddleNLP/unarchived/language_model/lstm/train.py
index 2c22b30f..d4d3466a 100644
--- a/PaddleNLP/unarchived/language_model/lstm/train.py
+++ b/PaddleNLP/unarchived/language_model/lstm/train.py
@@ -25,7 +25,6 @@ import math
 
 import paddle
 import paddle.fluid as fluid
-import paddle.fluid.core as core
 import paddle.fluid.framework as framework
 from paddle.fluid.executor import Executor
 
@@ -178,7 +177,7 @@ def train():
 
     optimizer.minimize(loss)
 
-    place = core.CUDAPlace(0) if args.use_gpu else core.CPUPlace()
+    place = fluid.CUDAPlace(0) if args.use_gpu else fluid.CPUPlace()
     exe = Executor(place)
     exe.run(framework.default_startup_program())
 
diff --git a/PaddleNLP/unarchived/machine_reading_comprehension/run.py b/PaddleNLP/unarchived/machine_reading_comprehension/run.py
index b202baec..afb2ff84 100644
--- a/PaddleNLP/unarchived/machine_reading_comprehension/run.py
+++ b/PaddleNLP/unarchived/machine_reading_comprehension/run.py
@@ -26,7 +26,6 @@ import multiprocessing
 
 import paddle
 import paddle.fluid as fluid
-import paddle.fluid.core as core
 import paddle.fluid.framework as framework
 from paddle.fluid.executor import Executor
 
@@ -389,7 +388,7 @@ def train(logger, args):
         optimizer.minimize(obj_func)
 
         # initialize parameters
-        place = core.CUDAPlace(0) if args.use_gpu else core.CPUPlace()
+        place = fluid.CUDAPlace(0) if args.use_gpu else fluid.CPUPlace()
         exe = Executor(place)
         if args.load_dir:
             logger.info('load from {}'.format(args.load_dir))
diff --git a/PaddleNLP/unarchived/neural_machine_translation/rnn_search/infer.py b/PaddleNLP/unarchived/neural_machine_translation/rnn_search/infer.py
index 4345b29c..f042d5ef 100644
--- a/PaddleNLP/unarchived/neural_machine_translation/rnn_search/infer.py
+++ b/PaddleNLP/unarchived/neural_machine_translation/rnn_search/infer.py
@@ -22,7 +22,6 @@ import six
 
 import paddle
 import paddle.fluid as fluid
-import paddle.fluid.core as core
 import paddle.fluid.framework as framework
 from paddle.fluid.executor import Executor
 from paddle.fluid.contrib.decoder.beam_search_decoder import *
@@ -65,7 +64,7 @@ def infer():
         batch_size=args.batch_size,
         drop_last=False)
 
-    place = core.CUDAPlace(0) if args.use_gpu else core.CPUPlace()
+    place = fluid.CUDAPlace(0) if args.use_gpu else fluid.CPUPlace()
     exe = Executor(place)
     exe.run(framework.default_startup_program())
 
diff --git a/PaddleNLP/unarchived/neural_machine_translation/rnn_search/train.py b/PaddleNLP/unarchived/neural_machine_translation/rnn_search/train.py
index ade0dd75..fbb93eab 100644
--- a/PaddleNLP/unarchived/neural_machine_translation/rnn_search/train.py
+++ b/PaddleNLP/unarchived/neural_machine_translation/rnn_search/train.py
@@ -22,7 +22,6 @@ import os
 
 import paddle
 import paddle.fluid as fluid
-import paddle.fluid.core as core
 import paddle.fluid.framework as framework
 from paddle.fluid.executor import Executor
 from paddle.fluid.contrib.decoder.beam_search_decoder import *
@@ -95,7 +94,7 @@ def train():
         batch_size=args.batch_size,
         drop_last=False)
 
-    place = core.CUDAPlace(0) if args.use_gpu else core.CPUPlace()
+    place = fluid.CUDAPlace(0) if args.use_gpu else fluid.CPUPlace()
     exe = Executor(place)
     exe.run(framework.default_startup_program())
 
diff --git a/PaddleNLP/unarchived/sequence_tagging_for_ner/infer.py b/PaddleNLP/unarchived/sequence_tagging_for_ner/infer.py
index 8041b894..9319aad8 100644
--- a/PaddleNLP/unarchived/sequence_tagging_for_ner/infer.py
+++ b/PaddleNLP/unarchived/sequence_tagging_for_ner/infer.py
@@ -31,7 +31,7 @@ def infer(model_path, batch_size, test_data_file, vocab_file, target_file,
     place = fluid.CUDAPlace(0) if use_gpu else fluid.CPUPlace()
     exe = fluid.Executor(place)
 
-    inference_scope = fluid.core.Scope()
+    inference_scope = fluid.Scope()
     with fluid.scope_guard(inference_scope):
         [inference_program, feed_target_names,
          fetch_targets] = fluid.io.load_inference_model(model_path, exe)
diff --git a/PaddleNLP/unarchived/text_classification/clouds/scdb_parallel_executor.py b/PaddleNLP/unarchived/text_classification/clouds/scdb_parallel_executor.py
index cc5cd4ee..d11da00e 100644
--- a/PaddleNLP/unarchived/text_classification/clouds/scdb_parallel_executor.py
+++ b/PaddleNLP/unarchived/text_classification/clouds/scdb_parallel_executor.py
@@ -348,7 +348,7 @@ def infer(test_reader, use_cuda, model_path=None):
         place = fluid.CPUPlace()
     exe = fluid.Executor(place)
 
-    inference_scope = fluid.core.Scope()
+    inference_scope = fluid.Scope()
     with fluid.scope_guard(inference_scope):
         [inference_program, feed_target_names,
          fetch_targets] = fluid.io.load_inference_model(model_path, exe)
-- 
GitLab