Unverified commit 0a0021f3, authored by Yibing Liu, committed by GitHub

Fix more core.xxx usages (#2134)

Parent 550a0e2e
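The pattern throughout this commit is the same: call sites that reached into the private paddle.fluid.core module (core.Scope, core.CUDAPlace, core.CPUPlace, core.LoDTensor) switch to the equivalent public aliases re-exported on paddle.fluid. A minimal sketch of the before/after, assuming Paddle Fluid 1.x (the use_gpu flag is illustrative):

    import paddle.fluid as fluid

    use_gpu = False  # illustrative flag
    # Before: reaching into the private C++ binding module.
    # place = fluid.core.CUDAPlace(0) if use_gpu else fluid.core.CPUPlace()
    # After: the public aliases exposed directly on paddle.fluid.
    place = fluid.CUDAPlace(0) if use_gpu else fluid.CPUPlace()
    scope = fluid.Scope()  # replaces fluid.core.Scope()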
@@ -352,7 +352,7 @@ def evaluate(args):
     t0 = time.time()
-    with fluid.scope_guard(fluid.core.Scope()):
+    with fluid.scope_guard(fluid.Scope()):
         infer_program, feed_target_names, fetch_vars = fluid.io.load_inference_model(
             args.init_model, exe)
@@ -402,7 +402,7 @@ def infer(args):
     t0 = time.time()
-    with fluid.scope_guard(fluid.core.Scope()):
+    with fluid.scope_guard(fluid.Scope()):
         infer_program, feed_target_names, fetch_vars = fluid.io.load_inference_model(
             args.init_model, exe)
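Both hunks above use the same inference idiom: loading the model inside a scope_guard with a fresh Scope keeps the loaded persistable parameters isolated from anything already living in the global scope. A minimal end-to-end sketch of the idiom, assuming Fluid 1.x (the model directory and input shape are illustrative):

    import numpy as np
    import paddle.fluid as fluid

    exe = fluid.Executor(fluid.CPUPlace())
    with fluid.scope_guard(fluid.Scope()):  # fresh scope for inference vars
        infer_program, feed_target_names, fetch_vars = \
            fluid.io.load_inference_model("init_model_dir", exe)  # illustrative path
        fake_input = np.zeros((1, 8), dtype="int64")  # illustrative feed
        results = exe.run(infer_program,
                          feed={feed_target_names[0]: fake_input},
                          fetch_list=fetch_vars)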
@@ -25,7 +25,6 @@ import math
 import paddle
 import paddle.fluid as fluid
-import paddle.fluid.core as core
 import paddle.fluid.framework as framework
 from paddle.fluid.executor import Executor
@@ -179,7 +178,7 @@ def train():
     optimizer.minimize(loss)
-    place = core.CUDAPlace(0) if args.use_gpu else core.CPUPlace()
+    place = fluid.CUDAPlace(0) if args.use_gpu else fluid.CPUPlace()
     exe = Executor(place)
     exe.run(framework.default_startup_program())
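The hunks above (dropping the core import and switching the place constructors) leave the training setup sequence intact: choose a device place, build an Executor on it, and run the startup program once to initialize parameters. A minimal standalone sketch of that sequence, assuming Fluid 1.x (the use_gpu flag is illustrative):

    import paddle.fluid as fluid
    import paddle.fluid.framework as framework
    from paddle.fluid.executor import Executor

    use_gpu = False  # illustrative flag
    place = fluid.CUDAPlace(0) if use_gpu else fluid.CPUPlace()
    exe = Executor(place)
    # Run the startup program once so all parameters are initialized
    # before training iterations begin.
    exe.run(framework.default_startup_program())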
@@ -285,7 +285,7 @@ def fast_infer(args):
            seq_ids_list, seq_scores_list = [seq_ids], [
                seq_scores
            ] if isinstance(
-                seq_ids, paddle.fluid.core.LoDTensor) else (seq_ids, seq_scores)
+                seq_ids, paddle.fluid.LoDTensor) else (seq_ids, seq_scores)
            for seq_ids, seq_scores in zip(seq_ids_list, seq_scores_list):
                # How to parse the results:
                # Suppose the lod of seq_ids is:
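The isinstance check above exists because the executor hands back LoDTensors when return_numpy is disabled and plain numpy arrays otherwise, so both output shapes must be normalized before parsing. The adjacent comment goes on to describe the two-level LoD of the beam-search output; below is a sketch of the walk it describes, assuming that layout (the helper name is hypothetical):

    import numpy as np

    def parse_beam_output(seq_ids):
        # Two-level LoD from beam search: level 0 delimits source
        # sentences, level 1 delimits each sentence's candidate
        # translations.
        lod = seq_ids.lod()
        data = np.array(seq_ids)
        hyps = []
        for i in range(len(lod[0]) - 1):  # each source sentence
            for j in range(lod[0][i], lod[0][i + 1]):  # each candidate
                hyps.append(data[lod[1][j]:lod[1][j + 1]])
        return hyps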
@@ -119,7 +119,7 @@ def infer(args):
     #place = fluid.CUDAPlace(0)
     exe = fluid.Executor(place)
     #print("aaa")
-    inference_scope = fluid.core.Scope()
+    inference_scope = fluid.Scope()
     with fluid.scope_guard(inference_scope):
         [inference_program, feed_target_names,
          fetch_targets] = fluid.io.load_inference_model(args.model_path, exe)
@@ -92,7 +92,7 @@ def infer(args):
     feeder = fluid.DataFeeder(feed_list=[word, mention, target], place=place)
     exe = fluid.Executor(place)
-    inference_scope = fluid.core.Scope()
+    inference_scope = fluid.Scope()
     with fluid.scope_guard(inference_scope):
         [inference_program, feed_target_names,
          fetch_targets] = fluid.io.load_inference_model(args.model_path, exe)
@@ -17,7 +17,7 @@ def infer(test_reader, use_cuda, model_path):
     place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
     exe = fluid.Executor(place)
-    with fluid.scope_guard(fluid.core.Scope()):
+    with fluid.scope_guard(fluid.Scope()):
         infer_program, feed_target_names, fetch_vars = fluid.io.load_inference_model(
             model_path, exe)
@@ -25,7 +25,6 @@ import math
 import paddle
 import paddle.fluid as fluid
-import paddle.fluid.core as core
 import paddle.fluid.framework as framework
 from paddle.fluid.executor import Executor
@@ -178,7 +177,7 @@ def train():
     optimizer.minimize(loss)
-    place = core.CUDAPlace(0) if args.use_gpu else core.CPUPlace()
+    place = fluid.CUDAPlace(0) if args.use_gpu else fluid.CPUPlace()
     exe = Executor(place)
     exe.run(framework.default_startup_program())
@@ -26,7 +26,6 @@ import multiprocessing
 import paddle
 import paddle.fluid as fluid
-import paddle.fluid.core as core
 import paddle.fluid.framework as framework
 from paddle.fluid.executor import Executor
@@ -389,7 +388,7 @@ def train(logger, args):
         optimizer.minimize(obj_func)
         # initialize parameters
-        place = core.CUDAPlace(0) if args.use_gpu else core.CPUPlace()
+        place = fluid.CUDAPlace(0) if args.use_gpu else fluid.CPUPlace()
         exe = Executor(place)
         if args.load_dir:
             logger.info('load from {}'.format(args.load_dir))
@@ -22,7 +22,6 @@ import six
 import paddle
 import paddle.fluid as fluid
-import paddle.fluid.core as core
 import paddle.fluid.framework as framework
 from paddle.fluid.executor import Executor
 from paddle.fluid.contrib.decoder.beam_search_decoder import *
@@ -65,7 +64,7 @@ def infer():
         batch_size=args.batch_size,
         drop_last=False)
-    place = core.CUDAPlace(0) if args.use_gpu else core.CPUPlace()
+    place = fluid.CUDAPlace(0) if args.use_gpu else fluid.CPUPlace()
     exe = Executor(place)
     exe.run(framework.default_startup_program())
@@ -22,7 +22,6 @@ import os
 import paddle
 import paddle.fluid as fluid
-import paddle.fluid.core as core
 import paddle.fluid.framework as framework
 from paddle.fluid.executor import Executor
 from paddle.fluid.contrib.decoder.beam_search_decoder import *
@@ -95,7 +94,7 @@ def train():
         batch_size=args.batch_size,
         drop_last=False)
-    place = core.CUDAPlace(0) if args.use_gpu else core.CPUPlace()
+    place = fluid.CUDAPlace(0) if args.use_gpu else fluid.CPUPlace()
     exe = Executor(place)
     exe.run(framework.default_startup_program())
@@ -31,7 +31,7 @@ def infer(model_path, batch_size, test_data_file, vocab_file, target_file,
     place = fluid.CUDAPlace(0) if use_gpu else fluid.CPUPlace()
     exe = fluid.Executor(place)
-    inference_scope = fluid.core.Scope()
+    inference_scope = fluid.Scope()
     with fluid.scope_guard(inference_scope):
         [inference_program, feed_target_names,
          fetch_targets] = fluid.io.load_inference_model(model_path, exe)
@@ -348,7 +348,7 @@ def infer(test_reader, use_cuda, model_path=None):
     place = fluid.CPUPlace()
     exe = fluid.Executor(place)
-    inference_scope = fluid.core.Scope()
+    inference_scope = fluid.Scope()
     with fluid.scope_guard(inference_scope):
         [inference_program, feed_target_names,
          fetch_targets] = fluid.io.load_inference_model(model_path, exe)