未验证 提交 57fd89dc 编写于 作者: W whs 提交者: GitHub

Check use_gpu in ocr models. (#2736)

上级 23d450e9
import paddle.fluid as fluid import paddle.fluid as fluid
from utility import add_arguments, print_arguments, to_lodtensor, get_ctc_feeder_data, get_attention_feeder_data from utility import add_arguments, print_arguments, to_lodtensor, get_ctc_feeder_data, get_attention_feeder_data
from utility import check_gpu
from attention_model import attention_eval from attention_model import attention_eval
from crnn_ctc_model import ctc_eval from crnn_ctc_model import ctc_eval
import data_reader import data_reader
...@@ -70,6 +71,7 @@ def evaluate(args): ...@@ -70,6 +71,7 @@ def evaluate(args):
def main():
    """Entry point: parse CLI args, validate the GPU flag, run evaluation."""
    args = parser.parse_args()
    print_arguments(args)
    check_gpu(args.use_gpu)
    evaluate(args)
......
from __future__ import print_function from __future__ import print_function
import paddle.fluid as fluid import paddle.fluid as fluid
from utility import add_arguments, print_arguments, to_lodtensor, get_ctc_feeder_data, get_attention_feeder_for_infer, get_ctc_feeder_for_infer from utility import add_arguments, print_arguments, to_lodtensor, get_ctc_feeder_data, get_attention_feeder_for_infer, get_ctc_feeder_for_infer
from utility import check_gpu
import paddle.fluid.profiler as profiler import paddle.fluid.profiler as profiler
from crnn_ctc_model import ctc_infer from crnn_ctc_model import ctc_infer
from attention_model import attention_infer from attention_model import attention_infer
...@@ -138,6 +139,7 @@ def prune(words, sos, eos): ...@@ -138,6 +139,7 @@ def prune(words, sos, eos):
def main(): def main():
args = parser.parse_args() args = parser.parse_args()
print_arguments(args) print_arguments(args)
check_gpu(args.use_gpu)
if args.profile: if args.profile:
if args.use_gpu: if args.use_gpu:
with profiler.cuda_profiler("cuda_profiler.txt", 'csv') as nvprof: with profiler.cuda_profiler("cuda_profiler.txt", 'csv') as nvprof:
......
...@@ -4,6 +4,7 @@ from __future__ import division ...@@ -4,6 +4,7 @@ from __future__ import division
from __future__ import print_function from __future__ import print_function
import paddle.fluid as fluid import paddle.fluid as fluid
from utility import add_arguments, print_arguments, to_lodtensor, get_ctc_feeder_data, get_attention_feeder_data from utility import add_arguments, print_arguments, to_lodtensor, get_ctc_feeder_data, get_attention_feeder_data
from utility import check_gpu
import paddle.fluid.profiler as profiler import paddle.fluid.profiler as profiler
from crnn_ctc_model import ctc_train_net from crnn_ctc_model import ctc_train_net
from attention_model import attention_train_net from attention_model import attention_train_net
...@@ -67,7 +68,9 @@ def train(args): ...@@ -67,7 +68,9 @@ def train(args):
cycle=args.total_step > 0, cycle=args.total_step > 0,
model=args.model) model=args.model)
test_reader = data_reader.test( test_reader = data_reader.test(
test_images_dir=args.test_images, test_list_file=args.test_list, model=args.model) test_images_dir=args.test_images,
test_list_file=args.test_list,
model=args.model)
# prepare environment # prepare environment
place = fluid.CPUPlace() place = fluid.CPUPlace()
...@@ -115,8 +118,8 @@ def train(args): ...@@ -115,8 +118,8 @@ def train(args):
for data in test_reader(): for data in test_reader():
exe.run(inference_program, feed=get_feeder_data(data, place)) exe.run(inference_program, feed=get_feeder_data(data, place))
_, test_seq_error = error_evaluator.eval(exe) _, test_seq_error = error_evaluator.eval(exe)
print("\nTime: %s; Iter[%d]; Test seq error: %s.\n" % ( print("\nTime: %s; Iter[%d]; Test seq error: %s.\n" %
time.time(), iter_num, str(test_seq_error[0]))) (time.time(), iter_num, str(test_seq_error[0])))
#Note: The following logs are special for CE monitoring. #Note: The following logs are special for CE monitoring.
#Other situations do not need to care about these logs. #Other situations do not need to care about these logs.
...@@ -155,10 +158,10 @@ def train(args): ...@@ -155,10 +158,10 @@ def train(args):
iter_num += 1 iter_num += 1
# training log # training log
if iter_num % args.log_period == 0: if iter_num % args.log_period == 0:
print("\nTime: %s; Iter[%d]; Avg loss: %.3f; Avg seq err: %.3f" % ( print("\nTime: %s; Iter[%d]; Avg loss: %.3f; Avg seq err: %.3f"
time.time(), iter_num, % (time.time(), iter_num,
total_loss / (args.log_period * args.batch_size), total_loss / (args.log_period * args.batch_size),
total_seq_error / (args.log_period * args.batch_size))) total_seq_error / (args.log_period * args.batch_size)))
print("kpis train_cost %f" % (total_loss / (args.log_period * print("kpis train_cost %f" % (total_loss / (args.log_period *
args.batch_size))) args.batch_size)))
print("kpis train_acc %f" % ( print("kpis train_acc %f" % (
...@@ -203,6 +206,7 @@ def train(args): ...@@ -203,6 +206,7 @@ def train(args):
def main(): def main():
args = parser.parse_args() args = parser.parse_args()
print_arguments(args) print_arguments(args)
check_gpu(args.use_gpu)
if args.profile: if args.profile:
if args.use_gpu: if args.use_gpu:
with profiler.cuda_profiler("cuda_profiler.txt", 'csv') as nvprof: with profiler.cuda_profiler("cuda_profiler.txt", 'csv') as nvprof:
......
...@@ -140,3 +140,22 @@ def get_attention_feeder_for_infer(data, place): ...@@ -140,3 +140,22 @@ def get_attention_feeder_for_infer(data, place):
"init_ids": init_ids, "init_ids": init_ids,
"init_scores": init_scores "init_scores": init_scores
} }
def check_gpu(use_gpu):
    """
    Abort with an explanatory message when use_gpu is True but the
    installed paddlepaddle package is the CPU-only build.

    Args:
        use_gpu (bool): value of the ``use_gpu`` config / command-line flag.

    Raises:
        SystemExit: when a GPU run is requested on a CPU-only build.
    """
    import sys

    # Runtime-visible message: kept byte-identical to the original.
    err = "Config use_gpu cannot be set as true while you are " \
          "using paddlepaddle cpu version ! \nPlease try: \n" \
          "\t1. Install paddlepaddle-gpu to run model on GPU \n" \
          "\t2. Set use_gpu as false in config file to run " \
          "model on CPU"
    try:
        if use_gpu and not fluid.is_compiled_with_cuda():
            # NOTE(review): the original used logger.error(err), but no
            # logger is visible in this module; a NameError there would be
            # swallowed by the old broad `except Exception: pass`, silently
            # disabling this check. Write to stderr so the message always
            # reaches the user. SystemExit is not caught by the handler
            # below, so the exit still takes effect.
            print(err, file=sys.stderr)
            sys.exit(1)
    except AttributeError:
        # Older paddle releases lack fluid.is_compiled_with_cuda();
        # skip the check rather than crash on them.
        pass
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册