diff --git a/PaddleCV/ocr_recognition/eval.py b/PaddleCV/ocr_recognition/eval.py
index bc8be4fcf4767183629c2a9c3dc7ea49af5b20b1..99db6c89aaf98d2539f1f1426d50cbc64f80e9e9 100644
--- a/PaddleCV/ocr_recognition/eval.py
+++ b/PaddleCV/ocr_recognition/eval.py
@@ -1,5 +1,6 @@
 import paddle.fluid as fluid
 from utility import add_arguments, print_arguments, to_lodtensor, get_ctc_feeder_data, get_attention_feeder_data
+from utility import check_gpu
 from attention_model import attention_eval
 from crnn_ctc_model import ctc_eval
 import data_reader
@@ -70,6 +71,7 @@ def evaluate(args):
 def main():
     args = parser.parse_args()
     print_arguments(args)
+    check_gpu(args.use_gpu)
     evaluate(args)
 
 
diff --git a/PaddleCV/ocr_recognition/infer.py b/PaddleCV/ocr_recognition/infer.py
index f7d9fc1416a532557a6ba530306a484b164faaa8..9ad75d17f54ff121e5fbea090659c3c488bfe77a 100755
--- a/PaddleCV/ocr_recognition/infer.py
+++ b/PaddleCV/ocr_recognition/infer.py
@@ -1,6 +1,7 @@
 from __future__ import print_function
 import paddle.fluid as fluid
 from utility import add_arguments, print_arguments, to_lodtensor, get_ctc_feeder_data, get_attention_feeder_for_infer, get_ctc_feeder_for_infer
+from utility import check_gpu
 import paddle.fluid.profiler as profiler
 from crnn_ctc_model import ctc_infer
 from attention_model import attention_infer
@@ -138,6 +139,7 @@ def prune(words, sos, eos):
 def main():
     args = parser.parse_args()
     print_arguments(args)
+    check_gpu(args.use_gpu)
     if args.profile:
         if args.use_gpu:
             with profiler.cuda_profiler("cuda_profiler.txt", 'csv') as nvprof:
diff --git a/PaddleCV/ocr_recognition/train.py b/PaddleCV/ocr_recognition/train.py
index 2e294907a6bbac5f311c420ad22d51eafa972da7..a2370efdb89fec9ed5d824c9b2f933e347196922 100755
--- a/PaddleCV/ocr_recognition/train.py
+++ b/PaddleCV/ocr_recognition/train.py
@@ -4,6 +4,7 @@ from __future__ import division
 from __future__ import print_function
 import paddle.fluid as fluid
 from utility import add_arguments, print_arguments, to_lodtensor, get_ctc_feeder_data, get_attention_feeder_data
+from utility import check_gpu
 import paddle.fluid.profiler as profiler
 from crnn_ctc_model import ctc_train_net
 from attention_model import attention_train_net
@@ -67,7 +68,9 @@ def train(args):
         cycle=args.total_step > 0,
         model=args.model)
     test_reader = data_reader.test(
-        test_images_dir=args.test_images, test_list_file=args.test_list, model=args.model)
+        test_images_dir=args.test_images,
+        test_list_file=args.test_list,
+        model=args.model)
 
     # prepare environment
     place = fluid.CPUPlace()
@@ -115,8 +118,8 @@ def train(args):
         for data in test_reader():
             exe.run(inference_program, feed=get_feeder_data(data, place))
         _, test_seq_error = error_evaluator.eval(exe)
-        print("\nTime: %s; Iter[%d]; Test seq error: %s.\n" % (
-            time.time(), iter_num, str(test_seq_error[0])))
+        print("\nTime: %s; Iter[%d]; Test seq error: %s.\n" %
+              (time.time(), iter_num, str(test_seq_error[0])))
 
         #Note: The following logs are special for CE monitoring.
         #Other situations do not need to care about these logs.
@@ -155,10 +158,10 @@ def train(args):
             iter_num += 1
             # training log
             if iter_num % args.log_period == 0:
-                print("\nTime: %s; Iter[%d]; Avg loss: %.3f; Avg seq err: %.3f" % (
-                    time.time(), iter_num,
-                    total_loss / (args.log_period * args.batch_size),
-                    total_seq_error / (args.log_period * args.batch_size)))
+                print("\nTime: %s; Iter[%d]; Avg loss: %.3f; Avg seq err: %.3f"
+                      % (time.time(), iter_num,
+                         total_loss / (args.log_period * args.batch_size),
+                         total_seq_error / (args.log_period * args.batch_size)))
                 print("kpis train_cost %f" % (total_loss / (args.log_period *
                                               args.batch_size)))
                 print("kpis train_acc %f" % (
@@ -203,6 +206,7 @@ def train(args):
 def main():
     args = parser.parse_args()
     print_arguments(args)
+    check_gpu(args.use_gpu)
     if args.profile:
         if args.use_gpu:
             with profiler.cuda_profiler("cuda_profiler.txt", 'csv') as nvprof:
diff --git a/PaddleCV/ocr_recognition/utility.py b/PaddleCV/ocr_recognition/utility.py
index a22a744128e01f97026d1535278566ec8c8f7a66..ab5bbd817d2bbffaba3e6ad58b3acf8b5d2252af 100755
--- a/PaddleCV/ocr_recognition/utility.py
+++ b/PaddleCV/ocr_recognition/utility.py
@@ -140,3 +140,22 @@ def get_attention_feeder_for_infer(data, place):
         "init_ids": init_ids,
         "init_scores": init_scores
     }
+
+
+def check_gpu(use_gpu):
+    """
+    Log error and exit when set use_gpu=true in paddlepaddle
+    cpu version.
+    """
+    err = "Config use_gpu cannot be set as true while you are " \
+          "using paddlepaddle cpu version ! \nPlease try: \n" \
+          "\t1. Install paddlepaddle-gpu to run model on GPU \n" \
+          "\t2. Set use_gpu as false in config file to run " \
+          "model on CPU"
+
+    try:
+        if use_gpu and not fluid.is_compiled_with_cuda():
+            logger.error(err)
+            sys.exit(1)
+    except Exception as e:
+        pass
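
Note on the new `check_gpu` helper in utility.py: it calls `logger.error(err)` and `sys.exit(1)`, but this patch does not add `import sys` or a module-level `logger` to utility.py. If those names are not already defined there, the resulting `NameError` is swallowed by the `except Exception` branch and the GPU check silently becomes a no-op (the `sys.exit(1)` call itself is fine under that handler, since `SystemExit` does not derive from `Exception`). Below is a minimal self-contained sketch of the helper with the imports it relies on; the `logging.getLogger` line is an assumption for illustration, not part of this patch.

```python
# Sketch only: the check_gpu helper together with the imports it depends on.
# The module-level `logger` here is an assumption; the patch as written
# expects utility.py to already provide `logger` and `sys`.
import logging
import sys

import paddle.fluid as fluid

logger = logging.getLogger(__name__)


def check_gpu(use_gpu):
    """Log an error and exit when use_gpu=True on a CPU-only PaddlePaddle build."""
    err = "Config use_gpu cannot be set as true while you are " \
          "using paddlepaddle cpu version ! \nPlease try: \n" \
          "\t1. Install paddlepaddle-gpu to run model on GPU \n" \
          "\t2. Set use_gpu as false in config file to run " \
          "model on CPU"

    try:
        # is_compiled_with_cuda() may be missing in very old Paddle releases;
        # the except branch keeps those versions running instead of crashing.
        if use_gpu and not fluid.is_compiled_with_cuda():
            logger.error(err)
            sys.exit(1)  # SystemExit is not caught by `except Exception`
    except Exception:
        pass
```

Callers simply invoke `check_gpu(args.use_gpu)` right after `print_arguments(args)`, as eval.py, infer.py, and train.py do above.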