未验证 提交 ee491be3 编写于 作者: S SunGaofeng 提交者: GitHub

cherry pick to 1.5 for check cuda and python3 modifications (#2739)

* fix pickle.load in ctcn_reader for python3 (#2679)

fix pickle.load to fit both python3 and 2

* fix subprocess not stop when press ctrl+c to stop main process (#2681)

* add cuda check to show errors when run on paddle compiled with cpu only (#2732)

* add cuda check to show errors when run on paddle compiled with cpu only

* change use_cuda to use_gpu in the error information to be consistent with code
上级 99645eb2
......@@ -199,18 +199,18 @@ class CTCNReader(DataReader):
def load_file(self, fname):
if python_ver < (3, 0):
rgb_pkl = pickle.load(
open(os.path.join(self.root, self.rgb, fname + '.pkl')))
open(os.path.join(self.root, self.rgb, fname + '.pkl'), 'rb'))
flow_pkl = pickle.load(
open(os.path.join(self.root, self.flow, fname + '.pkl')))
open(os.path.join(self.root, self.flow, fname + '.pkl'), 'rb'))
else:
rgb_pkl = pickle.load(
open(os.path.join(self.root, self.rgb, fname + '.pkl')),
open(os.path.join(self.root, self.rgb, fname + '.pkl'), 'rb'),
encoding='bytes')
flow_pkl = pickle.load(
open(os.path.join(self.root, self.flow, fname + '.pkl')),
open(os.path.join(self.root, self.flow, fname + '.pkl'), 'rb'),
encoding='bytes')
data_flow = np.array(flow_pkl['scores'])
data_rgb = np.array(rgb_pkl['scores'])
data_flow = np.array(flow_pkl[b'scores'])
data_rgb = np.array(rgb_pkl[b'scores'])
if data_flow.shape[0] < data_rgb.shape[0]:
data_rgb = data_rgb[0:data_flow.shape[0], :]
elif data_flow.shape[0] > data_rgb.shape[0]:
......
......@@ -17,6 +17,7 @@ import sys
import time
import logging
import argparse
import ast
import numpy as np
try:
import cPickle as pickle
......@@ -27,6 +28,7 @@ import paddle.fluid as fluid
from config import *
import models
from datareader import get_reader
from utils import check_cuda
logging.root.handlers = []
FORMAT = '[%(levelname)s: %(filename)s: %(lineno)4d]: %(message)s'
......@@ -47,7 +49,10 @@ def parse_args():
default='configs/attention_cluster.txt',
help='path to config file of model')
parser.add_argument(
'--use_gpu', type=bool, default=True, help='default use gpu.')
'--use_gpu',
type=ast.literal_eval,
default=True,
help='default use gpu.')
parser.add_argument(
'--weights',
type=str,
......@@ -155,6 +160,8 @@ def infer(args):
if __name__ == "__main__":
args = parse_args()
# check whether the installed paddle is compiled with GPU
check_cuda(args.use_gpu)
logger.info(args)
infer(args)
......@@ -17,6 +17,7 @@ import sys
import time
import logging
import argparse
import ast
import numpy as np
import paddle.fluid as fluid
......@@ -24,6 +25,7 @@ from config import *
import models
from datareader import get_reader
from metrics import get_metrics
from utils import check_cuda
logging.root.handlers = []
FORMAT = '[%(levelname)s: %(filename)s: %(lineno)4d]: %(message)s'
......@@ -49,7 +51,10 @@ def parse_args():
default=None,
help='test batch size. None to use config file setting.')
parser.add_argument(
'--use_gpu', type=bool, default=True, help='default use gpu.')
'--use_gpu',
type=ast.literal_eval,
default=True,
help='default use gpu.')
parser.add_argument(
'--weights',
type=str,
......@@ -141,6 +146,8 @@ def test(args):
if __name__ == "__main__":
args = parse_args()
# check whether the installed paddle is compiled with GPU
check_cuda(args.use_gpu)
logger.info(args)
test(args)
......@@ -16,6 +16,7 @@ import os
import sys
import time
import argparse
import ast
import logging
import numpy as np
import paddle.fluid as fluid
......@@ -25,6 +26,7 @@ import models
from config import *
from datareader import get_reader
from metrics import get_metrics
from utils import check_cuda
logging.root.handlers = []
FORMAT = '[%(levelname)s: %(filename)s: %(lineno)4d]: %(message)s'
......@@ -67,7 +69,10 @@ def parse_args():
help='path to resume training based on previous checkpoints. '
'None for not resuming any checkpoints.')
parser.add_argument(
'--use_gpu', type=bool, default=True, help='default use gpu.')
'--use_gpu',
type=ast.literal_eval,
default=True,
help='default use gpu.')
parser.add_argument(
'--no_use_pyreader',
action='store_true',
......@@ -100,7 +105,7 @@ def parse_args():
help='mini-batch interval to log.')
parser.add_argument(
'--enable_ce',
type=bool,
type=ast.literal_eval,
default=False,
help='If set True, enable continuous evaluation job.')
args = parser.parse_args()
......@@ -277,6 +282,8 @@ def train(args):
if __name__ == "__main__":
args = parse_args()
# check whether the installed paddle is compiled with GPU
check_cuda(args.use_gpu)
logger.info(args)
if not os.path.exists(args.save_dir):
......
......@@ -14,6 +14,8 @@
import os
import signal
import paddle
import paddle.fluid as fluid
__all__ = ['AttrDict']
......@@ -24,6 +26,7 @@ def _term(sig_num, addition):
signal.signal(signal.SIGTERM, _term)
signal.signal(signal.SIGINT, _term)
class AttrDict(dict):
......@@ -35,3 +38,14 @@ class AttrDict(dict):
self.__dict__[key] = value
else:
self[key] = value
def check_cuda(use_cuda, err = \
    "\nYou can not set use_gpu = True in the model because you are using paddlepaddle-cpu.\n \
    Please: 1. Install paddlepaddle-gpu to run your models on GPU or 2. Set use_gpu = False to run models on CPU.\n"
    ):
    """Abort with an explanatory message when GPU execution is requested
    but the installed paddle build was compiled without CUDA support.

    Args:
        use_cuda (bool): True when the caller intends to run on GPU.
        err (str): message printed to stdout before exiting on failure.

    Exits:
        Calls sys.exit(1) when use_cuda is True on a CPU-only build.
    """
    # Local import: the visible top-of-file imports in this module are only
    # os/signal/paddle/fluid, so `sys` must be brought into scope here.
    # (In the original code the resulting NameError was silently swallowed
    # by the blanket `except Exception`, which disabled the check entirely.)
    import sys
    try:
        # Probe inside try only: `is_compiled_with_cuda` may be missing on
        # very old paddle versions; a failed probe is treated as "cannot
        # tell, let execution proceed" (best-effort, as originally intended).
        compiled_with_cuda = fluid.is_compiled_with_cuda()
    except Exception:
        return
    if use_cuda and not compiled_with_cuda:
        print(err)
        sys.exit(1)
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册