Commit 149e1184 authored by qili93

[TIPC] add scripts for NPU and XPU, test=develop

Parent 2cfb23cf
#!/bin/bash
source test_tipc/common_func.sh
function readlinkf() {
    perl -MCwd -e 'print Cwd::abs_path shift' "$1";
}

function func_parser_config() {
    strs=$1
    IFS=" "
    array=(${strs})
    tmp=${array[2]}
    echo ${tmp}
}
BASEDIR=$(dirname "$0")
REPO_ROOT_PATH=$(readlinkf ${BASEDIR}/../)
FILENAME=$1
# disable mkldnn on non x86_64 env
arch=$(uname -i)
if [ "$arch" != "x86_64" ]; then
    sed -i 's/--enable_mkldnn:True|False/--enable_mkldnn:False/g' $FILENAME
    sed -i 's/--enable_mkldnn:True/--enable_mkldnn:False/g' $FILENAME
fi
# change gpu to npu in tipc txt configs
sed -i 's/use_gpu/use_npu/g' $FILENAME
# disable benchmark as AutoLog requires the nvidia-smi command
sed -i 's/--benchmark:True/--benchmark:False/g' $FILENAME
dataline=`cat $FILENAME`
# parse params
IFS=$'\n'
lines=(${dataline})
# replace training config file
grep -n 'tools/.*yml' $FILENAME | cut -d ":" -f 1 \
| while read line_num ; do
    train_cmd=$(func_parser_value "${lines[line_num-1]}")
    trainer_config=$(func_parser_config ${train_cmd})
    sed -i 's/use_gpu/use_npu/g' "$REPO_ROOT_PATH/$trainer_config"
done
# change gpu to npu in execution script
sed -i 's/\"gpu\"/\"npu\"/g' test_tipc/test_train_inference_python.sh
# pass parameters to test_train_inference_python.sh
cmd="bash test_tipc/test_train_inference_python.sh ${FILENAME} $2"
echo -e "\033[1;32m Started to run command: ${cmd}! \033[0m"
eval $cmd
#!/bin/bash
source test_tipc/common_func.sh
function readlinkf() {
    perl -MCwd -e 'print Cwd::abs_path shift' "$1";
}

function func_parser_config() {
    strs=$1
    IFS=" "
    array=(${strs})
    tmp=${array[2]}
    echo ${tmp}
}
BASEDIR=$(dirname "$0")
REPO_ROOT_PATH=$(readlinkf ${BASEDIR}/../)
FILENAME=$1
# disable mkldnn on non x86_64 env
arch=$(uname -i)
if [ "$arch" != "x86_64" ]; then
    sed -i 's/--enable_mkldnn:True|False/--enable_mkldnn:False/g' $FILENAME
    sed -i 's/--enable_mkldnn:True/--enable_mkldnn:False/g' $FILENAME
fi
# change gpu to xpu in tipc txt configs
sed -i 's/use_gpu/use_xpu/g' $FILENAME
# disable benchmark as AutoLog requires the nvidia-smi command
sed -i 's/--benchmark:True/--benchmark:False/g' $FILENAME
dataline=`cat $FILENAME`
# parse params
IFS=$'\n'
lines=(${dataline})
# replace training config file
grep -n 'tools/.*yml' $FILENAME | cut -d ":" -f 1 \
| while read line_num ; do
    train_cmd=$(func_parser_value "${lines[line_num-1]}")
    trainer_config=$(func_parser_config ${train_cmd})
    sed -i 's/use_gpu/use_xpu/g' "$REPO_ROOT_PATH/$trainer_config"
done
# change gpu to xpu in execution script
sed -i 's/\"gpu\"/\"xpu\"/g' test_tipc/test_train_inference_python.sh
# pass parameters to test_train_inference_python.sh
cmd="bash test_tipc/test_train_inference_python.sh ${FILENAME} $2"
echo -e "\033[1;32m Started to run command: ${cmd}! \033[0m"
eval $cmd
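Both wrapper scripts forward the same two positional arguments to test_tipc/test_train_inference_python.sh: the TIPC txt config and the run mode. A hypothetical invocation of the NPU variant (the script file name and config path below are illustrative, not taken from this commit):

# illustrative names; substitute the actual wrapper script and TIPC config in the repo
bash test_tipc/test_train_inference_python_npu.sh \
    test_tipc/configs/ch_ppocr_mobile_v2.0_det/train_infer_python.txt \
    lite_train_lite_infer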
@@ -36,6 +36,7 @@ def init_args():
     # params for prediction engine
     parser.add_argument("--use_gpu", type=str2bool, default=True)
     parser.add_argument("--use_xpu", type=str2bool, default=False)
+    parser.add_argument("--use_npu", type=str2bool, default=False)
     parser.add_argument("--ir_optim", type=str2bool, default=True)
     parser.add_argument("--use_tensorrt", type=str2bool, default=False)
     parser.add_argument("--min_subgraph_size", type=int, default=15)
@@ -245,6 +246,8 @@ def create_predictor(args, mode, logger):
                     f"when using tensorrt, dynamic shape is a suggested option, you can use '--shape_info_filename=shape.txt' for offline dygnamic shape tuning"
                 )
+    elif args.use_npu:
+        config.enable_npu()
     elif args.use_xpu:
         config.enable_xpu(10 * 1024 * 1024)
     else:
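For context, a minimal sketch of the device branch this hunk extends; the helper name _set_device is hypothetical and the GPU/CPU branches are paraphrased from the surrounding file, not part of this diff:

from paddle import inference

def _set_device(config: inference.Config, args):
    # sketch only; paraphrases the branch in create_predictor, not a verbatim copy
    if args.use_gpu:
        config.enable_use_gpu(args.gpu_mem, 0)   # GPU memory pool size (MB), device id 0
    elif args.use_npu:
        config.enable_npu()                      # run inference on an Ascend NPU
    elif args.use_xpu:
        config.enable_xpu(10 * 1024 * 1024)      # XPU with a 10 MB L3 workspace
    else:
        config.disable_gpu()                     # fall back to CPU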
@@ -413,7 +416,8 @@ def draw_ocr_box_txt(image,
     for idx, (box, txt) in enumerate(zip(boxes, txts)):
         if scores is not None and scores[idx] < drop_score:
             continue
-        color = (random.randint(0, 255), random.randint(0, 255), random.randint(0, 255))
+        color = (random.randint(0, 255), random.randint(0, 255),
+                 random.randint(0, 255))
         draw_left.polygon(box, fill=color)
         img_right_text = draw_box_txt_fine((w, h), box, txt, font_path)
         pts = np.array(box, np.int32).reshape((-1, 1, 2))
@@ -427,8 +431,10 @@
 def draw_box_txt_fine(img_size, box, txt, font_path="./doc/fonts/simfang.ttf"):
-    box_height = int(math.sqrt((box[0][0] - box[3][0])**2 + (box[0][1] - box[3][1])**2))
-    box_width = int(math.sqrt((box[0][0] - box[1][0])**2 + (box[0][1] - box[1][1])**2))
+    box_height = int(
+        math.sqrt((box[0][0] - box[3][0])**2 + (box[0][1] - box[3][1])**2))
+    box_width = int(
+        math.sqrt((box[0][0] - box[1][0])**2 + (box[0][1] - box[1][1])**2))
     if box_height > 2 * box_width and box_height > 30:
         img_text = Image.new('RGB', (box_height, box_width), (255, 255, 255))
@@ -444,15 +450,19 @@ def draw_box_txt_fine(img_size, box, txt, font_path="./doc/fonts/simfang.ttf"):
         font = create_font(txt, (box_width, box_height), font_path)
         draw_text.text([0, 0], txt, fill=(0, 0, 0), font=font)
-    pts1 = np.float32([[0, 0], [box_width, 0], [box_width, box_height], [0, box_height]])
+    pts1 = np.float32(
+        [[0, 0], [box_width, 0], [box_width, box_height], [0, box_height]])
     pts2 = np.array(box, dtype=np.float32)
     M = cv2.getPerspectiveTransform(pts1, pts2)
     img_text = np.array(img_text, dtype=np.uint8)
-    img_right_text = cv2.warpPerspective(img_text, M, img_size,
-                                         flags=cv2.INTER_NEAREST,
-                                         borderMode=cv2.BORDER_CONSTANT,
-                                         borderValue=(255, 255, 255))
+    img_right_text = cv2.warpPerspective(
+        img_text,
+        M,
+        img_size,
+        flags=cv2.INTER_NEAREST,
+        borderMode=cv2.BORDER_CONSTANT,
+        borderValue=(255, 255, 255))
     return img_right_text
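The reformatted block pastes the rendered text back onto the page: pts1 are the corners of the upright text image and pts2 the detected box, and the warp maps one onto the other. A tiny standalone illustration with synthetic sizes and a made-up box (all values hypothetical):

import cv2
import numpy as np

# map a 100x30 upright text image onto an arbitrary quadrilateral
box_width, box_height = 100, 30
img_text = np.full((box_height, box_width, 3), 255, dtype=np.uint8)
pts1 = np.float32([[0, 0], [box_width, 0], [box_width, box_height], [0, box_height]])
pts2 = np.float32([[10, 10], [120, 25], [115, 60], [5, 45]])  # detected box corners (synthetic)
M = cv2.getPerspectiveTransform(pts1, pts2)
warped = cv2.warpPerspective(img_text, M, (200, 100), flags=cv2.INTER_NEAREST,
                             borderMode=cv2.BORDER_CONSTANT, borderValue=(255, 255, 255))
print(warped.shape)  # (100, 200, 3)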
......
@@ -114,7 +114,7 @@ def merge_config(config, opts):
     return config
-def check_device(use_gpu, use_xpu=False):
+def check_device(use_gpu, use_xpu=False, use_npu=False):
     """
     Log error and exit when set use_gpu=true in paddlepaddle
     cpu version.
@@ -134,24 +134,8 @@ def check_device(use_gpu, use_xpu=False):
         if use_xpu and not paddle.device.is_compiled_with_xpu():
             print(err.format("use_xpu", "xpu", "xpu", "use_xpu"))
             sys.exit(1)
-    except Exception as e:
-        pass
-def check_xpu(use_xpu):
-    """
-    Log error and exit when set use_xpu=true in paddlepaddle
-    cpu/gpu version.
-    """
-    err = "Config use_xpu cannot be set as true while you are " \
-          "using paddlepaddle cpu/gpu version ! \nPlease try: \n" \
-          "\t1. Install paddlepaddle-xpu to run model on XPU \n" \
-          "\t2. Set use_xpu as false in config file to run " \
-          "model on CPU/GPU"
-    try:
-        if use_xpu and not paddle.is_compiled_with_xpu():
-            print(err)
+        if use_npu and not paddle.device.is_compiled_with_npu():
+            print(err.format("use_npu", "npu", "npu", "use_npu"))
             sys.exit(1)
     except Exception as e:
         pass
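After this change a single check_device covers GPU, XPU and NPU. The error-message template sits above this hunk and is not shown; a sketch of the resulting function, with the template paraphrased as an assumption:

import sys
import paddle

def check_device(use_gpu, use_xpu=False, use_npu=False):
    # sketch only; the real message template in tools/program.py is paraphrased here
    err = "Config {} cannot be set as true while your paddle " \
          "is not compiled with {} ! \nPlease try: \n" \
          "\t1. Install paddlepaddle to run model on {} \n" \
          "\t2. Set {} as false in config file to run model on CPU"
    try:
        if use_gpu and not paddle.is_compiled_with_cuda():
            print(err.format("use_gpu", "cuda", "gpu", "use_gpu"))
            sys.exit(1)
        if use_xpu and not paddle.device.is_compiled_with_xpu():
            print(err.format("use_xpu", "xpu", "xpu", "use_xpu"))
            sys.exit(1)
        if use_npu and not paddle.device.is_compiled_with_npu():
            print(err.format("use_npu", "npu", "npu", "use_npu"))
            sys.exit(1)
    except Exception as e:
        pass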
@@ -279,7 +263,9 @@ def train(config,
                 model_average = True
             # use amp
             if scaler:
-                with paddle.amp.auto_cast(level=amp_level, custom_black_list=amp_custom_black_list):
+                with paddle.amp.auto_cast(
+                        level=amp_level,
+                        custom_black_list=amp_custom_black_list):
                     if model_type == 'table' or extra_input:
                         preds = model(images, data=batch[1:])
                     elif model_type in ["kie"]:
@@ -479,7 +465,7 @@ def eval(model,
          extra_input=False,
          scaler=None,
          amp_level='O2',
-         amp_custom_black_list = []):
+         amp_custom_black_list=[]):
     model.eval()
     with paddle.no_grad():
         total_frame = 0.0
@@ -500,7 +486,9 @@
             # use amp
             if scaler:
-                with paddle.amp.auto_cast(level=amp_level, custom_black_list=amp_custom_black_list):
+                with paddle.amp.auto_cast(
+                        level=amp_level,
+                        custom_black_list=amp_custom_black_list):
                     if model_type == 'table' or extra_input:
                         preds = model(images, data=batch[1:])
                     elif model_type in ["kie"]:
@@ -627,14 +615,9 @@ def preprocess(is_train=False):
     logger = get_logger(log_file=log_file)
     # check if set use_gpu=True in paddlepaddle cpu version
-    use_gpu = config['Global']['use_gpu']
+    use_gpu = config['Global'].get('use_gpu', False)
+    use_xpu = config['Global'].get('use_xpu', False)
-    # check if set use_xpu=True in paddlepaddle cpu/gpu version
-    use_xpu = False
-    if 'use_xpu' in config['Global']:
-        use_xpu = config['Global']['use_xpu']
-    check_xpu(use_xpu)
+    use_npu = config['Global'].get('use_npu', False)
     alg = config['Architecture']['algorithm']
     assert alg in [
@@ -647,10 +630,12 @@
     if use_xpu:
         device = 'xpu:{0}'.format(os.getenv('FLAGS_selected_xpus', 0))
+    elif use_npu:
+        device = 'npu:{0}'.format(os.getenv('FLAGS_selected_npus', 0))
     else:
         device = 'gpu:{}'.format(dist.ParallelEnv()
                                  .dev_id) if use_gpu else 'cpu'
-    check_device(use_gpu, use_xpu)
+    check_device(use_gpu, use_xpu, use_npu)
     device = paddle.set_device(device)
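With these hooks in place, NPU training is selected through the config's Global section (or -o overrides) and the visible device via FLAGS_selected_npus; a hypothetical launch (the config path is illustrative):

export FLAGS_selected_npus=0
python3 tools/train.py -c configs/det/det_mv3_db.yml \
    -o Global.use_gpu=False Global.use_npu=True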
......