未验证 提交 eeebef9f 编写于 作者: X xiaoluomi 提交者: GitHub

fix rtdetr yaml and infer (#8268)

上级 79267419
...@@ -3,7 +3,7 @@ Global: ...@@ -3,7 +3,7 @@ Global:
reader_config: configs/rtdetr_reader.yml reader_config: configs/rtdetr_reader.yml
include_nms: True include_nms: True
Evaluation: True Evaluation: True
model_dir: ./rtdetr_r50vd_6x_coco/ model_dir: ./rtdetr_hgnetv2_x_6x_coco/
model_filename: model.pdmodel model_filename: model.pdmodel
params_filename: model.pdiparams params_filename: model.pdiparams
......
...@@ -3,7 +3,7 @@ Global: ...@@ -3,7 +3,7 @@ Global:
reader_config: configs/rtdetr_reader.yml reader_config: configs/rtdetr_reader.yml
include_nms: True include_nms: True
Evaluation: True Evaluation: True
model_dir: ./rtdetr_hgnetv2_x_6x_coco/ model_dir: ./rtdetr_r101vd_6x_coco/
model_filename: model.pdmodel model_filename: model.pdmodel
params_filename: model.pdiparams params_filename: model.pdiparams
......
...@@ -12,6 +12,18 @@ TrainDataset: ...@@ -12,6 +12,18 @@ TrainDataset:
anno_path: annotations/instances_val2017.json anno_path: annotations/instances_val2017.json
dataset_dir: dataset/coco/ dataset_dir: dataset/coco/
EvalDataset:
!COCODataSet
image_dir: val2017
anno_path: annotations/instances_val2017.json
dataset_dir: dataset/coco/
TestDataset:
!COCODataSet
image_dir: val2017
anno_path: annotations/instances_val2017.json
dataset_dir: dataset/coco/
worker_num: 0 worker_num: 0
# preprocess reader in test # preprocess reader in test
......
...@@ -284,48 +284,6 @@ def load_predictor( ...@@ -284,48 +284,6 @@ def load_predictor(
return predictor, rerun_flag return predictor, rerun_flag
def get_current_memory_mb():
    """Return the current CPU and GPU memory usage of this process, in MB.

    Installs ``pynvml``, ``psutil`` and ``GPUtil`` on the fly if they are
    missing, then samples process USS via psutil and device memory via
    pynvml. This function is time-consuming; avoid calling it per-iteration
    in hot loops.

    Returns:
        tuple(float, float): ``(cpu_mem_mb, gpu_mem_mb)``, each rounded to
        4 decimal places. ``gpu_mem_mb`` is 0 when no GPU is visible.
    """
    # Best-effort runtime install of missing third-party dependencies.
    # Narrowed from bare `except:` so SystemExit/KeyboardInterrupt propagate.
    for dep in ("pynvml", "psutil", "GPUtil"):
        try:
            pkg.require(dep)
        except Exception:
            from pip._internal import main
            main(["install", dep])
    import pynvml
    import psutil
    import GPUtil

    # CUDA_VISIBLE_DEVICES may be a comma-separated list (e.g. "0,1");
    # the original int() call would raise ValueError on that — take the
    # first entry and fall back to 0 on anything non-numeric.
    visible = str(os.environ.get("CUDA_VISIBLE_DEVICES", "0")).split(",")[0]
    try:
        gpu_id = int(visible)
    except ValueError:
        gpu_id = 0

    proc = psutil.Process(os.getpid())
    # USS (unique set size) is the memory that would be freed if the
    # process exited right now — a tighter measure than RSS.
    cpu_mem = proc.memory_full_info().uss / 1024.0 / 1024.0

    gpu_mem = 0
    gpus = GPUtil.getGPUs()
    # Bounds-check the index: gpu_id may exceed the number of detected
    # GPUs, which previously raised IndexError.
    if gpus and 0 <= gpu_id < len(gpus):
        pynvml.nvmlInit()
        # NOTE(review): index 0 is hard-coded as in the original; with
        # CUDA_VISIBLE_DEVICES set, device 0 is the first visible one —
        # confirm this matches gpu_id on multi-GPU hosts.
        handle = pynvml.nvmlDeviceGetHandleByIndex(0)
        meminfo = pynvml.nvmlDeviceGetMemoryInfo(handle)
        gpu_mem = meminfo.used / 1024.0 / 1024.0
    return round(cpu_mem, 4), round(gpu_mem, 4)
def predict_image(predictor, def predict_image(predictor,
image_file, image_file,
image_shape=[640, 640], image_shape=[640, 640],
...@@ -367,13 +325,7 @@ def predict_image(predictor, ...@@ -367,13 +325,7 @@ def predict_image(predictor,
time_min = min(time_min, timed) time_min = min(time_min, timed)
time_max = max(time_max, timed) time_max = max(time_max, timed)
predict_time += timed predict_time += timed
cpu_mem, gpu_mem = get_current_memory_mb()
cpu_mems += cpu_mem
gpu_mems += gpu_mem
time_avg = predict_time / repeats time_avg = predict_time / repeats
print("[Benchmark]Avg cpu_mem:{} MB, avg gpu_mem: {} MB".format(
cpu_mems / repeats, gpu_mems / repeats))
print("[Benchmark]Inference time(ms): min={}, max={}, avg={}".format( print("[Benchmark]Inference time(ms): min={}, max={}, avg={}".format(
round(time_min * 1000, 2), round(time_min * 1000, 2),
round(time_max * 1000, 1), round(time_avg * 1000, 1))) round(time_max * 1000, 1), round(time_avg * 1000, 1)))
...@@ -418,9 +370,6 @@ def eval(predictor, val_loader, metric, rerun_flag=False): ...@@ -418,9 +370,6 @@ def eval(predictor, val_loader, metric, rerun_flag=False):
time_min = min(time_min, timed) time_min = min(time_min, timed)
time_max = max(time_max, timed) time_max = max(time_max, timed)
predict_time += timed predict_time += timed
cpu_mem, gpu_mem = get_current_memory_mb()
cpu_mems += cpu_mem
gpu_mems += gpu_mem
if not FLAGS.include_nms: if not FLAGS.include_nms:
postprocess = PPYOLOEPostProcess( postprocess = PPYOLOEPostProcess(
score_threshold=0.3, nms_threshold=0.6) score_threshold=0.3, nms_threshold=0.6)
...@@ -436,8 +385,6 @@ def eval(predictor, val_loader, metric, rerun_flag=False): ...@@ -436,8 +385,6 @@ def eval(predictor, val_loader, metric, rerun_flag=False):
map_res = metric.get_results() map_res = metric.get_results()
metric.reset() metric.reset()
time_avg = predict_time / sample_nums time_avg = predict_time / sample_nums
print("[Benchmark]Avg cpu_mem:{} MB, avg gpu_mem: {} MB".format(
cpu_mems / sample_nums, gpu_mems / sample_nums))
print("[Benchmark]Inference time(ms): min={}, max={}, avg={}".format( print("[Benchmark]Inference time(ms): min={}, max={}, avg={}".format(
round(time_min * 1000, 2), round(time_min * 1000, 2),
round(time_max * 1000, 1), round(time_avg * 1000, 1))) round(time_max * 1000, 1), round(time_avg * 1000, 1)))
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册