Commit cfee84c4 authored by gaotingquan, committed by Tingquan Gao

refactor

Parent 9b4b7a67
Pipeline config (YAML):

 Global:
   Engine: POPEngine
   infer_imgs: "../../images/wangzai.jpg"

 AlgoModule:
-  - preprocess:
-      - processor_type: data_processor
-        processor_name: image_processor
-        image_processors:
+  - Module:
+      preprocess:
+        name: ImageProcessor
+        processors:
           - ResizeImage:
               size: [640, 640]
               interpolation: 2
           - NormalizeImage:
               scale: 0.00392157
               mean: [0.485, 0.456, 0.406]
               std: [0.229, 0.224, 0.225]
-          - ToRGB
+              order: hwc
+          - ToCHWImage:
+          - GetShapeInfo:
+              order: chw
+          - ToBatch:
+      predictor:
+        inference_model_dir: ./models/ppyolov2_r50vd_dcn_mainbody_v1.0_infer/
+        input_names:
+        output_names:
+      postprocess:
+        name: DetPostProcessor
+        threshold: 0.2
+        max_det_results: 1
+        label_list:
+          - foreground
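The new schema nests everything one pipeline stage needs under an AlgoModule / Module entry, so a stage is a self-contained triple of preprocess, predictor and postprocess. A minimal sketch (not part of the commit) of how that shape is consumed, assuming plain PyYAML here — the repo itself parses configs through utils.config:

import yaml

cfg = yaml.safe_load(open("pipeline.yaml"))   # hypothetical file name
for algo in cfg["AlgoModule"]:
    module_cfg = algo["Module"]
    print(module_cfg["preprocess"]["name"])    # -> ImageProcessor
    print(module_cfg["postprocess"]["name"])   # -> DetPostProcessor
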
engine/__init__.py:

-# import pipe engine, etc
-# TODO(gaotingquan): build engine according to config
+from engine.pop_engine import POPEngine
+
+
 def build_engine(config):
-    pass
+    return POPEngine(config)
engine/pop_engine.py:

-from ..processor import build_processor
+from processor.algo_mod import AlgoMod


 class POPEngine:
     def __init__(self, config):
-        self.processor_list = []
-        last_algo_type = "start"
-        for processor_config in config["Processors"]:
-            processor_config["last_algo_type"] = last_algo_type
-            self.processor_list.append(build_processor(processor_config))
-            last_algo_type = processor_config["type"]
+        self.algo_list = []
+        # last_algo_type = "start"
+        for algo_config in config["AlgoModule"]:
+            # algo_config["last_algo_type"] = last_algo_type
+            self.algo_list.append(AlgoMod(algo_config["Module"]))
+            # last_algo_type = algo_config["type"]

     def process(self, x):
-        for processor in self.processor_list:
-            x = processor.process(x)
+        for algo_module in self.algo_list:
+            x = algo_module.process(x)
         return x
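POPEngine is now a plain sequential runner: every AlgoMod reads and extends one shared dict, so chaining a second stage (say, a classifier fed by BBoxCropper crops) only means appending another AlgoModule entry. A usage sketch, not part of the commit, assuming cfg was parsed as in the YAML example above and the model files exist:

import numpy as np

engine = POPEngine(cfg)
dummy = np.zeros((720, 1280, 3), dtype=np.uint8)   # stand-in RGB image
result = engine.process({"input_image": dummy})
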
Inference entry script:

-from ..engine import build_engine
-from ..utils import config
+import os
+import sys
+__dir__ = os.path.dirname(os.path.abspath(__file__))
+sys.path.append(os.path.abspath(os.path.join(__dir__, '../')))
+
+import cv2
+
+from engine import build_engine
+from utils import config
+from utils.get_image_list import get_image_list

 def main():

@@ -9,6 +17,13 @@ def main():
     config_dict.profiler_options = args.profiler_options
     engine = build_engine(config_dict)

+    image_list = get_image_list(config_dict["Global"]["infer_imgs"])
+    for idx, image_file in enumerate(image_list):
+        img = cv2.imread(image_file)[:, :, ::-1]
+        input_data = {"input_image": img}
+        output = engine.process(input_data)
+        print(output)
+

 if __name__ == '__main__':
     main()
processor/__init__.py:

 from abc import ABC, abstractmethod

-from processor.algo_mod import predictors, searcher
-from processor.algo_mod.predictors import build_predictor
+from processor.algo_mod import searcher

-def build_processor(config):
-    processor_type = config.get("processor_type")
-    processor_mod = locals()[processor_type]
-    processor_name = config.get("processor_name")
-    return getattr(processor_mod, processor_name)
+# def build_processor(config):
+#     print(config)
+#     processor_type = config.get("processor_type")
+#     processor_mod = locals()[processor_type]
+#     processor_name = config.get("processor_name")
+#     return getattr(processor_mod, processor_name)

-class BaseProcessor(ABC):
-    @abstractmethod
-    def __init__(self, config):
-        pass
-
-    @abstractmethod
-    def process(self, input_data):
-        pass
+# class BaseProcessor(ABC):
+#     @abstractmethod
+#     def __init__(self, config):
+#         pass
+#
+#     @abstractmethod
+#     def process(self, input_data):
+#         pass
processor/algo_mod/__init__.py:

-from .. import BaseProcessor, build_processor
+from processor.algo_mod.data_processor import ImageProcessor
+from processor.algo_mod.post_processor.det import DetPostProcessor
+from processor.algo_mod.predictors import build_predictor


-class AlgoMod(BaseProcessor):
+def build_processor(config):
+    # processor_type = config.get("processor_type")
+    # processor_mod = locals()[processor_type]
+    processor_name = config.get("name")
+    return eval(processor_name)(config)
+
+
+class AlgoMod(object):
     def __init__(self, config):
-        self.pre_processor = build_processor(config["pre_processor"])
-        self.predictor = build_processor(config["predictor"])
-        self.post_processor = build_processor(config["post_processor"])
+        self.pre_processor = build_processor(config["preprocess"])
+        self.predictor = build_predictor(config["predictor"])
+        self.post_processor = build_processor(config["postprocess"])

     def process(self, input_data):
-        input_data = self.pre_processor(input_data)
-        input_data = self.predictor(input_data)
-        input_data = self.post_processor(input_data)
+        input_data = self.pre_processor.process(input_data)
+        input_data = self.predictor.process(input_data)
+        input_data = self.post_processor.process(input_data)
         return input_data
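The new build_processor resolves config["name"] with eval() against this module's namespace, which is why the ImageProcessor and DetPostProcessor imports above are load-bearing: dropping them silently breaks config lookup. An explicit registry is an equivalent, safer sketch (names below are hypothetical, not part of the commit):

_PROCESSOR_REGISTRY = {
    "ImageProcessor": ImageProcessor,
    "DetPostProcessor": DetPostProcessor,
}


def build_processor_from_registry(config):
    # same contract as build_processor, but fails loudly on unknown names
    return _PROCESSOR_REGISTRY[config["name"]](config)
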
processor/algo_mod/data_processor/__init__.py:

-from image_processor import ImageProcessor
+from processor.algo_mod.data_processor.image_processor import ImageProcessor
+from processor.algo_mod.data_processor.bbox_cropper import BBoxCropper
processor/algo_mod/data_processor/bbox_cropper.py:

-from processor import BaseProcessor
+from processor.algo_mod.data_processor.image_processor import BaseProcessor


 class BBoxCropper(BaseProcessor):
     ...
processor/algo_mod/data_processor/image_processor.py:

@@ -6,16 +6,29 @@
 from PIL import Image
 import paddle

 from utils import logger
-from processor import BaseProcessor
+# from processor import BaseProcessor
+from abc import ABC, abstractmethod


-class ImageProcessor(BaseProcessor):
+class BaseProcessor(ABC):
+    @abstractmethod
+    def __init__(self, *args, **kwargs):
+        pass
+
+    @abstractmethod
+    def process(self, input_data):
+        pass
+
+
+class ImageProcessor(object):
     def __init__(self, config):
         self.processors = []
-        for processor_config in config.get("image_processors"):
+        for processor_config in config.get("processors"):
             name = list(processor_config)[0]
             param = {} if processor_config[name] is None else processor_config[name]
-            op = locals()[name](**param)
+            op = eval(name)(**param)
             self.processors.append(op)

     def process(self, input_data):
@@ -30,25 +43,53 @@ class ImageProcessor:
 class GetShapeInfo(BaseProcessor):
-    def __init__(self):
-        pass
+    def __init__(self, order="hwc"):
+        super().__init__()
+        self.order = order

     def process(self, input_data):
         input_image = input_data["input_image"]
         image = input_data["image"]
-        input_data['im_shape'] = np.array(input_image.shape[:2], dtype=np.float32)
-        input_data['input_shape'] = np.array(image.shape[:2], dtype=np.float32)
-        input_data['scale_factor'] = np.array([image.shape[0] / input_image.shape[0],
-                                               image.shape[1] / input_image.shape[1]], dtype=np.float32)
+        if self.order == "hwc":
+            input_data['im_shape'] = np.array(
+                (image.shape[:2], ), dtype=np.float32)
+            input_data['scale_factor'] = np.array(
+                [
+                    image.shape[0] / input_image.shape[0],
+                    image.shape[1] / input_image.shape[1]
+                ],
+                dtype=np.float32)
+        else:
+            # CHW layout: shape[1] is height, shape[2] is width
+            input_data['im_shape'] = np.array(
+                (image.shape[1:], ), dtype=np.float32)
+            input_data['scale_factor'] = np.array(
+                [
+                    image.shape[1] / input_image.shape[0],
+                    image.shape[2] / input_image.shape[1]
+                ],
+                dtype=np.float32)
+        input_data['input_shape'] = np.array(image.shape[:2], dtype=np.float32)
+        return input_data


-class ToTensor(BaseProcessor):
-    def __init__(self, config):
-        pass
-
-    def process(self, input_data):
-        image = input_data["image"]
-        input_data["input_tensor"] = paddle.to_tensor(image)
-        return input_data
+# class ToTensor(BaseProcessor):
+#     def __init__(self):
+#         super().__init__()
+#
+#     def process(self, input_data):
+#         image = input_data["image"]
+#         input_data["input_tensor"] = paddle.to_tensor(image)
+#         return input_data
+
+
+class ToBatch(BaseProcessor):
+    def __init__(self):
+        super().__init__()
+
+    def process(self, input_data):
+        image = input_data["image"]
+        input_data["image"] = image[np.newaxis, :, :, :]
+        return input_data

@@ -123,8 +164,7 @@ class ResizeImage:
         else:
             logger.warning(
-                f"The backend of Resize only supports \"cv2\" or \"PIL\". \"{backend}\" is unavailable. "
-                f"Use \"cv2\" instead."
-            )
+                f"The backend of Resize only supports \"cv2\" or \"PIL\". \"{backend}\" is unavailable. "
+                f"Use \"cv2\" instead.")
             self.resize_func = cv2.resize

     def __call__(self, img):
@@ -191,7 +231,8 @@ class NormalizeImage:
         self.std = np.array(std).reshape(shape).astype('float32')

     def __call__(self, img):
-        assert isinstance(img, np.ndarray), "invalid input 'img' in NormalizeImage"
+        assert isinstance(img,
+                          np.ndarray), "invalid input 'img' in NormalizeImage"
         img = (img.astype('float32') * self.scale - self.mean) / self.std
     ...
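The preprocess chain can be exercised stand-alone. This sketch is not part of the commit; it assumes ImageProcessor.process() (elided in the hunk above) seeds input_data["image"] from "input_image" and applies each op in order, and that a ToCHWImage op exists in this file alongside those shown:

import numpy as np

preprocess_cfg = {
    "name": "ImageProcessor",
    "processors": [
        {"ResizeImage": {"size": [640, 640], "interpolation": 2}},
        {"NormalizeImage": {"scale": 0.00392157,
                            "mean": [0.485, 0.456, 0.406],
                            "std": [0.229, 0.224, 0.225],
                            "order": "hwc"}},
        {"ToCHWImage": None},
        {"GetShapeInfo": {"order": "chw"}},
        {"ToBatch": None},
    ],
}
pre = ImageProcessor(preprocess_cfg)
data = pre.process({"input_image": np.zeros((720, 1280, 3), dtype=np.uint8)})
# data["image"] is now a (1, 3, 640, 640) float32 batch, with im_shape,
# scale_factor and input_shape entries added by GetShapeInfo.
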
processor/algo_mod/post_processor/det.py (new file):

from functools import reduce

import numpy as np


class DetPostProcessor(object):
    def __init__(self, config):
        super().__init__()
        self.threshold = config["threshold"]
        self.label_list = config["label_list"]
        self.max_det_results = config["max_det_results"]

    def process(self, pred):
        np_boxes = pred["save_infer_model/scale_0.tmp_1"]
        if reduce(lambda x, y: x * y, np_boxes.shape) < 6:
            print('[WARNING] No object detected.')
            # an empty array cannot be column-indexed below, so bail out early
            return []

        keep_indexes = np_boxes[:, 1].argsort()[::-1][:self.max_det_results]
        results = []
        for idx in keep_indexes:
            single_res = np_boxes[idx]
            class_id = int(single_res[0])
            score = single_res[1]
            bbox = single_res[2:]
            if score < self.threshold:
                continue
            label_name = self.label_list[class_id]
            results.append({
                "class_id": class_id,
                "score": score,
                "bbox": bbox,
                "label_name": label_name,
            })
        return results
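A worked example (not part of the commit) on a fake prediction; each row of the fetched tensor follows the PaddleDetection deploy layout [class_id, score, x1, y1, x2, y2]:

import numpy as np

post = DetPostProcessor({"threshold": 0.2, "max_det_results": 1,
                         "label_list": ["foreground"]})
fake_pred = {"save_infer_model/scale_0.tmp_1": np.array(
    [[0, 0.91, 10, 20, 200, 240],
     [0, 0.15, 5, 5, 50, 50]], dtype=np.float32)}
print(post.process(fake_pred))
# keeps only the top-scoring box above threshold:
# [{'class_id': 0, 'score': 0.91,
#   'bbox': array([ 10.,  20., 200., 240.], dtype=float32),
#   'label_name': 'foreground'}]
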
processor/algo_mod/predictors/__init__.py:

-from .fake_cls import FakeClassifier
+from processor.algo_mod.predictors.paddle_predictor import Predictor as paddle_predictor
+from processor.algo_mod.predictors.onnx_predictor import Predictor as onnx_predictor


-def build_algo_mod(config):
-    algo_name = config.get("algo_name")
-    if algo_name == "fake_clas":
-        return FakeClassifier(config)
+def build_predictor(config):
+    # if use paddle backend
+    if True:
+        return paddle_predictor(config)
+    # if use onnx backend
+    else:
+        return onnx_predictor(config)
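The "if True:" branch hard-wires the Paddle backend for now, leaving the ONNX path unreachable. A config-driven version of the same dispatch might look like this sketch (the "backend" key is hypothetical, not part of the commit):

def build_predictor_from_config(config):
    if config.get("backend", "paddle") == "paddle":
        return paddle_predictor(config)
    return onnx_predictor(config)
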
processor/algo_mod/predictors/fake_cls.py (deleted):

from processor import BaseProcessor


class FakeClassifier(BaseProcessor):
    def __init__(self, config):
        pass

    def process(self, input_data):
        pass
Deleted FakeDetector stub (path not shown in the diff):

class FakeDetector:
    def __init__(self):
        pass

    def predict(self):
        pass
processor/algo_mod/predictors/onnx_predictor.py (new file, stub):

class Predictor(object):
    def __init__(self, config):
        super().__init__()
processor/algo_mod/predictors/paddle_predictor.py (rewritten; the old file held only "from paddle.inference import create_predictor, Config"):

import os
import platform

from paddle.inference import create_predictor
from paddle.inference import Config as PaddleConfig


class Predictor(object):
    def __init__(self, config):
        super().__init__()

        # HALF precision predict only works when using TensorRT
        if config.get("use_fp16", False):
            assert config.get("use_tensorrt", False) is True

        inference_model_dir = config["inference_model_dir"]
        params_file = os.path.join(inference_model_dir, "inference.pdiparams")
        model_file = os.path.join(inference_model_dir, "inference.pdmodel")
        paddle_config = PaddleConfig(model_file, params_file)

        if config.get("use_gpu", False):
            paddle_config.enable_use_gpu(config.get("gpu_mem", 8000), 0)
        else:
            paddle_config.disable_gpu()
            if config.get("enable_mkldnn", False):
                # there is no set_mkldnn_cache_capacity() on macOS
                if platform.system() != "Darwin":
                    # cache 10 different shapes for mkldnn to avoid memory leak
                    paddle_config.set_mkldnn_cache_capacity(10)
                paddle_config.enable_mkldnn()
        paddle_config.set_cpu_math_library_num_threads(
            config.get("cpu_num_threads", 10))

        if config.get("enable_profile", False):
            paddle_config.enable_profile()
        paddle_config.disable_glog_info()
        paddle_config.switch_ir_optim(config.get("ir_optim", True))  # default true
        if config.get("use_tensorrt", True):
            paddle_config.enable_tensorrt_engine(
                precision_mode=PaddleConfig.Precision.Half
                if config.get("use_fp16", False) else
                PaddleConfig.Precision.Float32,
                max_batch_size=config.get("batch_size", 1),
                workspace_size=1 << 30,
                min_subgraph_size=30)
        paddle_config.enable_memory_optim()
        # use zero copy
        paddle_config.switch_use_feed_fetch_ops(False)
        self.predictor = create_predictor(paddle_config)

    def process(self, input_data):
        input_names = self.predictor.get_input_names()
        for input_name in input_names:
            input_tensor = self.predictor.get_input_handle(input_name)
            input_tensor.copy_from_cpu(input_data[input_name])
        self.predictor.run()

        output_data = {}
        output_names = self.predictor.get_output_names()
        for output_name in output_names:
            output = self.predictor.get_output_handle(output_name)
            output_data[output_name] = output.copy_to_cpu()
        return output_data
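A stand-alone construction sketch (not part of the commit); the keys match what __init__ reads above. Note that use_tensorrt defaults to True here, so a plain GPU run has to disable it explicitly:

predictor = Predictor({
    "inference_model_dir": "./models/ppyolov2_r50vd_dcn_mainbody_v1.0_infer/",
    "use_gpu": True,
    "use_tensorrt": False,
})
# process() feeds exactly the tensors the model declares as inputs
# (for the PP-YOLOv2 export: "image", "im_shape", "scale_factor")
outputs = predictor.process(data)  # "data" from the ImageProcessor sketch above
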
utils/get_image_list.py (new file):

import os
import argparse
import base64

import numpy as np


def get_image_list(img_file):
    imgs_lists = []
    if img_file is None or not os.path.exists(img_file):
        raise Exception("not found any img file in {}".format(img_file))

    img_end = ['jpg', 'png', 'jpeg', 'JPEG', 'JPG', 'bmp']
    if os.path.isfile(img_file) and img_file.split('.')[-1] in img_end:
        imgs_lists.append(img_file)
    elif os.path.isdir(img_file):
        for single_file in os.listdir(img_file):
            if single_file.split('.')[-1] in img_end:
                imgs_lists.append(os.path.join(img_file, single_file))
    if len(imgs_lists) == 0:
        raise Exception("not found any img file in {}".format(img_file))
    imgs_lists = sorted(imgs_lists)
    return imgs_lists
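The helper accepts either a single image path or a directory and always returns a sorted list, e.g.:

print(get_image_list("../../images/wangzai.jpg"))
# ['../../images/wangzai.jpg']
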