Commit dba49e1d authored by sjtubinlong

add optical flow process

Parent 8d56d6d7
@@ -22,22 +22,97 @@ import cv2
 import paddle.fluid as fluid
 
 
-def load_model(model_dir, use_gpu=False):
-    """
-    Load model files and init paddle predictor
-    """
-    prog_file = os.path.join(model_dir, '__model__')
-    params_file = os.path.join(model_dir, '__params__')
-    config = fluid.core.AnalysisConfig(prog_file, params_file)
-    if use_gpu:
-        config.enable_use_gpu(100, 0)
-        config.switch_ir_optim(True)
-    else:
-        config.disable_gpu()
-    config.disable_glog_info()
-    config.switch_specify_input_names(True)
-    config.enable_memory_optim()
-    return fluid.core.create_paddle_predictor(config)
+def get_round(data):
+    """
+    get round of data
+    """
+    round = 0.5 if data >= 0 else -0.5
+    return (int)(data + round)
+
+
+def human_seg_tracking(pre_gray, cur_gray, prev_cfd, dl_weights, disflow):
+    """
+    human segmentation tracking
+    """
+    check_thres = 8
+    h, w = pre_gray.shape[:2]
+    track_cfd = np.zeros_like(prev_cfd)
+    is_track = np.zeros_like(pre_gray)
+    # dense optical flow in both directions between the previous and current frame
+    flow_fw = disflow.calc(pre_gray, cur_gray, None)
+    flow_bw = disflow.calc(cur_gray, pre_gray, None)
+    for r in range(h):
+        for c in range(w):
+            fxy_fw = flow_fw[r, c]
+            dx_fw = get_round(fxy_fw[0])
+            cur_x = dx_fw + c
+            dy_fw = get_round(fxy_fw[1])
+            cur_y = dy_fw + r
+            if cur_x < 0 or cur_x >= w or cur_y < 0 or cur_y >= h:
+                continue
+            fxy_bw = flow_bw[cur_y, cur_x]
+            dx_bw = get_round(fxy_bw[0])
+            dy_bw = get_round(fxy_bw[1])
+            # forward-backward consistency check: skip pixels whose round trip
+            # does not return close to the starting point
+            if ((dy_fw + dy_bw) * (dy_fw + dy_bw) + (dx_fw + dx_bw) * (dx_fw + dx_bw)) >= check_thres:
+                continue
+            if abs(dy_fw) <= 0 and abs(dx_fw) <= 0 and abs(dy_bw) <= 0 and abs(dx_bw) <= 0:
+                dl_weights[cur_y, cur_x] = 0.05
+            is_track[cur_y, cur_x] = 1
+            track_cfd[cur_y, cur_x] = prev_cfd[r, c]
+    return track_cfd, is_track, dl_weights
+
+
+def human_seg_track_fuse(track_cfd, dl_cfd, dl_weights, is_track):
+    """
+    human segmentation tracking fuse
+    """
+    cur_cfd = dl_cfd.copy()
+    idxs = np.where(is_track > 0)
+    # np.where returns a (rows, cols) tuple; iterate over every tracked pixel
+    for i in range(len(idxs[0])):
+        r, c = idxs[0][i], idxs[1][i]
+        dl_score = dl_cfd[r, c]
+        track_score = track_cfd[r, c]
+        if dl_score > 0.9 or dl_score < 0.1:
+            if dl_weights[r, c] < 0.1:
+                cur_cfd[r, c] = 0.3 * dl_score + 0.7 * track_score
+            else:
+                cur_cfd[r, c] = 0.4 * dl_score + 0.6 * track_score
+        else:
+            cur_cfd[r, c] = dl_weights[r, c] * dl_score + (1 - dl_weights[r, c]) * track_score
+    return cur_cfd
+
+
+def threshold_mask(img, thresh_bg, thresh_fg):
+    """
+    threshold mask
+    """
+    # rescale linearly so values <= thresh_bg map to 0 and values >= thresh_fg map to 1
+    dst = (img / 255.0 - thresh_bg) / (thresh_fg - thresh_bg)
+    dst[np.where(dst > 1)] = 1
+    dst[np.where(dst < 0)] = 0
+    return dst.astype(np.float32)
+
+
+def optflow_handle(cur_gray, scoremap, prev_gray, pre_cfd, disflow, is_init):
+    """
+    optical flow handling
+    """
+    w, h = scoremap.shape[0], scoremap.shape[1]
+    cur_cfd = scoremap.copy()
+    if is_init:
+        is_init = False
+        # pick the DIS finest scale according to the input resolution
+        if h <= 64 or w <= 64:
+            disflow.setFinestScale(1)
+        elif h <= 160 or w <= 160:
+            disflow.setFinestScale(2)
+        else:
+            disflow.setFinestScale(3)
+        fusion_cfd = cur_cfd
+    else:
+        weights = np.ones((w, h), np.float32) * 0.3
+        track_cfd, is_track, weights = human_seg_tracking(prev_gray, cur_gray, pre_cfd, weights, disflow)
+        fusion_cfd = human_seg_track_fuse(track_cfd, cur_cfd, weights, is_track)
+        fusion_cfd = cv2.GaussianBlur(fusion_cfd, (3, 3), 0)
+    return fusion_cfd
 
 
 class HumanSeg:
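The new functions form a per-frame pipeline: human_seg_tracking propagates the previous confidence map along DIS optical flow and keeps only pixels that pass the forward-backward consistency check (a forward shift must be roughly cancelled by the backward shift), human_seg_track_fuse blends the tracked confidence with the current network score, and threshold_mask later turns the fused map into an alpha matte. Below is a minimal sketch, not part of this commit, of how a caller could carry the tracking state across a sequence of frames; run_optflow_sequence, frames and model_scores are hypothetical names, and the functions above are assumed to be importable.

import cv2
import numpy as np

def run_optflow_sequence(frames, model_scores, eval_size=(192, 192)):
    """Illustrative sketch: fuse per-frame model scores with optical-flow tracking."""
    disflow = cv2.DISOpticalFlow_create(cv2.DISOPTICAL_FLOW_PRESET_ULTRAFAST)
    prev_gray = np.zeros((eval_size[1], eval_size[0]), np.uint8)
    prev_cfd = np.zeros((eval_size[1], eval_size[0]), np.float32)
    fused_maps = []
    for idx, (frame, score) in enumerate(zip(frames, model_scores)):
        cur_gray = cv2.resize(cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY), eval_size)
        scoremap = (score * 255).astype(np.uint8)  # same 0-255 scale used by postprocess()
        fused = optflow_handle(cur_gray, scoremap, prev_gray, prev_cfd,
                               disflow, idx == 0)  # only the first frame is the init frame
        prev_gray = cur_gray
        prev_cfd = fused.astype(np.float32)
        fused_maps.append(fused)
    return fused_maps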
@@ -48,14 +123,31 @@ class HumanSeg:
         self.mean = np.array(mean).reshape((3, 1, 1))
         self.scale = np.array(scale).reshape((3, 1, 1))
         self.eval_size = eval_size
-        self.predictor = load_model(model_dir, use_gpu)
+        self.load_model(model_dir, use_gpu)
+
+    def load_model(self, model_dir, use_gpu):
+        """
+        Load model from model_dir
+        """
+        prog_file = os.path.join(model_dir, '__model__')
+        params_file = os.path.join(model_dir, '__params__')
+        config = fluid.core.AnalysisConfig(prog_file, params_file)
+        if use_gpu:
+            config.enable_use_gpu(100, 0)
+            config.switch_ir_optim(True)
+        else:
+            config.disable_gpu()
+        config.disable_glog_info()
+        config.switch_specify_input_names(True)
+        config.enable_memory_optim()
+        self.predictor = fluid.core.create_paddle_predictor(config)
 
     def preprocess(self, image):
         """
         preprocess image: hwc_rgb to chw_bgr
         """
-        img_mat = cv2.resize(
-            image, self.eval_size, fx=0, fy=0, interpolation=cv2.INTER_CUBIC)
+        img_mat = cv2.resize(
+            image, self.eval_size, interpolation=cv2.INTER_LINEAR)
         # HWC -> CHW
         img_mat = img_mat.swapaxes(1, 2)
         img_mat = img_mat.swapaxes(0, 1)
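The two swapaxes calls in preprocess are just an HWC-to-CHW layout change; a quick self-contained check (illustrative only, not part of the diff):

import numpy as np

img = np.arange(4 * 5 * 3).reshape(4, 5, 3)         # a fake HWC image
chw = img.swapaxes(1, 2).swapaxes(0, 1)             # what preprocess() does
assert chw.shape == (3, 4, 5)                       # channels first
assert np.array_equal(chw, img.transpose(2, 0, 1))  # equivalent single transpose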
@@ -71,12 +163,24 @@ class HumanSeg:
         """
         postprocess result: merge background with segmentation result
         """
-        mask = output_data[0, 1, :, :]
-        mask = cv2.resize(mask, (image.shape[1], image.shape[0]))
-        scoremap = np.repeat(mask[:, :, np.newaxis], 3, axis=2)
-        bg_im = np.ones_like(scoremap) * 255
-        merge_im = (scoremap * image + (1 - scoremap) * bg_im).astype(np.uint8)
-        return merge_im
+        scoremap = output_data[0, 1, :, :]
+        scoremap = (scoremap * 255).astype(np.uint8)
+        ori_h, ori_w = image.shape[0], image.shape[1]
+        evl_h, evl_w = self.eval_size[0], self.eval_size[1]
+        disflow = cv2.DISOpticalFlow_create(
+            cv2.DISOPTICAL_FLOW_PRESET_ULTRAFAST)
+        prev_gray = np.zeros((evl_h, evl_w), np.uint8)
+        prev_cfd = np.zeros((evl_h, evl_w), np.float32)
+        cur_gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
+        cur_gray = cv2.resize(cur_gray, (evl_w, evl_h))
+        optflow_map = optflow_handle(cur_gray, scoremap, prev_gray, prev_cfd, disflow, False)
+        optflow_map = cv2.GaussianBlur(optflow_map, (3, 3), 0)
+        optflow_map = threshold_mask(optflow_map, thresh_bg=0.2, thresh_fg=0.8)
+        optflow_map = cv2.resize(optflow_map, (ori_w, ori_h))
+        optflow_map = np.repeat(optflow_map[:, :, np.newaxis], 3, axis=2)
+        bg = np.ones_like(optflow_map) * 255
+        comb = (optflow_map * image + (1 - optflow_map) * bg).astype(np.uint8)
+        return comb
 
     def run_predict(self, image):
         """
@@ -92,7 +196,7 @@ class HumanSeg:
 
 def predict_image(seg, image_path):
     """
-    Do Predicting on a image
+    Do Predicting on a single image
     """
     img_mat = cv2.imread(image_path)
     img_mat = seg.run_predict(img_mat)
@@ -114,20 +218,25 @@ def predict_video(seg, video_path):
     out = cv2.VideoWriter('result.avi',
                           cv2.VideoWriter_fourcc('M', 'J', 'P', 'G'), fps,
                           (width, height))
+    id = 1
     # Start capturing from video
     while cap.isOpened():
         ret, frame = cap.read()
         if ret:
             img_mat = seg.run_predict(frame)
             out.write(img_mat)
+            id += 1
+            # stop after the first 50 frames have been written
+            if id >= 51:
+                break
         else:
             break
     cap.release()
     out.release()
 
 
 def predict_camera(seg):
     """
-    Do Predicting on a camera video stream
+    Do Predicting on a camera video stream: Press q to exit
     """
     cap = cv2.VideoCapture(0)
     if not cap.isOpened():
@@ -138,13 +247,14 @@ def predict_camera(seg):
         ret, frame = cap.read()
         if ret:
             img_mat = seg.run_predict(frame)
-            cv2.imshow('Frame', img_mat)
+            cv2.imshow('HumanSegmentation', img_mat)
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
         else:
             break
     cap.release()
 
 
 def main(argv):
     """
     Entrypoint of the script
@@ -162,8 +272,9 @@ def main(argv):
     eval_size = (192, 192)
     seg = HumanSeg(model_dir, mean, scale, eval_size, use_gpu)
     # Run Predicting on a video and result will be saved as result.avi
-    # predict_camera(seg)
-    predict_video(seg, input_path)
+    predict_camera(seg)
+    #predict_video(seg, input_path)
+    #predict_image(seg, input_path)
 
 
 if __name__ == "__main__":