Unverified commit b17387b7, authored by wuyefeilin, committed by GitHub

Update HumanSeg code (#241)

* update video download and infer
* update video_infer.py
* fix humanseg postprocess
* update visualdl
* update optflow
* rm humanseg_server_quant

Parent: 5d3e89a7
@@ -29,7 +29,10 @@ def download_data(savepath, extrapath):
     url = "https://paddleseg.bj.bcebos.com/humanseg/data/video_test.zip"
     download_file_and_uncompress(
-        url=url, savepath=savepath, extrapath=extrapath)
+        url=url,
+        savepath=savepath,
+        extrapath=extrapath,
+        extraname='video_test.mp4')


 if __name__ == "__main__":
...
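The new extraname argument gives the extracted file a stable name. A hedged usage sketch of the same helper (the import path and local paths are assumptions; the keyword arguments are taken from the hunk above):

    from utils import download_file_and_uncompress  # import path is an assumption

    download_file_and_uncompress(
        url="https://paddleseg.bj.bcebos.com/humanseg/data/video_test.zip",
        savepath="./data",            # illustrative local paths
        extrapath="./data",
        extraname='video_test.mp4')   # rename the extracted file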
@@ -493,10 +493,7 @@ class SegModel(object):
         if use_vdl:
             from visualdl import LogWriter
             vdl_logdir = osp.join(save_dir, 'vdl_log')
-            log_writer = LogWriter(vdl_logdir, sync_cycle=20)
-            train_step_component = OrderedDict()
-            eval_component = OrderedDict()
+            log_writer = LogWriter(vdl_logdir)
         best_miou = -1.0
         best_model_epoch = 1
         for i in range(self.begin_epoch, num_epochs):
@@ -527,13 +524,10 @@ class SegModel(object):
                 if use_vdl:
                     for k, v in step_metrics.items():
-                        if k not in train_step_component.keys():
-                            with log_writer.mode('Each_step_while_Training'
-                                                 ) as step_logger:
-                                train_step_component[
-                                    k] = step_logger.scalar(
-                                        'Training: {}'.format(k))
-                        train_step_component[k].add_record(num_steps, v)
+                        log_writer.add_scalar(
+                            step=num_steps,
+                            tag='train/{}'.format(k),
+                            value=v)
                 # estimate the remaining time
                 avg_step_time = np.mean(time_stat)
@@ -587,12 +581,10 @@ class SegModel(object):
                         if isinstance(v, np.ndarray):
                             if v.size > 1:
                                 continue
-                        if k not in eval_component:
-                            with log_writer.mode('Each_Epoch_on_Eval_Data'
-                                                 ) as eval_logger:
-                                eval_component[k] = eval_logger.scalar(
-                                    'Evaluation: {}'.format(k))
-                        eval_component[k].add_record(i + 1, v)
+                        log_writer.add_scalar(
+                            step=num_steps,
+                            tag='evaluate/{}'.format(k),
+                            value=v)
                 self.save_model(save_dir=current_save_dir)
                 time_eval_one_epoch = time.time() - eval_epoch_start_time
                 if eval_dataset is not None:
...
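The three hunks above migrate from the VisualDL 1.x API (LogWriter(..., sync_cycle=...) plus mode()/scalar()/add_record()) to the VisualDL 2.x add_scalar() call. A minimal standalone sketch of the 2.x pattern (logdir and tag names are illustrative):

    from visualdl import LogWriter

    with LogWriter(logdir='./vdl_log') as writer:
        for step in range(100):
            loss = 1.0 / (step + 1)  # dummy metric
            writer.add_scalar(tag='train/loss', step=step, value=loss)

The curves can then be viewed with `visualdl --logdir ./vdl_log`. Note that the evaluation hunk now logs against num_steps, whereas the old add_record call used the epoch index i + 1.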
@@ -26,8 +26,6 @@ model_urls = {
         "https://paddleseg.bj.bcebos.com/humanseg/models/humanseg_server.zip",
     "humanseg_server_export":
         "https://paddleseg.bj.bcebos.com/humanseg/models/humanseg_server_export.zip",
-    "humanseg_server_quant":
-        "https://paddleseg.bj.bcebos.com/humanseg/models/humanseg_server_quant.zip",
     "humanseg_mobile":
         "https://paddleseg.bj.bcebos.com/humanseg/models/humanseg_mobile.zip",
     "humanseg_mobile_export":
@@ -36,7 +34,7 @@ model_urls = {
         "https://paddleseg.bj.bcebos.com/humanseg/models/humanseg_mobile_quant.zip",
     "humanseg_lite":
         "https://paddleseg.bj.bcebos.com/humanseg/models/humanseg_lite.zip",
-    "humanseg_lite_epxort":
+    "humanseg_lite_export":
        "https://paddleseg.bj.bcebos.com/humanseg/models/humanseg_lite_export.zip",
     "humanseg_lite_quant":
         "https://paddleseg.bj.bcebos.com/humanseg/models/humanseg_lite_quant.zip",
...
+pre-commit
+yapf == 0.26.0
+flake8
 pyyaml >= 5.1
-visualdl >= 1.3.0
+visualdl == 2.0.0-alpha.1
 Pillow
+numpy
+six
+opencv-python
 tqdm
+# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+import os
 import numpy as np
 import cv2
-import os
+
+
+def get_round(data):
+    round = 0.5 if data >= 0 else -0.5
+    return (int)(data + round)
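The added get_round helper rounds half away from zero, which differs from Python 3's built-in round (banker's rounding, half to even). For example:

    print(int(2.5 + 0.5), round(2.5))    # 3 vs 2
    print(int(-1.5 - 0.5), round(-1.5))  # -2 vs -2

It is used below to convert fractional optical-flow displacements into integer pixel offsets.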
-def humanseg_tracking(pre_gray, cur_gray, prev_cfd, dl_weights, disflow):
+def human_seg_tracking(pre_gray, cur_gray, prev_cfd, dl_weights, disflow):
     """Compute optical-flow tracking matches and the flow tracking map.
     Args:
         pre_gray: grayscale image of the previous frame
@@ -31,133 +21,102 @@ def human_seg_tracking(pre_gray, cur_gray, prev_cfd, dl_weights, disflow):
         track_cfd: optical flow tracking map
     """
     check_thres = 8
-    hgt, wdh = pre_gray.shape[:2]
+    h, w = pre_gray.shape[:2]
     track_cfd = np.zeros_like(prev_cfd)
     is_track = np.zeros_like(pre_gray)
-    # compute the forward optical flow
     flow_fw = disflow.calc(pre_gray, cur_gray, None)
-    # compute the backward optical flow
     flow_bw = disflow.calc(cur_gray, pre_gray, None)
-    get_round = lambda data: (int)(data + 0.5) if data >= 0 else (int)(data -
-                                                                       0.5)
-    for row in range(hgt):
-        for col in range(wdh):
-            # map (row, col) through the flow to (cur_x, cur_y)
-            fxy_fw = flow_fw[row, col]
+    for r in range(h):
+        for c in range(w):
+            fxy_fw = flow_fw[r, c]
             dx_fw = get_round(fxy_fw[0])
-            cur_x = dx_fw + col
+            cur_x = dx_fw + c
             dy_fw = get_round(fxy_fw[1])
-            cur_y = dy_fw + row
-            if cur_x < 0 or cur_x >= wdh or cur_y < 0 or cur_y >= hgt:
+            cur_y = dy_fw + r
+            if cur_x < 0 or cur_x >= w or cur_y < 0 or cur_y >= h:
                 continue
             fxy_bw = flow_bw[cur_y, cur_x]
             dx_bw = get_round(fxy_bw[0])
             dy_bw = get_round(fxy_bw[1])
-            # skip if the flow displacement exceeds the threshold
-            lmt = ((dy_fw + dy_bw) * (dy_fw + dy_bw) +
-                   (dx_fw + dx_bw) * (dx_fw + dx_bw))
-            if lmt >= check_thres:
+            if ((dy_fw + dy_bw) * (dy_fw + dy_bw) +
+                (dx_fw + dx_bw) * (dx_fw + dx_bw)) >= check_thres:
                 continue
-            # down-weight static points
             if abs(dy_fw) <= 0 and abs(dx_fw) <= 0 and abs(dy_bw) <= 0 and abs(
                     dx_bw) <= 0:
                 dl_weights[cur_y, cur_x] = 0.05
             is_track[cur_y, cur_x] = 1
-            track_cfd[cur_y, cur_x] = prev_cfd[row, col]
+            track_cfd[cur_y, cur_x] = prev_cfd[r, c]
     return track_cfd, is_track, dl_weights
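The rewritten loop is a forward-backward consistency check: a pixel's previous confidence is propagated only when the forward displacement and the backward displacement sampled at the destination nearly cancel (squared sum below check_thres = 8). A vectorized sketch of the same test (illustrative only; it uses NumPy rounding and border clamping instead of the per-pixel get_round and border skip above):

    import numpy as np

    def fb_consistent(flow_fw, flow_bw, check_thres=8):
        # flow_fw, flow_bw: (H, W, 2) arrays of (dx, dy) displacements
        h, w = flow_fw.shape[:2]
        ys, xs = np.mgrid[0:h, 0:w]
        # destination of each pixel under the forward flow
        cur_x = np.clip(np.rint(xs + flow_fw[..., 0]).astype(int), 0, w - 1)
        cur_y = np.clip(np.rint(ys + flow_fw[..., 1]).astype(int), 0, h - 1)
        back = flow_bw[cur_y, cur_x]  # backward flow at the destination
        err = ((flow_fw[..., 0] + back[..., 0]) ** 2 +
               (flow_fw[..., 1] + back[..., 1]) ** 2)
        return err < check_thres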
-def humanseg_track_fuse(track_cfd, dl_cfd, dl_weights, is_track):
+def human_seg_track_fuse(track_cfd, dl_cfd, dl_weights, is_track):
     """Fuse the optical-flow tracking map with the human segmentation result.
     Args:
         track_cfd: optical flow tracking map
         dl_cfd: segmentation result of the current frame
         dl_weights: fusion weight map
         is_track: binary map of matched flow points
     Returns:
         fusion_cfd: fusion of the tracking map and the segmentation result
     """
-    cur_cfd = dl_cfd.copy()
+    fusion_cfd = dl_cfd.copy()
     idxs = np.where(is_track > 0)
-    for i in range(len(idxs)):
+    for i in range(len(idxs[0])):
         x, y = idxs[0][i], idxs[1][i]
         dl_score = dl_cfd[x, y]
         track_score = track_cfd[x, y]
+        fusion_cfd[x, y] = dl_weights[x, y] * dl_score + (
+            1 - dl_weights[x, y]) * track_score
         if dl_score > 0.9 or dl_score < 0.1:
             if dl_weights[x, y] < 0.1:
-                cur_cfd[x, y] = 0.3 * dl_score + 0.7 * track_score
+                fusion_cfd[x, y] = 0.3 * dl_score + 0.7 * track_score
             else:
-                cur_cfd[x, y] = 0.4 * dl_score + 0.6 * track_score
+                fusion_cfd[x, y] = 0.4 * dl_score + 0.6 * track_score
         else:
-            cur_cfd[x, y] = dl_weights[x, y] * dl_score + (
+            fusion_cfd[x, y] = dl_weights[x, y] * dl_score + (
                 1 - dl_weights[x, y]) * track_score
-    return cur_cfd
+    return fusion_cfd
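In words: for every tracked pixel the network score and the flow-propagated score are blended with the per-pixel weight map, except that highly confident network predictions (score > 0.9 or < 0.1) fall back to fixed 0.3/0.7 or 0.4/0.6 blends. A worked example with made-up values:

    dl_score, track_score, w = 0.95, 0.40, 0.3
    # dl_score > 0.9 and w >= 0.1, so the fixed blend applies:
    fused = 0.4 * dl_score + 0.6 * track_score    # 0.62
    # a mid-confidence pixel uses the weight map instead:
    dl_score = 0.60
    fused = w * dl_score + (1 - w) * track_score  # 0.3*0.6 + 0.7*0.4 = 0.46

Note that the rename also fixes a real bug: the old loop iterated range(len(idxs)), i.e. over the two index arrays returned by np.where (always 2 iterations), not over the matched pixels; len(idxs[0]) is the actual match count.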
-def threshold_mask(img, thresh_bg, thresh_fg):
-    """Build a mask by thresholding background and foreground.
-    Args:
-        img: original image, np.uint8
-        thresh_bg: background threshold ratio; values below it become 0
-        thresh_fg: foreground threshold ratio; values above it become 1
-    Returns:
-        dst: the thresholded mask, np.float32
-    """
-    dst = (img / 255.0 - thresh_bg) / (thresh_fg - thresh_bg)
-    dst[np.where(dst > 1)] = 1
-    dst[np.where(dst < 0)] = 0
-    return dst.astype(np.float32)
-def optflow_handle(cur_gray, scoremap, is_init):
+def postprocess(cur_gray, scoremap, prev_gray, pre_cfd, disflow, is_init):
     """Optical-flow refinement.
     Args:
         cur_gray: grayscale image of the current frame
+        prev_gray: grayscale image of the previous frame
+        pre_cfd: fusion result of the previous frame
         scoremap: segmentation result of the current frame
+        disflow: optical flow object
         is_init: whether this is the first frame
     Returns:
-        dst: fusion of the flow tracking map and the prediction, np.float32
+        fusion_cfd: fusion of the flow tracking map and the prediction
     """
     height, width = scoremap.shape[0], scoremap.shape[1]
-    disflow = cv2.DISOpticalFlow_create(cv2.DISOPTICAL_FLOW_PRESET_ULTRAFAST)
-    prev_gray = np.zeros((height, width), np.uint8)
-    prev_cfd = np.zeros((height, width), np.float32)
+    h, w = scoremap.shape
     cur_cfd = scoremap.copy()
     if is_init:
         is_init = False
-        if height <= 64 or width <= 64:
+        if h <= 64 or w <= 64:
             disflow.setFinestScale(1)
-        elif height <= 160 or width <= 160:
+        elif h <= 160 or w <= 160:
             disflow.setFinestScale(2)
         else:
             disflow.setFinestScale(3)
         fusion_cfd = cur_cfd
     else:
-        weights = np.ones((height, width), np.float32) * 0.3
-        track_cfd, is_track, weights = humanseg_tracking(
-            prev_gray, cur_gray, prev_cfd, weights, disflow)
-        fusion_cfd = humanseg_track_fuse(track_cfd, cur_cfd, weights, is_track)
+        weights = np.ones((w, h), np.float32) * 0.3
+        track_cfd, is_track, weights = human_seg_tracking(
+            prev_gray, cur_gray, pre_cfd, weights, disflow)
+        fusion_cfd = human_seg_track_fuse(track_cfd, cur_cfd, weights, is_track)
     fusion_cfd = cv2.GaussianBlur(fusion_cfd, (3, 3), 0)
     return fusion_cfd
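The refactored postprocess no longer owns its optical-flow state: the previous gray frame, the previous fused map, and the DIS flow object are created by the caller and threaded through each call (the zero-initialized prev_gray/prev_cfd in the old version meant the tracking branch never saw real history). A minimal calling pattern, mirroring video_infer.py below:

    disflow = cv2.DISOpticalFlow_create(cv2.DISOPTICAL_FLOW_PRESET_ULTRAFAST)
    prev_gray = np.zeros((192, 192), np.uint8)
    prev_cfd = np.zeros((192, 192), np.float32)
    is_init = True
    # per frame:
    #   cfd = postprocess(cur_gray, scoremap, prev_gray, prev_cfd, disflow, is_init)
    #   prev_gray, prev_cfd, is_init = cur_gray.copy(), cfd.copy(), False

One caveat: weights is allocated as (w, h), which only lines up with the (h, w) score map when the input is square, as it is with the 192x192 frames used here.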
-def postprocess(image, output_data):
-    """Post-process the prediction.
-    Args:
-        image: original image (OpenCV image object)
-        output_data: raw Paddle prediction output
-    Returns:
-        the original image composited with the optical-flow-refined prediction
-    """
-    scoremap = output_data[:, :, 1]
-    scoremap = (scoremap * 255).astype(np.uint8)
-    # optical-flow refinement
-    cur_gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
-    optflow_map = optflow_handle(cur_gray, scoremap, False)
-    optflow_map = cv2.GaussianBlur(optflow_map, (3, 3), 0)
-    optflow_map = threshold_mask(optflow_map, thresh_bg=0.2, thresh_fg=0.8)
-    optflow_map = np.repeat(optflow_map[:, :, np.newaxis], 3, axis=2)
-    bg_im = np.ones_like(optflow_map) * 255
-    comb = (optflow_map * image + (1 - optflow_map) * bg_im).astype(np.uint8)
-    return comb
+def threshold_mask(img, thresh_bg, thresh_fg):
+    dst = (img / 255.0 - thresh_bg) / (thresh_fg - thresh_bg)
+    dst[np.where(dst > 1)] = 1
+    dst[np.where(dst < 0)] = 0
+    return dst.astype(np.float32)
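threshold_mask (moved down here from above, minus its docstring) maps raw scores to an alpha matte: values at or below thresh_bg*255 become 0, values at or above thresh_fg*255 become 1, with a linear ramp in between. An equivalent formulation via np.clip (illustrative, not part of the patch):

    import numpy as np

    def threshold_mask_clip(img, thresh_bg, thresh_fg):
        dst = (img / 255.0 - thresh_bg) / (thresh_fg - thresh_bg)
        return np.clip(dst, 0, 1).astype(np.float32)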
@@ -4,7 +4,7 @@
 import os.path as osp

 import cv2
 import numpy as np

-from utils.humanseg_postprocess import postprocess
+from utils.humanseg_postprocess import postprocess, threshold_mask
 import models
 import transforms
@@ -33,9 +33,39 @@ def parse_args():
     return parser.parse_args()
+def predict(img, model, test_transforms):
+    model.arrange_transform(transforms=test_transforms, mode='test')
+    img, im_info = test_transforms(img)
+    img = np.expand_dims(img, axis=0)
+    result = model.exe.run(
+        model.test_prog,
+        feed={'image': img},
+        fetch_list=list(model.test_outputs.values()))
+    score_map = result[1]
+    score_map = np.squeeze(score_map, axis=0)
+    score_map = np.transpose(score_map, (1, 2, 0))
+    return score_map, im_info
+
+
+def recover(img, im_info):
+    keys = list(im_info.keys())
+    for k in keys[::-1]:
+        if k == 'shape_before_resize':
+            h, w = im_info[k][0], im_info[k][1]
+            img = cv2.resize(img, (w, h), cv2.INTER_LINEAR)
+        elif k == 'shape_before_padding':
+            h, w = im_info[k][0], im_info[k][1]
+            img = img[0:h, 0:w]
+    return img
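recover walks the recorded transforms in reverse, undoing the last-applied one first: a resize is inverted by resizing back to shape_before_resize, padding by cropping back to shape_before_padding. For example (illustrative values):

    # a 240x320 frame that was resized to 192x192 for inference:
    im_info = {'shape_before_resize': (240, 320)}
    mask = recover(mask, im_info)  # mask resized back to 240x320

One caveat: cv2.resize's third positional parameter is dst, not the interpolation flag, so the flag would be safer passed as interpolation=cv2.INTER_LINEAR.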
 def video_infer(args):
+    resize_h = 192
+    resize_w = 192
     test_transforms = transforms.Compose(
-        [transforms.Resize((192, 192)),
+        [transforms.Resize((resize_w, resize_h)),
          transforms.Normalize()])
     model = models.load_model(args.model_dir)
     if not args.video_path:
@@ -47,11 +77,21 @@ def video_infer(args):
               "--video_path whether existing: {}"
               " or camera whether working".format(args.video_path))
         return
-    if args.video_path:
-        width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
-        height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
+    width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
+    height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
+    disflow = cv2.DISOpticalFlow_create(cv2.DISOPTICAL_FLOW_PRESET_ULTRAFAST)
+    prev_gray = np.zeros((resize_h, resize_w), np.uint8)
+    prev_cfd = np.zeros((resize_h, resize_w), np.float32)
+    is_init = True
     fps = cap.get(cv2.CAP_PROP_FPS)
+    if args.video_path:
         # video writer for saving the prediction results
+        if not osp.exists(args.save_dir):
+            os.makedirs(args.save_dir)
         out = cv2.VideoWriter(
             osp.join(args.save_dir, 'result.avi'),
             cv2.VideoWriter_fourcc('M', 'J', 'P', 'G'), fps, (width, height))
@@ -59,9 +99,24 @@ def video_infer(args):
         while cap.isOpened():
             ret, frame = cap.read()
             if ret:
-                results = model.predict(frame, test_transforms)
-                img_mat = postprocess(frame, results['score_map'])
-                out.write(img_mat)
+                score_map, im_info = predict(frame, model, test_transforms)
+                cur_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
+                cur_gray = cv2.resize(cur_gray, (resize_w, resize_h))
+                scoremap = 255 * score_map[:, :, 1]
+                optflow_map = postprocess(cur_gray, scoremap, prev_gray,
+                                          prev_cfd, disflow, is_init)
+                prev_gray = cur_gray.copy()
+                prev_cfd = optflow_map.copy()
+                is_init = False
+                optflow_map = cv2.GaussianBlur(optflow_map, (3, 3), 0)
+                optflow_map = threshold_mask(
+                    optflow_map, thresh_bg=0.2, thresh_fg=0.8)
+                img_mat = np.repeat(optflow_map[:, :, np.newaxis], 3, axis=2)
+                img_mat = recover(img_mat, im_info)
+                bg_im = np.ones_like(img_mat) * 255
+                comb = (img_mat * frame + (1 - img_mat) * bg_im).astype(
+                    np.uint8)
+                out.write(comb)
             else:
                 break
         cap.release()
@@ -71,10 +126,25 @@ def video_infer(args):
         while cap.isOpened():
             ret, frame = cap.read()
             if ret:
-                results = model.predict(frame, test_transforms)
-                print(frame.shape, results['score_map'].shape)
-                img_mat = postprocess(frame, results['score_map'])
-                cv2.imshow('HumanSegmentation', img_mat)
+                score_map, im_info = predict(frame, model, test_transforms)
+                cur_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
+                cur_gray = cv2.resize(cur_gray, (resize_w, resize_h))
+                scoremap = 255 * score_map[:, :, 1]
+                optflow_map = postprocess(cur_gray, scoremap, prev_gray,
+                                          prev_cfd, disflow, is_init)
+                prev_gray = cur_gray.copy()
+                prev_cfd = optflow_map.copy()
+                is_init = False
+                # optflow_map = optflow_map/255.0
+                optflow_map = cv2.GaussianBlur(optflow_map, (3, 3), 0)
+                optflow_map = threshold_mask(
+                    optflow_map, thresh_bg=0.2, thresh_fg=0.8)
+                img_mat = np.repeat(optflow_map[:, :, np.newaxis], 3, axis=2)
+                img_mat = recover(img_mat, im_info)
+                bg_im = np.ones_like(img_mat) * 255
+                comb = (img_mat * frame + (1 - img_mat) * bg_im).astype(
+                    np.uint8)
+                cv2.imshow('HumanSegmentation', comb)
                 if cv2.waitKey(1) & 0xFF == ord('q'):
                     break
             else:
...
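Both branches now share the same per-frame pipeline, which composites the person against a white background; in matting terms, comb = alpha * frame + (1 - alpha) * 255. A condensed sketch of one iteration (names as defined in the hunks above):

    score_map, im_info = predict(frame, model, test_transforms)   # HWC scores
    cur_gray = cv2.resize(cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY),
                          (resize_w, resize_h))
    scoremap = 255 * score_map[:, :, 1]            # person-class score in [0, 255]
    optflow_map = postprocess(cur_gray, scoremap, prev_gray, prev_cfd,
                              disflow, is_init)    # temporal smoothing
    prev_gray, prev_cfd, is_init = cur_gray.copy(), optflow_map.copy(), False
    alpha = threshold_mask(cv2.GaussianBlur(optflow_map, (3, 3), 0),
                           thresh_bg=0.2, thresh_fg=0.8)
    alpha = recover(np.repeat(alpha[:, :, np.newaxis], 3, axis=2), im_info)
    comb = (alpha * frame + (1 - alpha) * 255).astype(np.uint8)

Since the block is duplicated verbatim in the file and camera branches, it is a natural candidate for extraction into a shared helper.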