Commit efd2b7d7 authored by Steffy-zxf

fix deeplabv3p_xception65_humanseg postprocess bug

Parent bfa5d4ac
...@@ -22,7 +22,7 @@ from deeplabv3p_xception65_humanseg.data_feed import reader
     author="baidu-vis",
     author_email="",
     summary="DeepLabv3+ is a semantic segmentation model.",
-    version="1.1.0")
+    version="1.1.1")
 class DeeplabV3pXception65HumanSeg(hub.Module):
     def _initialize(self):
         self.default_pretrained_model_path = os.path.join(
...@@ -220,3 +220,11 @@ class DeeplabV3pXception65HumanSeg(hub.Module):
         """
         self.arg_input_group.add_argument(
             '--input_path', type=str, help="path to image.")
+
+
+if __name__ == "__main__":
+    m = DeeplabV3pXception65HumanSeg()
+    import cv2
+    img = cv2.imread('./meditation.jpg')
+    res = m.segmentation(images=[img])
+    print(res[0]['data'])
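
For reference, a minimal sketch of calling the released module through PaddleHub instead of instantiating the class directly, mirroring the __main__ test added above. The hub.Module loading call and the './meditation.jpg' path are assumptions for illustration, not part of this commit.

import cv2
import paddlehub as hub

# Load the module by name through PaddleHub (assumes the module is installed).
human_seg = hub.Module(name="deeplabv3p_xception65_humanseg")
img = cv2.imread('./meditation.jpg')  # placeholder image path
# segmentation() returns one dict per input image; the predicted mask
# is stored under the 'data' key, as printed in the __main__ block above.
results = human_seg.segmentation(images=[img])
print(results[0]['data'])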
...@@ -52,8 +52,9 @@ def postprocess(data_out,
     for logit in data_out:
         logit = logit[1] * 255
         logit = cv2.resize(logit, (org_im_shape[1], org_im_shape[0]))
-        ret, logit = cv2.threshold(logit, thresh, 0, cv2.THRESH_TOZERO)
-        logit = 255 * (logit - thresh) / (255 - thresh)
+        logit -= thresh
+        logit[logit < 0] = 0
+        logit = 255 * logit / (255 - thresh)
         rgba = np.concatenate((org_im, np.expand_dims(logit, axis=2)), axis=2)
         if visualization:
...
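
The postprocess change replaces cv2.threshold(..., cv2.THRESH_TOZERO) with an explicit subtract-and-clamp. With the old order of operations, pixels at or below thresh were first zeroed and then shifted by -thresh, so the alpha channel picked up negative values for background pixels; the new code subtracts first and clamps at zero, so background alpha is exactly 0. Below is a minimal NumPy sketch of the two behaviours; the thresh and logit values are made up for illustration only.

import numpy as np

thresh = 120.0
# Hypothetical per-pixel scores already scaled to [0, 255].
logit = np.array([10.0, 120.0, 200.0], dtype=np.float32)

# Old behaviour: THRESH_TOZERO zeroes values <= thresh, then the shift
# by -thresh drives those zeros negative before rescaling.
old = np.where(logit > thresh, logit, 0.0)
old = 255 * (old - thresh) / (255 - thresh)
print(old)  # [-226.67 -226.67  151.11] -> negative alpha for background pixels

# New behaviour: subtract, clamp at zero, then rescale, so background
# pixels map cleanly to alpha == 0.
new = logit - thresh
new[new < 0] = 0
new = 255 * new / (255 - thresh)
print(new)  # [  0.     0.   151.11]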