未验证 提交 931f1e7c 编写于 作者: jm_12138's avatar jm_12138 提交者: GitHub

update face_landmark_localization (#1966)

* update face_landmark_localization

* fix typo

* update

* add clean func

* update save inference model

* update save inference model
Co-authored-by: chenjian <chenjian26@baidu.com>
上级 02674b5d
......@@ -120,18 +120,11 @@
- 当前模型使用的人脸检测模型。
- ```python
def save_inference_model(dirname,
model_filename=None,
params_filename=None,
combined=False):
def save_inference_model(dirname):
```
- 将模型保存到指定路径,由于人脸关键点检测模型由人脸检测+关键点检测两个模型组成,因此保存后会存在两个子目录,其中`face_landmark`为人脸关键点模型,`detector`为人脸检测模型。
- **参数**
- dirname: 存在模型的目录名称
- model_filename: 模型文件名称,默认为\__model__
- params_filename: 参数文件名称,默认为\__params__(仅当combined为True时生效)
- combined: 是否将参数保存到统一的一个文件中
- dirname: 模型保存路径
## 四、服务部署
......@@ -187,6 +180,10 @@
移除 fluid api
* 1.1.0
修复无法导出推理模型的问题
* ```shell
$ hub install face_landmark_localization==1.0.3
$ hub install face_landmark_localization==1.1.0
```
# coding=utf-8
import os
import time
from collections import OrderedDict
import cv2
import numpy as np
from PIL import Image
__all__ = ['reader']
......
......@@ -5,15 +5,14 @@ from __future__ import division
import argparse
import ast
import os
import time
from collections import OrderedDict
import cv2
import numpy as np
import paddle
from face_landmark_localization.data_feed import reader
from face_landmark_localization.processor import base64_to_cv2
from face_landmark_localization.processor import postprocess
import paddle.jit
import paddle.static
from .data_feed import reader
from .processor import base64_to_cv2
from .processor import postprocess
from paddle.inference import Config
from paddle.inference import create_predictor
......@@ -30,15 +29,14 @@ from paddlehub.module.module import serving
author_email="paddle-dev@baidu.com",
summary=
"Face_Landmark_Localization can be used to locate face landmark. This Module is trained through the MPII Human Pose dataset.",
version="1.0.3")
class FaceLandmarkLocalization(hub.Module):
def _initialize(self, face_detector_module=None):
version="1.1.0")
class FaceLandmarkLocalization:
def __init__(self, face_detector_module=None):
"""
Args:
face_detector_module (class): module to detect face.
"""
self.default_pretrained_model_path = os.path.join(self.directory, "face_landmark_localization")
self.default_pretrained_model_path = os.path.join(self.directory, "face_landmark_localization", "model")
if face_detector_module is None:
self.face_detector = hub.Module(name="ultra_light_fast_generic_face_detector_1mb_640")
else:
......@@ -49,7 +47,9 @@ class FaceLandmarkLocalization(hub.Module):
"""
predictor config setting
"""
cpu_config = Config(self.default_pretrained_model_path)
model = self.default_pretrained_model_path+'.pdmodel'
params = self.default_pretrained_model_path+'.pdiparams'
cpu_config = Config(model, params)
cpu_config.disable_glog_info()
cpu_config.disable_gpu()
self.cpu_predictor = create_predictor(cpu_config)
......@@ -61,7 +61,7 @@ class FaceLandmarkLocalization(hub.Module):
except:
use_gpu = False
if use_gpu:
gpu_config = Config(self.default_pretrained_model_path)
gpu_config = Config(model, params)
gpu_config.disable_glog_info()
gpu_config.enable_use_gpu(memory_pool_init_size_mb=1000, device_id=0)
self.gpu_predictor = create_predictor(gpu_config)
......@@ -78,30 +78,6 @@ class FaceLandmarkLocalization(hub.Module):
def get_face_detector_module(self):
    """Return the face detector sub-module this model is composed with.

    This is the module stored as ``self.face_detector`` (either the one
    passed to the constructor or the default
    ``ultra_light_fast_generic_face_detector_1mb_640``).
    """
    return self.face_detector
def save_inference_model(self, dirname, model_filename=None, params_filename=None, combined=True):
    """Export both sub-models as static inference models under ``dirname``.

    The face landmark pipeline is two models (face detection + keypoint
    localization), so two sub-directories are written: ``face_landmark``
    for this model and ``detector`` for the face detection model.

    Args:
        dirname (str): root directory to save into.
        model_filename (str|None): model file name; defaults to ``__model__``
            when ``combined`` is True.
        params_filename (str|None): parameters file name; defaults to
            ``__params__`` when ``combined`` is True.
        combined (bool): whether parameters are saved into a single file.
    """
    if combined:
        # Only the combined layout gets default file names; otherwise the
        # None values are forwarded unchanged to paddle.
        model_filename = "__model__" if not model_filename else model_filename
        params_filename = "__params__" if not params_filename else params_filename

    # CPU executor is sufficient: exporting only re-serializes the graph.
    place = paddle.CPUPlace()
    exe = paddle.Executor(place)

    # NOTE(review): in Paddle 2.x the static load/save_inference_model APIs
    # take a `path_prefix` positional argument rather than `dirname=` /
    # `feeded_var_names=` keywords — TODO confirm against the pinned paddle
    # version; this may be why export was reported broken in this release.
    program, feeded_var_names, target_vars = paddle.static.load_inference_model(
        dirname=self.default_pretrained_model_path, executor=exe)

    face_landmark_dir = os.path.join(dirname, "face_landmark")
    detector_dir = os.path.join(dirname, "detector")

    # Re-save the landmark model that was just loaded.
    paddle.static.save_inference_model(dirname=face_landmark_dir,
                                       main_program=program,
                                       executor=exe,
                                       feeded_var_names=feeded_var_names,
                                       target_vars=target_vars,
                                       model_filename=model_filename,
                                       params_filename=params_filename)

    # Delegate the detector export to the face-detector module itself.
    self.face_detector.save_inference_model(dirname=detector_dir,
                                            model_filename=model_filename,
                                            params_filename=params_filename,
                                            combined=combined)
def keypoint_detection(self,
images=None,
paths=None,
......
......@@ -6,7 +6,6 @@ from __future__ import print_function
import base64
import os
import time
from collections import OrderedDict
import cv2
import numpy as np
......
import os
import shutil
import unittest
import cv2
import requests
import paddlehub as hub
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
class TestHubModule(unittest.TestCase):
    """Integration tests for the ``face_landmark_localization`` hub module.

    Downloads one sample face image in ``setUpClass`` and reuses it for
    every test; output directories are removed in ``tearDownClass``.
    """

    @classmethod
    def setUpClass(cls) -> None:
        img_url = 'https://ai-studio-static-online.cdn.bcebos.com/7799a8ccc5f6471b9d56fb6eff94f82a08b70ca2c7594d3f99877e366c0a2619'
        # exist_ok avoids the racy exists()-then-makedirs pattern.
        os.makedirs('tests', exist_ok=True)
        # A timeout prevents the whole test run from hanging on a network stall.
        response = requests.get(img_url, timeout=60)
        assert response.status_code == 200, 'Network Error.'
        with open('tests/test.jpg', 'wb') as f:
            f.write(response.content)
        cls.module = hub.Module(name="face_landmark_localization")

    @classmethod
    def tearDownClass(cls) -> None:
        # ignore_errors=True: some directories only exist if the tests that
        # create them ran and passed (e.g. 'face_landmark_output' is created
        # by the visualization test, 'inference' by the export test). Cleanup
        # must not raise FileNotFoundError and mask the original failure.
        shutil.rmtree('tests', ignore_errors=True)
        shutil.rmtree('inference', ignore_errors=True)
        shutil.rmtree('face_landmark_output', ignore_errors=True)

    def test_keypoint_detection1(self):
        # CPU path, input given as a file path.
        results = self.module.keypoint_detection(
            paths=['tests/test.jpg'],
            use_gpu=False,
            visualization=False
        )
        kps = results[0]['data'][0]
        self.assertIsInstance(kps, list)

    def test_keypoint_detection2(self):
        # CPU path, input given as a decoded ndarray.
        results = self.module.keypoint_detection(
            images=[cv2.imread('tests/test.jpg')],
            use_gpu=False,
            visualization=False
        )
        kps = results[0]['data'][0]
        self.assertIsInstance(kps, list)

    def test_keypoint_detection3(self):
        # Visualization writes rendered output under 'face_landmark_output'.
        results = self.module.keypoint_detection(
            images=[cv2.imread('tests/test.jpg')],
            use_gpu=False,
            visualization=True
        )
        kps = results[0]['data'][0]
        self.assertIsInstance(kps, list)

    def test_keypoint_detection4(self):
        # GPU path (CUDA_VISIBLE_DEVICES is set at module import).
        results = self.module.keypoint_detection(
            images=[cv2.imread('tests/test.jpg')],
            use_gpu=True,
            visualization=False
        )
        kps = results[0]['data'][0]
        self.assertIsInstance(kps, list)

    def test_keypoint_detection5(self):
        # A non-existent path must raise, not silently return.
        self.assertRaises(
            AssertionError,
            self.module.keypoint_detection,
            paths=['no.jpg']
        )

    def test_keypoint_detection6(self):
        # A raw string is not a valid image array.
        self.assertRaises(
            AttributeError,
            self.module.keypoint_detection,
            images=['test.jpg']
        )

    def test_save_inference_model(self):
        # Export writes both the landmark model and the face detector.
        self.module.save_inference_model('./inference/model')
        self.assertTrue(os.path.exists('./inference/model/model.pdmodel'))
        self.assertTrue(os.path.exists('./inference/model/model.pdiparams'))
        self.assertTrue(os.path.exists('./inference/model/face_detector.pdmodel'))
        self.assertTrue(os.path.exists('./inference/model/face_detector.pdiparams'))
# Allow running this test file directly (e.g. `python test_module.py`).
if __name__ == "__main__":
    unittest.main()
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册