diff --git a/modelcenter/PP-Matting/APP1/app.py b/modelcenter/PP-Matting/APP1/app.py
index ee7d729259a819e26c3d200d827e2732bf5a8a3f..88486843d13edfd73307dba787a3cee493cef120 100644
--- a/modelcenter/PP-Matting/APP1/app.py
+++ b/modelcenter/PP-Matting/APP1/app.py
@@ -1,182 +1,78 @@
-import codecs
-import os
-import sys
-import time
-import zipfile
-
import gradio as gr
import numpy as np
-import cv2
-import requests
-import yaml
-from paddle.inference import Config as PredictConfig
-from paddle.inference import create_predictor
-
-lasttime = time.time()
-FLUSH_INTERVAL = 0.1
-
-
-def progress(str, end=False):
- global lasttime
- if end:
- str += "\n"
- lasttime = 0
- if time.time() - lasttime >= FLUSH_INTERVAL:
- sys.stdout.write("\r%s" % str)
- lasttime = time.time()
- sys.stdout.flush()
-
-
-def _download_file(url, savepath, print_progress=True):
- if print_progress:
- print("Connecting to {}".format(url))
- r = requests.get(url, stream=True, timeout=15)
- total_length = r.headers.get('content-length')
-
- if total_length is None:
- with open(savepath, 'wb') as f:
- shutil.copyfileobj(r.raw, f)
- else:
- with open(savepath, 'wb') as f:
- dl = 0
- total_length = int(total_length)
- starttime = time.time()
- if print_progress:
- print("Downloading %s" % os.path.basename(savepath))
- for data in r.iter_content(chunk_size=4096):
- dl += len(data)
- f.write(data)
- if print_progress:
- done = int(50 * dl / total_length)
- progress("[%-50s] %.2f%%" %
- ('=' * done, float(100 * dl) / total_length))
- if print_progress:
- progress("[%-50s] %.2f%%" % ('=' * 50, 100), end=True)
-
-
-def uncompress(path):
- files = zipfile.ZipFile(path, 'r')
- filelist = files.namelist()
- rootpath = filelist[0]
- for file in filelist:
- files.extract(file, './')
-
-
-class DeployConfig:
- def __init__(self, path):
- with codecs.open(path, 'r', 'utf-8') as file:
- self.dic = yaml.load(file, Loader=yaml.FullLoader)
- self._dir = os.path.dirname(path)
-
- @property
- def model(self):
- return os.path.join(self._dir, self.dic['Deploy']['model'])
-
- @property
- def params(self):
- return os.path.join(self._dir, self.dic['Deploy']['params'])
-
-
-class Predictor:
- def __init__(self, cfg):
- """
- Prepare for prediction.
- The usage and docs of paddle inference, please refer to
- https://paddleinference.paddlepaddle.org.cn/product_introduction/summary.html
- """
- self.cfg = DeployConfig(cfg)
-
- self._init_base_config()
-
- self._init_cpu_config()
-
- self.predictor = create_predictor(self.pred_cfg)
-
- def _init_base_config(self):
- self.pred_cfg = PredictConfig(self.cfg.model, self.cfg.params)
- self.pred_cfg.enable_memory_optim()
- self.pred_cfg.switch_ir_optim(True)
-
- def _init_cpu_config(self):
- """
- Init the config for x86 cpu.
- """
- self.pred_cfg.disable_gpu()
- self.pred_cfg.set_cpu_math_library_num_threads(10)
-
- def _preprocess(self, img):
- # resize short edge to 512.
- h, w = img.shape[:2]
- short_edge = min(h, w)
- scale = 512 / short_edge
- h_resize = int(round(h * scale)) // 32 * 32
- w_resize = int(round(w * scale)) // 32 * 32
- img = cv2.resize(img, (w_resize, h_resize))
- img = (img / 255 - 0.5) / 0.5
- img = np.transpose(img, [2, 0, 1])[np.newaxis, :]
- return img
-
- def run(self, img):
- input_names = self.predictor.get_input_names()
- input_handle = {}
-
- for i in range(len(input_names)):
- input_handle[input_names[i]] = self.predictor.get_input_handle(
- input_names[i])
- output_names = self.predictor.get_output_names()
- output_handle = self.predictor.get_output_handle(output_names[0])
-
- img_inputs = img.astype('float32')
- ori_h, ori_w = img_inputs.shape[:2]
- img_inputs = self._preprocess(img=img_inputs)
- input_handle['img'].copy_from_cpu(img_inputs)
-
- self.predictor.run()
-
- results = output_handle.copy_to_cpu()
- alpha = results.squeeze()
- alpha = cv2.resize(alpha, (ori_w, ori_h))
- alpha = (alpha * 255).astype('uint8')
-
- return alpha
-
-
-def model_inference(image):
- # Download inference model
- url = 'https://paddleseg.bj.bcebos.com/matting/models/deploy/ppmatting-hrnet_w18-human_512.zip'
- savepath = './ppmatting-hrnet_w18-human_512.zip'
- if not os.path.exists('./ppmatting-hrnet_w18-human_512'):
- _download_file(url=url, savepath=savepath)
- uncompress(savepath)
- # Inference
- predictor = Predictor(cfg='./ppmatting-hrnet_w18-human_512/deploy.yaml')
- alpha = predictor.run(image)
+import utils
+from predict import build_predictor
- return alpha
+IMAGE_DEMO = "./images/idphoto.jpg"
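+# build the predictor once at startup so the model download and load
+# happen before the first request rather than inside a callback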
+predictor = build_predictor()
+sizes_play = utils.size_play()
-def clear_all():
- return None, None
+def get_output(img, size, bg, download_size):
+ """
+    Produce the ID photo at the requested size with the requested background.
+ Args:
+        img (np.ndarray): The input image array.
+        size (str): The photo size selected by the user.
+        bg (str): The background color selected by the user.
+        download_size (str): The file-size level that controls JPEG quality.
-with gr.Blocks() as demo:
- gr.Markdown("Objective Detection")
+ """
+ alpha = predictor.run(img)
+ res = utils.bg_replace(img, alpha, bg_name=bg)
- with gr.Column(scale=1, min_width=100):
+ size_index = sizes_play.index(size)
+ res = utils.adjust_size(res, size_index)
+ res_download = utils.download(res, download_size)
+ return res, res_download
- img_in = gr.Image(
- value="https://paddleseg.bj.bcebos.com/matting/demo/human.jpg",
- label="Input")
- with gr.Row():
- btn1 = gr.Button("Clear")
- btn2 = gr.Button("Submit")
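+# standalone save helper; note that get_output already saves the download
+# file via utils.download, so this wrapper is not wired to any event below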
+def download(img, size):
+ utils.download(img, size)
+ return None
- img_out = gr.Image(label="Output").style(height=200)
- btn2.click(fn=model_inference, inputs=img_in, outputs=[img_out])
- btn1.click(fn=clear_all, inputs=None, outputs=[img_in, img_out])
+with gr.Blocks() as demo:
+ gr.Markdown("""# ID Photo DIY""")
+
+ img_in = gr.Image(value=IMAGE_DEMO, label="Input image")
+ gr.Markdown(
+ """Tips: Please upload photos with good posture, center portrait, crown free, no jewelry, ears and eyebrows exposed."""
+ )
+ with gr.Row():
+ size = gr.Dropdown(sizes_play, label="Sizes", value=sizes_play[0])
+ bg = gr.Radio(
+ ["White", "Red", "Blue"], label="Background color", value='White')
+ download_size = gr.Radio(
+ ["Small", "Middle", "Large"],
+ label="File size (affects image quality)",
+ value='Large',
+ interactive=True)
+
+ with gr.Row():
+ btn1 = gr.Button("Clear")
+ btn2 = gr.Button("Submit")
+
+ img_out = gr.Image(
+ label="Output image", interactive=False).style(height=300)
+ f1 = gr.File(label='Image download').style(height=50)
+ with gr.Row():
+ gr.Markdown(
+ """This application is supported by [PaddleSeg](https://github.com/PaddlePaddle/PaddleSeg).
+        If you have any questions or feature requests, feel free to raise an issue on [GitHub](https://github.com/PaddlePaddle/PaddleSeg/issues). BTW, a star is a great encouragement for us, thanks! ^_^"""
+ )
+
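+    # wire the buttons: Submit runs the full pipeline, Clear resets all controls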
+ btn2.click(
+ fn=get_output,
+ inputs=[img_in, size, bg, download_size],
+ outputs=[img_out, f1])
+ btn1.click(
+ fn=utils.clear_all,
+ inputs=None,
+ outputs=[img_in, img_out, size, bg, download_size, f1])
+
gr.Button.style(1)
-demo.launch(share=True)
+demo.launch()
diff --git a/modelcenter/PP-Matting/APP1/download.py b/modelcenter/PP-Matting/APP1/download.py
new file mode 100644
index 0000000000000000000000000000000000000000..83c94303554f8d1f1c0f31090b48c86bb80176bf
--- /dev/null
+++ b/modelcenter/PP-Matting/APP1/download.py
@@ -0,0 +1,55 @@
+import os
+import shutil
+import sys
+import time
+import zipfile
+
+import requests
+
+FLUSH_INTERVAL = 0.1
+lasttime = time.time()
+
+
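+# progress() redraws a single console line, flushing at most every
+# FLUSH_INTERVAL seconds so large downloads do not flood stdout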
+def progress(msg, end=False):
+    global lasttime
+    if end:
+        msg += "\n"
+        lasttime = 0
+    if time.time() - lasttime >= FLUSH_INTERVAL:
+        sys.stdout.write("\r%s" % msg)
+        lasttime = time.time()
+        sys.stdout.flush()
+
+
+def download_file(url, savepath, print_progress=True):
+ if print_progress:
+ print("Connecting to {}".format(url))
+ r = requests.get(url, stream=True, timeout=15)
+ total_length = r.headers.get('content-length')
+
+ if total_length is None:
+ with open(savepath, 'wb') as f:
+ shutil.copyfileobj(r.raw, f)
+ else:
+ with open(savepath, 'wb') as f:
+ dl = 0
+ total_length = int(total_length)
+ if print_progress:
+ print("Downloading %s" % os.path.basename(savepath))
+ for data in r.iter_content(chunk_size=4096):
+ dl += len(data)
+ f.write(data)
+ if print_progress:
+ done = int(50 * dl / total_length)
+ progress("[%-50s] %.2f%%" %
+ ('=' * done, float(100 * dl) / total_length))
+ if print_progress:
+ progress("[%-50s] %.2f%%" % ('=' * 50, 100), end=True)
+
+
+def uncompress(path):
+    """Extract the zip archive into the current directory."""
+    with zipfile.ZipFile(path, 'r') as files:
+        files.extractall('./')
diff --git a/modelcenter/PP-Matting/APP1/images/idphoto.jpg b/modelcenter/PP-Matting/APP1/images/idphoto.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..6f0d8de71683c373fd9d9379d5470fd15219d79b
Binary files /dev/null and b/modelcenter/PP-Matting/APP1/images/idphoto.jpg differ
diff --git a/modelcenter/PP-Matting/APP1/images/paddleseg_github.png b/modelcenter/PP-Matting/APP1/images/paddleseg_github.png
new file mode 100644
index 0000000000000000000000000000000000000000..c08d517fc906e21cd6388bbf2fb82c3ff5905030
Binary files /dev/null and b/modelcenter/PP-Matting/APP1/images/paddleseg_github.png differ
diff --git a/modelcenter/PP-Matting/APP1/predict.py b/modelcenter/PP-Matting/APP1/predict.py
new file mode 100644
index 0000000000000000000000000000000000000000..2a4a641347a36f8056ebeee380f6dd5050db82c8
--- /dev/null
+++ b/modelcenter/PP-Matting/APP1/predict.py
@@ -0,0 +1,102 @@
+import os
+import codecs
+
+import numpy as np
+import cv2
+import yaml
+from paddle.inference import Config as PredictConfig
+from paddle.inference import create_predictor
+
+from download import download_file, uncompress
+
+URL = 'https://paddleseg.bj.bcebos.com/matting/models/deploy/ppmatting-hrnet_w18-human_512.zip'
+SAVEPATH = './ppmatting-hrnet_w18-human_512.zip'
+
+
+class DeployConfig:
+ def __init__(self, path):
+ with codecs.open(path, 'r', 'utf-8') as file:
+ self.dic = yaml.load(file, Loader=yaml.FullLoader)
+ self._dir = os.path.dirname(path)
+
+ @property
+ def model(self):
+ return os.path.join(self._dir, self.dic['Deploy']['model'])
+
+ @property
+ def params(self):
+ return os.path.join(self._dir, self.dic['Deploy']['params'])
+
+
+class Predictor:
+ def __init__(self, cfg):
+ """
+ Prepare for prediction.
+        For the usage and docs of Paddle Inference, please refer to
+ https://paddleinference.paddlepaddle.org.cn/product_introduction/summary.html
+ """
+ self.cfg = DeployConfig(cfg)
+
+ self._init_base_config()
+
+ self._init_cpu_config()
+
+ self.predictor = create_predictor(self.pred_cfg)
+
+ def _init_base_config(self):
+ self.pred_cfg = PredictConfig(self.cfg.model, self.cfg.params)
+ self.pred_cfg.enable_memory_optim()
+ self.pred_cfg.switch_ir_optim(True)
+
+ def _init_cpu_config(self):
+ """
+ Init the config for x86 cpu.
+ """
+ self.pred_cfg.disable_gpu()
+ self.pred_cfg.set_cpu_math_library_num_threads(10)
+
+ def _preprocess(self, img):
+        # resize the short edge to 512, rounding both dims down to multiples of 32
+ h, w = img.shape[:2]
+ short_edge = min(h, w)
+ scale = 512 / short_edge
+ h_resize = int(round(h * scale)) // 32 * 32
+ w_resize = int(round(w * scale)) // 32 * 32
+ img = cv2.resize(img, (w_resize, h_resize))
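+        # normalize to [-1, 1], then HWC -> NCHW with a leading batch dim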
+ img = (img / 255 - 0.5) / 0.5
+ img = np.transpose(img, [2, 0, 1])[np.newaxis, :]
+ return img
+
+ def run(self, img):
+ input_names = self.predictor.get_input_names()
+ input_handle = {}
+
+        for name in input_names:
+            input_handle[name] = self.predictor.get_input_handle(name)
+ output_names = self.predictor.get_output_names()
+ output_handle = self.predictor.get_output_handle(output_names[0])
+
+ img_inputs = img.astype('float32')
+ ori_h, ori_w = img_inputs.shape[:2]
+ img_inputs = self._preprocess(img=img_inputs)
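+        # the exported PP-Matting model takes a single input named 'img'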
+ input_handle['img'].copy_from_cpu(img_inputs)
+
+ self.predictor.run()
+
+ results = output_handle.copy_to_cpu()
+ alpha = results.squeeze()
+ alpha = cv2.resize(alpha, (ori_w, ori_h))
+ alpha = (alpha * 255).astype('uint8')
+
+ return alpha
+
+
+def build_predictor():
+ # Download inference model
+ if not os.path.exists('./ppmatting-hrnet_w18-human_512'):
+ download_file(url=URL, savepath=SAVEPATH)
+ uncompress(SAVEPATH)
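+    # deploy.yaml lives inside the extracted model directory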
+ cfg = os.path.join(os.path.splitext(SAVEPATH)[0], 'deploy.yaml')
+ predictor = Predictor(cfg)
+ return predictor
diff --git a/modelcenter/PP-Matting/APP1/requirement.txt b/modelcenter/PP-Matting/APP1/requirement.txt
index 5536fb7d1e800b422effd619540733cfd60a5f5a..5784221bf6ba34b0c8960bd71e570b18c51e5882 100644
--- a/modelcenter/PP-Matting/APP1/requirement.txt
+++ b/modelcenter/PP-Matting/APP1/requirement.txt
@@ -1,4 +1,5 @@
gradio
paddlepaddle
opencv-python
-pyyaml >= 5.1
\ No newline at end of file
+pyyaml >= 5.1
+pymatting
\ No newline at end of file
diff --git a/modelcenter/PP-Matting/APP1/utils.py b/modelcenter/PP-Matting/APP1/utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..f9849755171eb5824c413ab4aa8ddaca85563c27
--- /dev/null
+++ b/modelcenter/PP-Matting/APP1/utils.py
@@ -0,0 +1,137 @@
+import os
+import time
+
+from collections import OrderedDict
+import numpy as np
+import pymatting
+import cv2
+from PIL import Image
+
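+# Standard ID-photo sizes: 'physics' is width x height in mm,
+# 'pixels' is the same size rendered at 300 DPI (matching the saved dpi).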
+SIZES = OrderedDict({
+ "1 inch": {
+ 'physics': (25, 35),
+ 'pixels': (295, 413)
+ },
+ "1 inch smaller": {
+ 'physics': (22, 32),
+ 'pixels': (260, 378)
+ },
+ "1 inch larger": {
+ 'physics': (33, 48),
+ 'pixels': (390, 567)
+ },
+ "2 inches": {
+ 'physics': (35, 49),
+ 'pixels': (413, 579)
+ },
+ "2 inches smaller": {
+ 'physics': (35, 45),
+ 'pixels': (413, 531)
+ },
+ "2 inches larger": {
+ 'physics': (35, 53),
+ 'pixels': (413, 626)
+ },
+ "3 inches": {
+ 'physics': (55, 84),
+ 'pixels': (649, 991)
+ },
+ "4 inches": {
+ 'physics': (76, 102),
+ 'pixels': (898, 1205)
+ },
+ "5 inches": {
+ 'physics': (89, 127),
+ 'pixels': (1050, 1500)
+ }
+})
+
+# R, G, B
+COLOR_MAP = {
+ 'White': [255, 255, 255],
+ 'Blue': [0, 191, 243],
+ 'Red': [255, 0, 0]
+}
+
+# JPEG save quality for each file-size level
+SAVE_SIZE = {'Small': 50, 'Medium': 75, 'Large': 95}
+
+
+def delete_result():
+    """Remove results older than 10000 seconds from `.temp`."""
+    root = '.temp'
+    if not os.path.isdir(root):
+        return
+    results = sorted(os.listdir(root))
+    for res in results:
+        if int(time.time()) - int(os.path.splitext(res)[0]) > 10000:
+            os.remove(os.path.join(root, res))
+
+
+def clear_all():
+ delete_result()
+ return None, None, size_play()[0], 'White', 'Large', None
+
+
+def size_play():
+ sizes = []
+ for k, v in SIZES.items():
+        size = (f"{k}({v['physics'][0]}x{v['physics'][1]}mm,"
+                f"{v['pixels'][0]}x{v['pixels'][1]}px)")
+ sizes.append(size)
+ return sizes
+
+
+def bg_replace(img, alpha, bg_name):
+ bg = COLOR_MAP[bg_name]
+ bg = np.array(bg)[None, None, :]
+ alpha = alpha / 255.
+    # estimate true foreground colors to avoid background bleeding at the edges
+    img = pymatting.estimate_foreground_ml(img / 255., alpha) * 255
+ alpha = alpha[:, :, None]
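+    # standard alpha compositing: out = alpha * fg + (1 - alpha) * bg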
+ res = alpha * img + (1 - alpha) * bg
+ return res.astype('uint8')
+
+
+def adjust_size(img, size_index):
+ key = list(SIZES.keys())[size_index]
+ w_o, h_o = SIZES[key]['pixels']
+
+    # scale so the image fully covers the target size
+ h_ori, w_ori = img.shape[:2]
+ scale = max(w_o / w_ori, h_o / h_ori)
+ if scale > 1:
+ interpolation = cv2.INTER_CUBIC
+ else:
+ interpolation = cv2.INTER_AREA
+ img_scale = cv2.resize(
+ img, dsize=None, fx=scale, fy=scale, interpolation=interpolation)
+
+    # center-crop to the exact target pixel size
+ h_scale, w_scale = img_scale.shape[:2]
+ h_cen = h_scale // 2
+ w_cen = w_scale // 2
+ h_start = max(0, h_cen - h_o // 2)
+ h_end = min(h_scale, h_start + h_o)
+ w_start = max(0, w_cen - w_o // 2)
+ w_end = min(w_scale, w_start + w_o)
+ img_c = img_scale[h_start:h_end, w_start:w_end]
+
+ return img_c
+
+
+def download(img, size):
+ q = SAVE_SIZE[size]
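+    # name the file after the current epoch second; if it already exists,
+    # wait a second and retry so names stay unique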
+ while True:
+ name = str(int(time.time()))
+ tmp_name = './.temp/' + name + '.jpg'
+ if not os.path.exists(tmp_name):
+ break
+ else:
+ time.sleep(1)
+ dir_name = os.path.dirname(tmp_name)
+ if not os.path.exists(dir_name):
+ os.makedirs(dir_name)
+
+ im = Image.fromarray(img)
+ im.save(tmp_name, 'jpeg', quality=q, dpi=(300, 300))
+ return tmp_name