+ Prompt = object, details, description
+ For more details, please refer to YouPromptMe Guide.
+ There are more interesting models in PaddleHub, if you think it's great, welcome to star PaddleHub.
+
+ Besides, you can use free GPU resources in aistudio to enjoy more cases, have fun.
+
+
+
+ """)
+ gr.Markdown("""
+ 在"探索无限"的风格模式下,画作的真实风格完全可以由你的prompt来决定。下面是一些参考案例:
+
+ In "Explore infinity" style mode, how the image looks like is totally up to your prompt. Below are some cases:
+
+ ||
+ | --- |
+ | prompt:蒙娜丽莎,赛博朋克,宝丽来,33毫米,蒸汽波艺术 |
+
+
+ ||
+ | --- |
+ | prompt:火焰,凤凰,少女,未来感,高清,3d,精致面容,cg感,古风,唯美,毛发细致,上半身立绘 |
+
+
+ ||
+ | --- |
+ | prompt:巨狼,飘雪,蓝色大片烟雾,毛发细致,烟雾缭绕,高清,3d,cg感,侧面照 |
+
+
+ | |
+ | --- |
+ | prompt:浮世绘日本科幻哑光绘画,概念艺术,动漫风格神道寺禅园英雄动作序列,包豪斯|
+
+
+
+ ### [更多内容...](https://github.com/PaddlePaddle/PaddleHub/blob/develop/modules/image/text_to_image/ernie_vilg/README.md#四-prompt-指南)([Explore more...](https://github.com/PaddlePaddle/PaddleHub/blob/develop/modules/image/text_to_image/ernie_vilg/README.md#四-prompt-指南))
+
+
+ """)
+ gr.HTML('''
+
+ ''')
+
+ return block
diff --git a/modules/text/lexical_analysis/jieba_paddle/README.md b/modules/text/lexical_analysis/jieba_paddle/README.md
index 8d141131f0eff7cd672000ec82e9b60d0248dd02..67cc1be5698f00050c41ad52c443c1fbbb0a6ecb 100644
--- a/modules/text/lexical_analysis/jieba_paddle/README.md
+++ b/modules/text/lexical_analysis/jieba_paddle/README.md
@@ -207,6 +207,10 @@
- 关于PaddleHub Serving更多信息参考:[服务部署](../../../../docs/docs_ch/tutorial/serving.md)
+- ## gradio app 支持
+ 从paddlehub 2.3.1开始支持使用链接 http://127.0.0.1:8866/gradio/jieba_paddle 在浏览器中访问jieba_paddle的gradio app。
+
+
## 五、更新历史
@@ -218,6 +222,9 @@
移除 fluid api
+* 1.1.0
+ 新增对gradio app的支持
+
- ```shell
- $ hub install jieba_paddle==1.0.1
+ $ hub install jieba_paddle==1.1.0
```
diff --git a/modules/text/lexical_analysis/jieba_paddle/module.py b/modules/text/lexical_analysis/jieba_paddle/module.py
index 45b0bacd3c77d387a40d8a7bda6c60f9c2357c8d..955b34e9abc4d6fe1d09bc0519004254607347fe 100644
--- a/modules/text/lexical_analysis/jieba_paddle/module.py
+++ b/modules/text/lexical_analysis/jieba_paddle/module.py
@@ -14,7 +14,7 @@ from paddlehub.module.module import serving
@moduleinfo(
name="jieba_paddle",
- version="1.0.1",
+ version="1.1.0",
summary=
"jieba_paddle is a chineses tokenizer using BiGRU base on the PaddlePaddle deeplearning framework. More information please refer to https://github.com/fxsjy/jieba.",
author="baidu-paddle",
@@ -54,6 +54,24 @@ class JiebaPaddle(hub.Module):
return seg_list
+    def create_gradio_app(self):
+        """Build and return a gradio demo UI wrapping ``self.cut`` for interactive segmentation."""
+        import gradio as gr
+
+        def inference(text):
+            # Segment the input sentence; ``cut`` returns a list of tokens.
+            results = self.cut(sentence=text)
+            return results
+
+        title = "jieba_paddle"
+        description = "jieba_paddle is a word segmentation model based on paddlepaddle deep learning framework."
+
+        examples = [['今天是个好日子']]
+        # NOTE(review): ``gr.outputs.*`` is deprecated and removed in gradio>=3;
+        # requirements.txt pins no gradio version, so use the top-level component.
+        app = gr.Interface(inference,
+                           "text", [gr.Textbox(label="words")],
+                           title=title,
+                           description=description,
+                           examples=examples)
+        return app
+
def check_dependency(self):
"""
Check jieba tool dependency.
diff --git a/modules/text/lexical_analysis/lac/module.py b/modules/text/lexical_analysis/lac/module.py
index df6993761b7f410b2fcf3aae5ef688bc93ca2651..e1d34c6b69b5692175ca575b4d111f1196c61938 100644
--- a/modules/text/lexical_analysis/lac/module.py
+++ b/modules/text/lexical_analysis/lac/module.py
@@ -13,21 +13,18 @@ import os
import numpy as np
import paddle
import six
-from lac.custom import Customization
-from lac.processor import load_kv_dict
-from lac.processor import parse_result
-from lac.processor import word_to_ids
from paddle.inference import Config
from paddle.inference import create_predictor
-import paddlehub as hub
-from paddlehub.common.logger import logger
-from paddlehub.common.paddle_helper import add_vars_prefix
-from paddlehub.common.utils import sys_stdin_encoding
-from paddlehub.io.parser import txt_parser
+from .custom import Customization
+from .processor import load_kv_dict
+from .processor import parse_result
+from .processor import word_to_ids
from paddlehub.module.module import moduleinfo
from paddlehub.module.module import runnable
from paddlehub.module.module import serving
+from paddlehub.utils.parser import txt_parser
+from paddlehub.utils.utils import sys_stdin_encoding
class DataFormatError(Exception):
@@ -44,9 +41,9 @@ class DataFormatError(Exception):
author="baidu-nlp",
author_email="paddle-dev@baidu.com",
type="nlp/lexical_analysis")
-class LAC(hub.Module):
+class LAC:
- def _initialize(self, user_dict=None):
+ def __init__(self, user_dict=None):
"""
initialize with the necessary elements
"""
@@ -72,7 +69,10 @@ class LAC(hub.Module):
"""
predictor config setting
"""
- cpu_config = Config(self.pretrained_model_path)
+ model = self.default_pretrained_model_path + '.pdmodel'
+ params = self.default_pretrained_model_path + '.pdiparams'
+ cpu_config = Config(model, params)
+
cpu_config.disable_glog_info()
cpu_config.disable_gpu()
self.cpu_predictor = create_predictor(cpu_config)
diff --git a/paddlehub/serving/app_compat.py b/paddlehub/serving/app_compat.py
index 88464733de4ef692667e5605c2d1f8eec1156909..a3f3f50cdf5a0da551024b124ad6adfe24823319 100644
--- a/paddlehub/serving/app_compat.py
+++ b/paddlehub/serving/app_compat.py
@@ -12,19 +12,30 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
-
-import traceback
+import socket
+import threading
import time
+import traceback
+from multiprocessing import Process
+from threading import Lock
-from flask import Flask, request
+import requests
+from flask import Flask
+from flask import redirect
+from flask import request
+from flask import Response
from paddlehub.serving.model_service.base_model_service import cv_module_info
from paddlehub.serving.model_service.base_model_service import nlp_module_info
from paddlehub.serving.model_service.base_model_service import v2_module_info
-from paddlehub.utils import utils, log
+from paddlehub.utils import log
+from paddlehub.utils import utils
filename = 'HubServing-%s.log' % time.strftime("%Y_%m_%d", time.localtime())
+_gradio_apps = {} # Used to store all launched gradio apps
+_lock = Lock() # Used to prevent parallel requests to launch a server twice
+
def package_result(status: str, msg: str, data: dict):
'''
@@ -55,6 +66,54 @@ def package_result(status: str, msg: str, data: dict):
return {"status": status, "msg": msg, "results": data}
+def create_gradio_app(module_info: dict):
+    '''
+    Create a gradio app and launch a server for users.
+
+    Args:
+        module_info(dict): Module info including module name, method name and
+            other info.
+
+    Return:
+        int: port number on which the gradio server for this module listens.
+
+    Exception:
+        Raise a RuntimeError if the module does not support gradio app, or a
+        TimeoutError if the launched server never becomes reachable.
+    '''
+    module_name = module_info['module_name']
+    with _lock:
+        # Launch at most one server per module; reuse the recorded port after that.
+        if module_name not in _gradio_apps:
+            try:
+                serving_method = getattr(module_info["module"], 'create_gradio_app')
+            except Exception:
+                raise RuntimeError('Module {} is not supported for gradio app.'.format(module_name))
+
+            def get_free_tcp_port():
+                # Ask the OS for an ephemeral port, then release it for gradio to bind.
+                # NOTE(review): small race window between close() and gradio
+                # binding the port; acceptable for a local helper.
+                tcp = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+                if hasattr(socket, 'SO_REUSEPORT'):
+                    # SO_REUSEPORT is not available on Windows.
+                    tcp.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)
+                tcp.bind(('localhost', 0))
+                addr, port = tcp.getsockname()
+                tcp.close()
+                return port
+
+            port = get_free_tcp_port()
+            app = serving_method()
+            process = Process(target=app.launch, kwargs={'server_port': port})
+            process.start()
+
+            # Poll until the gradio server answers, but never hang forever:
+            # the original loop was unbounded and would block the request
+            # indefinitely if the child process failed to start.
+            deadline = time.time() + 60
+            while True:
+                try:
+                    requests.get('http://localhost:{}/'.format(port))
+                    break
+                except Exception:
+                    if time.time() > deadline:
+                        process.terminate()
+                        raise TimeoutError('Gradio app for module {} failed to start.'.format(module_name))
+                    time.sleep(1)
+
+            _gradio_apps[module_name] = port
+    # BUGFIX: the original returned the local ``port`` variable, which stayed
+    # None whenever the app had already been launched by an earlier request.
+    return _gradio_apps[module_name]
+
+
def predict_v2(module_info: dict, input: dict):
'''
@@ -159,6 +218,47 @@ def create_app(init_flag: bool = False, configs: dict = None):
results = predict_v2(module_info, inputs)
return results
+    # BUGFIX: the route pattern must contain the <module_name> URL converter,
+    # otherwise Flask cannot supply the handler's ``module_name`` argument
+    # (the converter was evidently stripped from the original patch text).
+    @app_instance.route('/gradio/<module_name>', methods=["GET", "POST"])
+    def gradio_app(module_name: str):
+        '''
+        Launch (if necessary) the gradio app for ``module_name`` and redirect
+        the browser to its proxied entry page.
+        '''
+        if module_name in v2_module_info.modules:
+            module_info = v2_module_info.get_module_info(module_name)
+            module_info['module_name'] = module_name
+        else:
+            msg = "Module {} is not supported for gradio app.".format(module_name)
+            return package_result("111", msg, "")
+        create_gradio_app(module_info)
+        return redirect("/gradio/{}/app".format(module_name), code=302)
+
+    # BUGFIX: restore the stripped <module_name> and <path:path> URL converters;
+    # "/gradio//" matches nothing and passes no arguments to the handler.
+    @app_instance.route("/gradio/<module_name>/<path:path>", methods=["GET", "POST"])
+    def request_gradio_app(module_name: str, path: str):
+        '''
+        Gradio app server url interface. We route urls for gradio app to gradio server.
+
+        Args:
+            module_name(str): Module name for gradio app.
+            path(str): All resource path from gradio server.
+
+        Returns:
+            Any thing from gradio server.
+        '''
+        if module_name not in _gradio_apps:
+            # Guard: hitting a sub-path before /gradio/<module_name> launched
+            # the server raised a bare KeyError in the original.
+            msg = "Gradio app for module {} is not running.".format(module_name)
+            return package_result("111", msg, "")
+        port = _gradio_apps[module_name]
+        # Rewrite the public URL into the local gradio server URL and proxy it.
+        if path == 'app':
+            proxy_url = request.url.replace(request.host_url + 'gradio/{}/app'.format(module_name),
+                                            'http://localhost:{}/'.format(port))
+        else:
+            proxy_url = request.url.replace(request.host_url + 'gradio/{}/'.format(module_name),
+                                            'http://localhost:{}/'.format(port))
+        resp = requests.request(method=request.method,
+                                url=proxy_url,
+                                headers={key: value
+                                         for (key, value) in request.headers if key != 'Host'},
+                                data=request.get_data(),
+                                cookies=request.cookies,
+                                allow_redirects=False)
+        headers = [(name, value) for (name, value) in resp.raw.headers.items()]
+        response = Response(resp.content, resp.status_code, headers)
+        return response
+
return app_instance
diff --git a/requirements.txt b/requirements.txt
index f95cfe689940dbd35f750b4267f2804d8a6fe53b..29e16e42c9d0a9aa0bd1a6ad9f5f38beadc9c27f 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -17,3 +17,4 @@ tqdm
visualdl >= 2.0.0
# gunicorn not support windows
gunicorn >= 19.10.0; sys_platform != "win32"
+gradio