diff --git a/modules/image/text_to_image/ernie_vilg/README.md b/modules/image/text_to_image/ernie_vilg/README.md index 82427c1d89693352929bda35cc6822f61ea3d1f6..74b339c1743d9f1c485524d850d595c585bae9ab 100755 --- a/modules/image/text_to_image/ernie_vilg/README.md +++ b/modules/image/text_to_image/ernie_vilg/README.md @@ -131,6 +131,9 @@ image = Image.open(BytesIO(base64.b64decode(result))) image.save('result_{}.png'.format(i)) +- ### gradio app 支持 + 从paddlehub 2.3.1开始支持使用链接 http://127.0.0.1:8866/gradio/ernie_vilg 在浏览器中访问ernie_vilg的gradio app。 + ## 五、更新历史 @@ -138,6 +141,17 @@ 初始发布 +* 1.1.0 + + 增加分辨率参数以及所支持的风格 + +* 1.2.0 + + 移除分辨率参数,移除默认 AK 和 SK + +* 1.3.0 + 新增对gradio app的支持 + ```shell - $ hub install ernie_vilg == 1.0.0 + $ hub install ernie_vilg == 1.3.0 ``` diff --git a/modules/image/text_to_image/ernie_vilg/module.py b/modules/image/text_to_image/ernie_vilg/module.py index dad3c98333073bccc61f72191e72257bba60d5ad..4a169ab229bf21eafb4d49a6681e5e75542038ef 100755 --- a/modules/image/text_to_image/ernie_vilg/module.py +++ b/modules/image/text_to_image/ernie_vilg/module.py @@ -10,6 +10,8 @@ from io import BytesIO from typing import List from typing import Optional +import gradio as gr +import numpy as np import requests from PIL import Image from tqdm.auto import tqdm @@ -21,7 +23,7 @@ from paddlehub.module.module import serving @moduleinfo(name="ernie_vilg", - version="1.0.0", + version="1.3.0", type="image/text_to_image", summary="", author="baidu-nlp", @@ -105,11 +107,11 @@ class ErnieVilG: print('API服务内部错误,可能引起原因有请求超时、模型推理错误等') raise RuntimeError("API服务内部错误,可能引起原因有请求超时、模型推理错误等") elif res['code'] == 100 or res['code'] == 110 or res['code'] == 111: - token = self._apply_token(self.ak, self.sk) + self.token = self._apply_token(self.ak, self.sk) res = requests.post(create_url, headers={'Content-Type': 'application/x-www-form-urlencoded'}, data={ - 'access_token': token, + 'access_token': self.token, "text": text_prompt, "style": style }) @@ -150,11 +152,11 @@ class 
ErnieVilG: print('API服务内部错误,可能引起原因有请求超时、模型推理错误等') raise RuntimeError("API服务内部错误,可能引起原因有请求超时、模型推理错误等") elif res['code'] == 100 or res['code'] == 110 or res['code'] == 111: - token = self._apply_token(self.ak, self.sk) + self.token = self._apply_token(self.ak, self.sk) res = requests.post(get_url, headers={'Content-Type': 'application/x-www-form-urlencoded'}, data={ - 'access_token': token, + 'access_token': self.token, 'taskId': {taskid} }) res = res.json() @@ -243,3 +245,312 @@ class ErnieVilG: self.arg_input_group.add_argument('--ak', type=str, default=None, help="申请文心api使用token的ak") self.arg_input_group.add_argument('--sk', type=str, default=None, help="申请文心api使用token的sk") self.arg_input_group.add_argument('--output_dir', type=str, default='ernievilg_output') + + def create_gradio_app(self): + ''' + Add gradio app for hub serving. + ''' + import paddlehub as hub + language_translation_model = hub.Module(name='baidu_translate') + language_recognition_model = hub.Module(name='baidu_language_recognition') + + style_list = [ + '古风', '油画', '水彩', '卡通', '二次元', '浮世绘', '蒸汽波艺术', 'low poly', '像素风格', '概念艺术', '未来主义', '赛博朋克', '写实风格', '洛丽塔风格', + '巴洛克风格', '超现实主义', '探索无限' + ] + + tips = { + "en": "Tips: The input text will be translated into Chinese for generation", + "jp": "ヒント: 入力テキストは生成のために中国語に翻訳されます", + "kor": "힌트: 입력 텍스트는 생성을 위해 중국어로 번역됩니다" + } + + count = 0 + + def translate_language(text_prompts): + nonlocal count + try: + count += 1 + tips_text = None + language_code = language_recognition_model.recognize(text_prompts) + if language_code != 'zh': + text_prompts = language_translation_model.translate(text_prompts, language_code, 'zh') + except Exception as e: + error_text = str(e) + return {status_text: error_text, language_tips_text: gr.update(visible=False)} + if language_code in tips: + tips_text = tips[language_code] + else: + tips_text = tips['en'] + if language_code == 'zh': + return { + language_tips_text: gr.update(visible=False), + translated_language: 
text_prompts, + trigger_component: gr.update(value=count, visible=False) + } + else: + return { + language_tips_text: gr.update(visible=True, value=tips_text), + translated_language: text_prompts, + trigger_component: gr.update(value=count, visible=False) + } + + def inference(text_prompts, style_indx): + try: + style = style_list[style_indx] + results = self.generate_image(text_prompts=text_prompts, style=style, visualization=False) + except Exception as e: + error_text = str(e) + return {status_text: error_text, gallery: None} + return {status_text: 'Success', gallery: results[:6]} + + title = "ERNIE-ViLG" + + description = "ERNIE-ViLG model, which supports text-to-image task." + + css = """ + .gradio-container { + font-family: 'IBM Plex Sans', sans-serif; + } + .gr-button { + color: white; + border-color: black; + background: black; + } + input[type='range'] { + accent-color: black; + } + .dark input[type='range'] { + accent-color: #dfdfdf; + } + .container { + max-width: 730px; + margin: auto; + padding-top: 1.5rem; + } + #gallery { + min-height: 22rem; + margin-bottom: 15px; + margin-left: auto; + margin-right: auto; + border-bottom-right-radius: .5rem !important; + border-bottom-left-radius: .5rem !important; + } + #gallery>div>.h-full { + min-height: 20rem; + } + .details:hover { + text-decoration: underline; + } + .gr-button { + white-space: nowrap; + } + .gr-button:focus { + border-color: rgb(147 197 253 / var(--tw-border-opacity)); + outline: none; + box-shadow: var(--tw-ring-offset-shadow), var(--tw-ring-shadow), var(--tw-shadow, 0 0 #0000); + --tw-border-opacity: 1; + --tw-ring-offset-shadow: var(--tw-ring-inset) 0 0 0 var(--tw-ring-offset-width) var(--tw-ring-offset-color); + --tw-ring-shadow: var(--tw-ring-inset) 0 0 0 calc(3px var(--tw-ring-offset-width)) var(--tw-ring-color); + --tw-ring-color: rgb(191 219 254 / var(--tw-ring-opacity)); + --tw-ring-opacity: .5; + } + .footer { + margin-bottom: 45px; + margin-top: 35px; + text-align: center; + 
border-bottom: 1px solid #e5e5e5; + } + .footer>p { + font-size: .8rem; + display: inline-block; + padding: 0 10px; + transform: translateY(10px); + background: white; + } + .dark .footer { + border-color: #303030; + } + .dark .footer>p { + background: #0b0f19; + } + .prompt h4{ + margin: 1.25em 0 .25em 0; + font-weight: bold; + font-size: 115%; + } + """ + + block = gr.Blocks(css=css) + + examples = [ + ['戴着眼镜的猫', '油画(Oil painting)'], + ['A cat with glasses', '油画(Oil painting)'], + ['眼鏡をかけた猫', '油画(Oil painting)'], + ['안경을 쓴 고양이', '油画(Oil painting)'], + ['日落时的城市天际线,史前遗迹风格', '油画(Oil painting)'], + ['一只猫坐在椅子上,戴着一副墨镜, low poly 风格', '卡通(Cartoon)'], + ['A cat sitting on a chair, wearing a pair of sunglasses, low poly style', '油画(Oil painting)'], + ['猫が椅子に座ってサングラスをかけている、low polyスタイル', '油画(Oil painting)'], + ['고양이 한 마리가 의자에 앉아 선글라스를 끼고 low poly 스타일을 하고 있다', '油画(Oil painting)'], + ['一只猫坐在椅子上,戴着一副墨镜,秋天风格', '探索无限(Explore infinity)'], + ['蒙娜丽莎,赛博朋克,宝丽来,33毫米,蒸汽波艺术', '探索无限(Explore infinity)'], + ['一只猫坐在椅子上,戴着一副墨镜,海盗风格', '探索无限(Explore infinity)'], + ['一条由闪电制成的令人敬畏的龙,概念艺术', '探索无限(Explore infinity)'], + ['An awesome dragon made of lightning, conceptual art', '油画(Oil painting)'], + ['稲妻で作られた畏敬の念を抱かせる竜、コンセプトアート', '油画(Oil painting)'], + ['번개로 만든 경외스러운 용, 개념 예술', '油画(Oil painting)'], + ['梵高猫头鹰,蒸汽波艺术', '探索无限(Explore infinity)'], + ['萨尔瓦多·达利描绘古代文明的超现实主义梦幻油画,写实风格', '探索无限(Explore infinity)'], + ['夕阳日落时,阳光落在云层上,海面波涛汹涌,风景,胶片感', '探索无限(Explore infinity)'], + ['Sunset, the sun falls on the clouds, the sea is rough, the scenery is filmy', '油画(Oil painting)'], + ['夕日が沈むと、雲の上に太陽の光が落ち、海面は波が荒く、風景、フィルム感', '油画(Oil painting)'], + ['석양이 질 때 햇빛이 구름 위에 떨어지고, 해수면의 파도가 용솟음치며, 풍경, 필름감', '油画(Oil painting)'], + ] + + with block: + gr.HTML(""" +
+
+ Paddlehub +
+
+

+ ERNIE-ViLG Demo +

+
+

+ ERNIE-ViLG is a state-of-the-art text-to-image model that generates + images from Chinese text. +

+ star Paddlehub +
+ """) + with gr.Group(): + with gr.Box(): + with gr.Row().style(mobile_collapse=False, equal_height=True): + text = gr.Textbox( + label="Prompt", + show_label=False, + max_lines=1, + placeholder="Enter your prompt, multiple languages are supported now.", + ).style( + border=(True, False, True, True), + rounded=(True, False, False, True), + container=False, + ) + + btn = gr.Button("Generate image").style( + margin=False, + rounded=(False, True, True, False), + ) + language_tips_text = gr.Textbox(label="language tips", show_label=False, visible=False, max_lines=1) + styles = gr.Dropdown(label="风格(style)", + choices=[ + '古风(Ancient Style)', '油画(Oil painting)', '水彩(Watercolor)', '卡通(Cartoon)', + '二次元(Anime)', '浮世绘(Ukiyoe)', '蒸汽波艺术(Vaporwave)', 'low poly', + '像素风格(Pixel Style)', '概念艺术(Conceptual Art)', '未来主义(Futurism)', + '赛博朋克(Cyberpunk)', '写实风格(Realistic style)', '洛丽塔风格(Lolita style)', + '巴洛克风格(Baroque style)', '超现实主义(Surrealism)', '探索无限(Explore infinity)' + ], + value='卡通(Cartoon)', + type="index") + gallery = gr.Gallery(label="Generated images", show_label=False, elem_id="gallery").style(grid=[2, 3], + height="auto") + status_text = gr.Textbox(label="处理状态(Process status)", show_label=True, max_lines=1, interactive=False) + trigger_component = gr.Textbox( + vaule="", visible=False) # This component is used for triggering inference funtion. 
+ translated_language = gr.Textbox(vaule="", visible=False) + + ex = gr.Examples(examples=examples, + fn=translate_language, + inputs=[text], + outputs=[language_tips_text, status_text, trigger_component, translated_language], + cache_examples=False) + ex.dataset.headers = [""] + + text.submit(translate_language, + inputs=[text], + outputs=[language_tips_text, status_text, trigger_component, translated_language]) + btn.click(translate_language, + inputs=[text], + outputs=[language_tips_text, status_text, trigger_component, translated_language]) + trigger_component.change(fn=inference, + inputs=[translated_language, styles], + outputs=[status_text, gallery]) + gr.HTML(""" +
+

Prompt公式

+ Prompt = 图片主体,细节词,修饰词 + 关于各部分的构造方式和效果,可以参考YouPromptMe指南。 + 更多的模型,请关注 PaddleHub 官方Repo , 如果你觉得不错,请star收藏吧。 +

Stars8.4k

+ 同时,可以在 aistudio 上使用免费的GPU体验更多案例。 +

+
+
+

Prompt format

+ Prompt = object, details, description + For more details, please refer to YouPromptMe Guide. + There are more interesting models in PaddleHub, if you think it's great, welcome to star PaddleHub. +

Stars8.4k

+ Besides, you can use free GPU resources in aistudio to enjoy more cases, have fun. +

+
+ + """) + gr.Markdown(""" + 在"探索无限"的风格模式下,画作的真实风格完全可以由你的prompt来决定。下面是一些参考案例: + + In "Explore infinity" style mode, how the image looks like is totally up to your prompt. Below are some cases: + + |drawing| + | --- | + | prompt:蒙娜丽莎,赛博朋克,宝丽来,33毫米,
蒸汽波艺术 | + + + |drawing| + | --- | + | prompt:火焰,凤凰,少女,未来感,高清,3d,
精致面容,cg感,古风,唯美,毛发细致,上半身立绘 | + + + |drawing| + | --- | + | prompt:巨狼,飘雪,蓝色大片烟雾,毛发细致,
烟雾缭绕,高清,3d,cg感,侧面照 | + + + | drawing | + | --- | + | prompt:浮世绘日本科幻哑光绘画,概念艺术,
动漫风格神道寺禅园英雄动作序列,包豪斯| + + drawing + + ### [更多内容...](https://github.com/PaddlePaddle/PaddleHub/blob/develop/modules/image/text_to_image/ernie_vilg/README.md#四-prompt-指南)([Explore more...](https://github.com/PaddlePaddle/PaddleHub/blob/develop/modules/image/text_to_image/ernie_vilg/README.md#四-prompt-指南)) + + + """) + gr.HTML(''' + + ''') + + return block diff --git a/modules/text/lexical_analysis/jieba_paddle/README.md b/modules/text/lexical_analysis/jieba_paddle/README.md index 8d141131f0eff7cd672000ec82e9b60d0248dd02..67cc1be5698f00050c41ad52c443c1fbbb0a6ecb 100644 --- a/modules/text/lexical_analysis/jieba_paddle/README.md +++ b/modules/text/lexical_analysis/jieba_paddle/README.md @@ -207,6 +207,10 @@ - 关于PaddleHub Serving更多信息参考:[服务部署](../../../../docs/docs_ch/tutorial/serving.md) +- ## gradio app 支持 + 从paddlehub 2.3.1开始支持使用链接 http://127.0.0.1:8866/gradio/jieba_paddle 在浏览器中访问jieba_paddle的gradio app。 + + ## 五、更新历史 @@ -218,6 +222,9 @@ 移除 fluid api +* 1.1.0 + 新增对gradio app的支持 + - ```shell - $ hub install jieba_paddle==1.0.1 + $ hub install jieba_paddle==1.1.0 ``` diff --git a/modules/text/lexical_analysis/jieba_paddle/module.py b/modules/text/lexical_analysis/jieba_paddle/module.py index 45b0bacd3c77d387a40d8a7bda6c60f9c2357c8d..955b34e9abc4d6fe1d09bc0519004254607347fe 100644 --- a/modules/text/lexical_analysis/jieba_paddle/module.py +++ b/modules/text/lexical_analysis/jieba_paddle/module.py @@ -14,7 +14,7 @@ from paddlehub.module.module import serving @moduleinfo( name="jieba_paddle", - version="1.0.1", + version="1.1.0", summary= "jieba_paddle is a chineses tokenizer using BiGRU base on the PaddlePaddle deeplearning framework. 
More information please refer to https://github.com/fxsjy/jieba.", author="baidu-paddle", @@ -54,6 +54,24 @@ class JiebaPaddle(hub.Module): return seg_list + def create_gradio_app(self): + import gradio as gr + + def inference(text): + results = self.cut(sentence=text) + return results + + title = "jieba_paddle" + description = "jieba_paddle is a word segmentation model based on paddlepaddle deep learning framework." + + examples = [['今天是个好日子']] + app = gr.Interface(inference, + "text", [gr.outputs.Textbox(label="words")], + title=title, + description=description, + examples=examples) + return app + def check_dependency(self): """ Check jieba tool dependency. diff --git a/modules/text/lexical_analysis/lac/module.py b/modules/text/lexical_analysis/lac/module.py index df6993761b7f410b2fcf3aae5ef688bc93ca2651..e1d34c6b69b5692175ca575b4d111f1196c61938 100644 --- a/modules/text/lexical_analysis/lac/module.py +++ b/modules/text/lexical_analysis/lac/module.py @@ -13,21 +13,18 @@ import os import numpy as np import paddle import six -from lac.custom import Customization -from lac.processor import load_kv_dict -from lac.processor import parse_result -from lac.processor import word_to_ids from paddle.inference import Config from paddle.inference import create_predictor -import paddlehub as hub -from paddlehub.common.logger import logger -from paddlehub.common.paddle_helper import add_vars_prefix -from paddlehub.common.utils import sys_stdin_encoding -from paddlehub.io.parser import txt_parser +from .custom import Customization +from .processor import load_kv_dict +from .processor import parse_result +from .processor import word_to_ids from paddlehub.module.module import moduleinfo from paddlehub.module.module import runnable from paddlehub.module.module import serving +from paddlehub.utils.parser import txt_parser +from paddlehub.utils.utils import sys_stdin_encoding class DataFormatError(Exception): @@ -44,9 +41,9 @@ class DataFormatError(Exception): author="baidu-nlp", 
author_email="paddle-dev@baidu.com", type="nlp/lexical_analysis") -class LAC(hub.Module): +class LAC: - def _initialize(self, user_dict=None): + def __init__(self, user_dict=None): """ initialize with the necessary elements """ @@ -72,7 +69,10 @@ class LAC(hub.Module): """ predictor config setting """ - cpu_config = Config(self.pretrained_model_path) + model = self.default_pretrained_model_path + '.pdmodel' + params = self.default_pretrained_model_path + '.pdiparams' + cpu_config = Config(model, params) + cpu_config.disable_glog_info() cpu_config.disable_gpu() self.cpu_predictor = create_predictor(cpu_config) diff --git a/paddlehub/serving/app_compat.py b/paddlehub/serving/app_compat.py index 88464733de4ef692667e5605c2d1f8eec1156909..a3f3f50cdf5a0da551024b124ad6adfe24823319 100644 --- a/paddlehub/serving/app_compat.py +++ b/paddlehub/serving/app_compat.py @@ -12,19 +12,30 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
- -import traceback +import socket +import threading import time +import traceback +from multiprocessing import Process +from threading import Lock -from flask import Flask, request +import requests +from flask import Flask +from flask import redirect +from flask import request +from flask import Response from paddlehub.serving.model_service.base_model_service import cv_module_info from paddlehub.serving.model_service.base_model_service import nlp_module_info from paddlehub.serving.model_service.base_model_service import v2_module_info -from paddlehub.utils import utils, log +from paddlehub.utils import log +from paddlehub.utils import utils filename = 'HubServing-%s.log' % time.strftime("%Y_%m_%d", time.localtime()) +_gradio_apps = {} # Used to store all launched gradio apps +_lock = Lock() # Used to prevent parallel requests to launch a server twice + def package_result(status: str, msg: str, data: dict): ''' @@ -55,6 +66,54 @@ def package_result(status: str, msg: str, data: dict): return {"status": status, "msg": msg, "results": data} +def create_gradio_app(module_info: dict): + ''' + Create a gradio app and launch a server for users. + Args: + module_info(dict): Module info include module name, method name and + other info. + Return: + int: port number, if server has been successful. + + Exception: + Raise a exception if server can not been launched. 
+ ''' + module_name = module_info['module_name'] + port = None + with _lock: + if module_name not in _gradio_apps: + try: + serving_method = getattr(module_info["module"], 'create_gradio_app') + except Exception: + raise RuntimeError('Module {} is not supported for gradio app.'.format(module_name)) + + def get_free_tcp_port(): + tcp = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + tcp.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1) + tcp.bind(('localhost', 0)) + addr, port = tcp.getsockname() + tcp.close() + return port + + port = get_free_tcp_port() + app = serving_method() + process = Process(target=app.launch, kwargs={'server_port': port}) + process.start() + + def check_alive(): + nonlocal port + while True: + try: + requests.get('http://localhost:{}/'.format(port)) + break + except Exception: + time.sleep(1) + + check_alive() + _gradio_apps[module_name] = port + return port + + def predict_v2(module_info: dict, input: dict): ''' @@ -159,6 +218,47 @@ def create_app(init_flag: bool = False, configs: dict = None): results = predict_v2(module_info, inputs) return results + @app_instance.route('/gradio/', methods=["GET", "POST"]) + def gradio_app(module_name: str): + if module_name in v2_module_info.modules: + module_info = v2_module_info.get_module_info(module_name) + module_info['module_name'] = module_name + else: + msg = "Module {} is not supported for gradio app.".format(module_name) + return package_result("111", msg, "") + create_gradio_app(module_info) + return redirect("/gradio/{}/app".format(module_name), code=302) + + @app_instance.route("/gradio//", methods=["GET", "POST"]) + def request_gradio_app(module_name: str, path: str): + ''' + Gradio app server url interface. We route urls for gradio app to gradio server. + + Args: + module_name(str): Module name for gradio app. + path(str): All resource path from gradio server. + + Returns: + Any thing from gradio server. 
+ ''' + port = _gradio_apps[module_name] + if path == 'app': + proxy_url = request.url.replace(request.host_url + 'gradio/{}/app'.format(module_name), + 'http://localhost:{}/'.format(port)) + else: + proxy_url = request.url.replace(request.host_url + 'gradio/{}/'.format(module_name), + 'http://localhost:{}/'.format(port)) + resp = requests.request(method=request.method, + url=proxy_url, + headers={key: value + for (key, value) in request.headers if key != 'Host'}, + data=request.get_data(), + cookies=request.cookies, + allow_redirects=False) + headers = [(name, value) for (name, value) in resp.raw.headers.items()] + response = Response(resp.content, resp.status_code, headers) + return response + return app_instance diff --git a/requirements.txt b/requirements.txt index f95cfe689940dbd35f750b4267f2804d8a6fe53b..29e16e42c9d0a9aa0bd1a6ad9f5f38beadc9c27f 100644 --- a/requirements.txt +++ b/requirements.txt @@ -17,3 +17,4 @@ tqdm visualdl >= 2.0.0 # gunicorn not support windows gunicorn >= 19.10.0; sys_platform != "win32" +gradio