diff --git a/visualdl/component/inference/fastdeploy_client/client_app.py b/visualdl/component/inference/fastdeploy_client/client_app.py
index 397b8255ab75984d490f2ab9e0ce426da6054e55..76447abae7c033355c132493eae25db603544e1f 100644
--- a/visualdl/component/inference/fastdeploy_client/client_app.py
+++ b/visualdl/component/inference/fastdeploy_client/client_app.py
@@ -18,6 +18,7 @@ import numpy as np
from .http_client_manager import get_metric_data
from .http_client_manager import HttpClientManager
from .http_client_manager import metrics_table_head
+from .http_client_manager import metrics_table_head_en
from .visualizer import visualize_detection
from .visualizer import visualize_face_alignment
from .visualizer import visualize_face_detection
@@ -257,11 +258,400 @@ def create_gradio_client_app(): # noqa:C901
max_lines=1,
interactive=False)
+ lang_text = gr.Textbox(
+ label="lang",
+ show_label=False,
+ value='zh',
+ max_lines=1,
+ visible=False
+ ) # This hidden text box is only used to distinguish the zh and en pages
+
+ all_input_output_components = input_accordions + input_name_texts + input_images + \
+ input_texts + output_accordions + output_name_texts + output_images + output_texts
+
+ def get_input_output_name(server_ip, server_port, model_name,
+ model_version, lang_text):
+ try:
+ server_addr = server_ip + ':' + server_port
+ input_metas, output_metas = _http_manager.get_model_meta(
+ server_addr, model_name, model_version)
+ except Exception as e:
+ return {status_text: str(e)}
+ results = {
+ component: None
+ for component in all_input_output_components
+ }
+ results[component_format_column] = gr.update(visible=True)
+ for input_accordio in input_accordions:
+ results[input_accordio] = gr.update(visible=False)
+ for output_accordio in output_accordions:
+ results[output_accordio] = gr.update(visible=False)
+ results[status_text] = 'Got model inputs and outputs successfully.'
+ for i, input_meta in enumerate(input_metas):
+ results[input_accordions[i]] = gr.update(visible=True)
+ results[input_name_texts[i]] = input_meta['name']
+ for i, output_meta in enumerate(output_metas):
+ results[output_accordions[i]] = gr.update(visible=True)
+ results[output_name_texts[i]] = output_meta['name']
+ return results
+
+ def component_inference(*args):
+ server_ip = args[0]
+ http_port = args[1]
+ metric_port = args[2]
+ model_name = args[3]
+ model_version = args[4]
+ names = args[5:5 + len(input_name_texts)]
+ images = args[5 + len(input_name_texts):5 + len(input_name_texts) +
+ len(input_images)]
+ texts = args[5 + len(input_name_texts) + len(input_images):5 +
+ len(input_name_texts) + len(input_images) +
+ len(input_texts)]
+ task_type = args[-1]
+ server_addr = server_ip + ':' + http_port
+ if server_ip and http_port and model_name and model_version:
+ inputs = {}
+ for i, input_name in enumerate(names):
+ if input_name:
+ if images[i] is not None:
+ inputs[input_name] = np.array([images[i]])
+ if texts[i]:
+ inputs[input_name] = np.array(
+ [[texts[i].encode('utf-8')]], dtype=np.object_)
+ try:
+ infer_results = _http_manager.infer(
+ server_addr, model_name, model_version, inputs)
+ results = {status_text: 'Inference succeeded.'}
+ for i, (output_name,
+ data) in enumerate(infer_results.items()):
+ results[output_name_texts[i]] = output_name
+ results[output_texts[i]] = str(data)
+ if task_type != 'unspecified':
+ try:
+ results[output_images[i]] = supported_tasks[
+ task_type](images[0], data)
+ except Exception:
+ results[output_images[i]] = None
+ if metric_port:
+ html_table = get_metric_data(server_ip, metric_port,
+ 'zh')
+ results[output_html_table] = html_table
+ return results
+ except Exception as e:
+ return {status_text: 'Error: {}'.format(e)}
+ else:
+ return {
+ status_text:
+ 'Please input server addr, model name and model version.'
+ }
+
+ def raw_inference(*args):
+ server_ip = args[0]
+ http_port = args[1]
+ metric_port = args[2]
+ model_name = args[3]
+ model_version = args[4]
+ payload_text = args[5]
+ server_addr = server_ip + ':' + http_port
+ try:
+ result = _http_manager.raw_infer(server_addr, model_name,
+ model_version, payload_text)
+ results = {
+ status_text: 'Get response from server',
+ output_raw_text: result
+ }
+ if server_ip and metric_port:
+ html_table = get_metric_data(server_ip, metric_port, 'zh')
+ results[output_html_table] = html_table
+ return results
+ except Exception as e:
+ return {status_text: 'Error: {}'.format(e)}
+
+ def update_metric(server_ip, metrics_port, lang_text):
+ if server_ip and metrics_port:
+ try:
+ html_table = get_metric_data(server_ip, metrics_port, 'zh')
+ return {
+ output_html_table: html_table,
+ status_text: "Update metrics successfully."
+ }
+ except Exception as e:
+ return {status_text: 'Error: {}'.format(e)}
+ else:
+ return {
+ status_text: 'Please input server ip and metrics_port.'
+ }
+
+ check_button.click(
+ fn=get_input_output_name,
+ inputs=[
+ server_addr_text, server_http_port_text, model_name_text,
+ model_version_text, lang_text
+ ],
+ outputs=[
+ *all_input_output_components, check_button,
+ component_format_column, status_text
+ ])
+ component_submit_button.click(
+ fn=component_inference,
+ inputs=[
+ server_addr_text, server_http_port_text,
+ server_metric_port_text, model_name_text, model_version_text,
+ *input_name_texts, *input_images, *input_texts, task_radio
+ ],
+ outputs=[
+ *output_name_texts, *output_images, *output_texts, status_text,
+ output_html_table
+ ])
+ raw_submit_button.click(
+ fn=raw_inference,
+ inputs=[
+ server_addr_text, server_http_port_text,
+ server_metric_port_text, model_name_text, model_version_text,
+ raw_payload_text
+ ],
+ outputs=[output_raw_text, status_text, output_html_table])
+ update_metric_button.click(
+ fn=update_metric,
+ inputs=[server_addr_text, server_metric_port_text, lang_text],
+ outputs=[output_html_table, status_text])
+ return block
+
+
+def create_gradio_client_app_en(): # noqa:C901
+ css = """
+ .gradio-container {
+ font-family: 'IBM Plex Sans', sans-serif;
+ }
+ .gr-button {
+ color: white;
+ border-color: black;
+ background: black;
+ }
+ input[type='range'] {
+ accent-color: black;
+ }
+ .dark input[type='range'] {
+ accent-color: #dfdfdf;
+ }
+ #gallery {
+ min-height: 22rem;
+ margin-bottom: 15px;
+ margin-left: auto;
+ margin-right: auto;
+ border-bottom-right-radius: .5rem !important;
+ border-bottom-left-radius: .5rem !important;
+ }
+ #gallery>div>.h-full {
+ min-height: 20rem;
+ }
+ .details:hover {
+ text-decoration: underline;
+ }
+ .gr-button {
+ white-space: nowrap;
+ }
+ .gr-button:focus {
+ border-color: rgb(147 197 253 / var(--tw-border-opacity));
+ outline: none;
+ box-shadow: var(--tw-ring-offset-shadow), var(--tw-ring-shadow), var(--tw-shadow, 0 0 #0000);
+ --tw-border-opacity: 1;
+ --tw-ring-offset-shadow: var(--tw-ring-inset) 0 0 0 var(--tw-ring-offset-width) \
+ var(--tw-ring-offset-color);
+ --tw-ring-shadow: var(--tw-ring-inset) 0 0 0 calc(3px var(--tw-ring-offset-width)) var(--tw-ring-color);
+ --tw-ring-color: rgb(191 219 254 / var(--tw-ring-opacity));
+ --tw-ring-opacity: .5;
+ }
+ .footer {
+ margin-bottom: 45px;
+ margin-top: 35px;
+ text-align: center;
+ border-bottom: 1px solid #e5e5e5;
+ }
+ .footer>p {
+ font-size: .8rem;
+ display: inline-block;
+ padding: 0 10px;
+ transform: translateY(10px);
+ background: white;
+ }
+ .dark .footer {
+ border-color: #303030;
+ }
+ .dark .footer>p {
+ background: #0b0f19;
+ }
+ .prompt h4{
+ margin: 1.25em 0 .25em 0;
+ font-weight: bold;
+ font-size: 115%;
+ }
+ """
+
+ block = gr.Blocks(css=css)
+
+ with block:
+ gr.HTML("""
+ <div style="text-align: center; margin: 0 auto;">
+ <h1>FastDeploy Client</h1>
+ <p>The client is used to create requests to the FastDeploy server.</p>
+ </div>
+ """)
+ with gr.Group():
+ with gr.Box():
+ with gr.Column():
+ with gr.Row():
+ server_addr_text = gr.Textbox(
+ label="server ip",
+ show_label=True,
+ max_lines=1,
+ placeholder="localhost",
+ )
+
+ server_http_port_text = gr.Textbox(
+ label="server port",
+ show_label=True,
+ max_lines=1,
+ placeholder="8000",
+ )
+
+ server_metric_port_text = gr.Textbox(
+ label="metrics port",
+ show_label=True,
+ max_lines=1,
+ placeholder="8002",
+ )
+ with gr.Row():
+ model_name_text = gr.Textbox(
+ label="model name",
+ show_label=True,
+ max_lines=1,
+ placeholder="yolov5",
+ )
+ model_version_text = gr.Textbox(
+ label="model version",
+ show_label=True,
+ max_lines=1,
+ placeholder="1",
+ )
+
+ with gr.Box():
+ with gr.Tab("Component form"):
+ check_button = gr.Button("get model inputs and outputs")
+ component_format_column = gr.Column(visible=False)
+ with component_format_column:
+ task_radio = gr.Radio(
+ choices=list(supported_tasks.keys()),
+ value='unspecified',
+ label='task type',
+ visible=True)
+ gr.Markdown(
+ "Choose a text or image component for each input, according to its data type"
+ )
+ with gr.Row():
+ with gr.Column():
+ gr.Markdown("Inputs")
+ input_accordions = []
+ input_name_texts = []
+ input_images = []
+ input_texts = []
+ for i in range(6):
+ accordion = gr.Accordion(
+ "variable {}".format(i),
+ open=True,
+ visible=False)
+ with accordion:
+ input_name_text = gr.Textbox(
+ label="variable name",
+ interactive=False)
+ input_image = gr.Image(type='numpy')
+ input_text = gr.Textbox(
+ label="text", max_lines=1000)
+ input_accordions.append(accordion)
+ input_name_texts.append(input_name_text)
+ input_images.append(input_image)
+ input_texts.append(input_text)
+
+ with gr.Column():
+ gr.Markdown("Outputs")
+ output_accordions = []
+ output_name_texts = []
+ output_images = []
+ output_texts = []
+ for i in range(6):
+ accordion = gr.Accordion(
+ "variable {}".format(i),
+ open=True,
+ visible=False)
+ with accordion:
+ output_name_text = gr.Textbox(
+ label="variable name",
+ interactive=False)
+ output_text = gr.Textbox(
+ label="text",
+ interactive=False,
+ show_label=True)
+ output_image = gr.Image(
+ interactive=False)
+ output_accordions.append(accordion)
+ output_name_texts.append(output_name_text)
+ output_images.append(output_image)
+ output_texts.append(output_text)
+ component_submit_button = gr.Button("submit request")
+ with gr.Tab("Original form"):
+ gr.Markdown("Request")
+ raw_payload_text = gr.Textbox(
+ label="request payload", max_lines=10000)
+ with gr.Column():
+ gr.Markdown("Response")
+ output_raw_text = gr.Textbox(
+ label="raw response data", interactive=False)
+ raw_submit_button = gr.Button("submit request")
+
+ with gr.Box():
+ with gr.Column():
+ gr.Markdown(
+ "Metrics (updated automatically when a request is submitted, or click the update metrics button to refresh manually)"
+ )
+ output_html_table = gr.HTML(
+ label="metrics",
+ interactive=False,
+ show_label=False,
+ value=metrics_table_head_en.format('', ''))
+ update_metric_button = gr.Button("update metrics")
+
+ status_text = gr.Textbox(
+ label="status",
+ show_label=True,
+ max_lines=1,
+ interactive=False)
+
+ lang_text = gr.Textbox(
+ label="lang",
+ show_label=False,
+ value='en',
+ max_lines=1,
+ visible=False
+ ) # This hidden text box is only used to distinguish the zh and en pages
+
all_input_output_components = input_accordions + input_name_texts + input_images + \
input_texts + output_accordions + output_name_texts + output_images + output_texts
def get_input_output_name(server_ip, server_port, model_name,
- model_version):
+ model_version, lang_text):
try:
server_addr = server_ip + ':' + server_port
input_metas, output_metas = _http_manager.get_model_meta(
@@ -273,12 +663,11 @@ def create_gradio_client_app(): # noqa:C901
for component in all_input_output_components
}
results[component_format_column] = gr.update(visible=True)
- # results[check_button] = gr.update(visible=False)
for input_accordio in input_accordions:
results[input_accordio] = gr.update(visible=False)
for output_accordio in output_accordions:
results[output_accordio] = gr.update(visible=False)
- results[status_text] = 'GetInputOutputName Successful'
+ results[status_text] = 'Got model inputs and outputs successfully.'
for i, input_meta in enumerate(input_metas):
results[input_accordions[i]] = gr.update(visible=True)
results[input_name_texts[i]] = input_meta['name']
@@ -313,7 +702,7 @@ def create_gradio_client_app(): # noqa:C901
try:
infer_results = _http_manager.infer(
server_addr, model_name, model_version, inputs)
- results = {status_text: 'Inference Successful'}
+ results = {status_text: 'Inference succeeded.'}
for i, (output_name,
data) in enumerate(infer_results.items()):
results[output_name_texts[i]] = output_name
@@ -325,7 +714,8 @@ def create_gradio_client_app(): # noqa:C901
except Exception:
results[output_images[i]] = None
if metric_port:
- html_table = get_metric_data(server_ip, metric_port)
+ html_table = get_metric_data(server_ip, metric_port,
+ 'en')
results[output_html_table] = html_table
return results
except Exception as e:
@@ -352,19 +742,19 @@ def create_gradio_client_app(): # noqa:C901
output_raw_text: result
}
if server_ip and metric_port:
- html_table = get_metric_data(server_ip, metric_port)
+ html_table = get_metric_data(server_ip, metric_port, 'en')
results[output_html_table] = html_table
return results
except Exception as e:
return {status_text: 'Error: {}'.format(e)}
- def update_metric(server_ip, metrics_port):
+ def update_metric(server_ip, metrics_port, lang_text):
if server_ip and metrics_port:
try:
- html_table = get_metric_data(server_ip, metrics_port)
+ html_table = get_metric_data(server_ip, metrics_port, 'en')
return {
output_html_table: html_table,
- status_text: "Successfully update metrics."
+ status_text: "Update metrics successfully."
}
except Exception as e:
return {status_text: 'Error: {}'.format(e)}
@@ -377,7 +767,7 @@ def create_gradio_client_app(): # noqa:C901
fn=get_input_output_name,
inputs=[
server_addr_text, server_http_port_text, model_name_text,
- model_version_text
+ model_version_text, lang_text
],
outputs=[
*all_input_output_components, check_button,
@@ -404,6 +794,6 @@ def create_gradio_client_app(): # noqa:C901
outputs=[output_raw_text, status_text, output_html_table])
update_metric_button.click(
fn=update_metric,
- inputs=[server_addr_text, server_metric_port_text],
+ inputs=[server_addr_text, server_metric_port_text, lang_text],
outputs=[output_html_table, status_text])
return block
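
The zh and en pages keep identical callback signatures by threading a hidden lang textbox through every event handler as its last input. A minimal sketch of the pattern, assuming gradio 3.x as used by VisualDL (the greet handler and its labels are hypothetical):

import gradio as gr

def greet(name, lang):
    # The hidden textbox arrives as an ordinary argument carrying the
    # constant page language ('zh' or 'en').
    return 'Hello, {}'.format(name) if lang == 'en' else '你好，{}'.format(name)

with gr.Blocks() as demo:
    name_box = gr.Textbox(label='name')
    # Invisible to the user; it only tags callbacks with the page language.
    lang_box = gr.Textbox(value='en', visible=False)
    out_box = gr.Textbox(label='greeting', interactive=False)
    gr.Button('greet').click(
        fn=greet, inputs=[name_box, lang_box], outputs=[out_box])

demo.launch()
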
diff --git a/visualdl/component/inference/fastdeploy_client/http_client_manager.py b/visualdl/component/inference/fastdeploy_client/http_client_manager.py
index 691594152efd219239e15b4c497052b9b3b8f4ce..e5fe85c0fe6ff40d983ac51e9e0384a9fb1ecaa2 100644
--- a/visualdl/component/inference/fastdeploy_client/http_client_manager.py
+++ b/visualdl/component/inference/fastdeploy_client/http_client_manager.py
@@ -19,7 +19,6 @@ import numpy as np
import requests
import tritonclient.http as httpclient
from attrdict import AttrDict
-from tritonclient.utils import InferenceServerException
def convert_http_metadata_config(metadata):
@@ -118,8 +117,63 @@ table, th {{
"""
+metrics_table_head_en = """
+<style>
+table, th {{
+    border: 0.1px solid black;
+}}
+</style>
+<table>
+<tr>
+<th rowspan="2">Model name</th>
+<th colspan="4">Execution metric</th>
+<th colspan="5">Delay metric</th>
+</tr>
+<tr>
+<th>inference request success</th>
+<th>inference request failure</th>
+<th>inference count</th>
+<th>inference exec count</th>
+<th>inference request duration(ms)</th>
+<th>inference queue duration(ms)</th>
+<th>inference compute input duration(ms)</th>
+<th>inference compute infer duration(ms)</th>
+<th>inference compute output duration(ms)</th>
+</tr>
+{}
+</table>
+<br>
+<table>
+<tr>
+<th rowspan="2">GPU</th>
+<th colspan="4">Performance metric</th>
+<th colspan="2">Memory</th>
+</tr>
+<tr>
+<th>utilization(%)</th>
+<th>power usage(W)</th>
+<th>power limit(W)</th>
+<th>energy consumption(W)</th>
+<th>total(GB)</th>
+<th>used(GB)</th>
+</tr>
+{}
+</table>
+"""
+
-def get_metric_data(server_addr, metric_port): # noqa:C901
+def get_metric_data(server_addr, metric_port, lang='zh'): # noqa:C901
'''
Get metrics data from fastdeploy server, and transform it into html table.
Args:
@@ -235,6 +289,8 @@ def get_metric_data(server_addr, metric_port): # noqa:C901
for item in data]) + ""
for data in gpu_data_list
])
+ if lang == 'en':
+ return metrics_table_head_en.format(model_data, gpu_data)
return metrics_table_head.format(model_data, gpu_data)
@@ -294,7 +350,7 @@ class HttpClientManager:
try:
model_metadata = fastdeploy_client.get_model_metadata(
model_name=model_name, model_version=model_version)
- except InferenceServerException as e:
+ except Exception as e:
raise RuntimeError("Failed to retrieve the metadata: " + str(e))
model_metadata = convert_http_metadata_config(model_metadata)
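
get_metric_data keeps 'zh' as its default, so existing callers are unaffected; only the English client passes the new argument. A usage sketch, assuming a FastDeploy (Triton) server exposing Prometheus metrics on port 8002:

from visualdl.component.inference.fastdeploy_client.http_client_manager import (
    get_metric_data)

# Scrapes http://localhost:8002/metrics and renders the samples as an HTML
# table; lang only selects between the zh and en table-head templates.
html_zh = get_metric_data('localhost', '8002')
html_en = get_metric_data('localhost', '8002', lang='en')
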
diff --git a/visualdl/component/inference/fastdeploy_server.py b/visualdl/component/inference/fastdeploy_server.py
index 89b0b13ff443917ea636776274b64defbc857ca7..8d07b1faef6338d36ba714b3556deaf47a309efe 100644
--- a/visualdl/component/inference/fastdeploy_server.py
+++ b/visualdl/component/inference/fastdeploy_server.py
@@ -25,6 +25,7 @@ from pathlib import Path
import requests
from .fastdeploy_client.client_app import create_gradio_client_app
+from .fastdeploy_client.client_app import create_gradio_client_app_en
from .fastdeploy_lib import analyse_config
from .fastdeploy_lib import check_process_zombie
from .fastdeploy_lib import copy_config_file_to_default_config
@@ -53,7 +54,8 @@ class FastDeployServerApi(object):
self.root_dir = Path(os.getcwd())
self.opened_servers = {
} # Use to store the opened server process pid and process itself
- self.client_port = None
+ self.client_port = None # Chinese version
+ self.client_en_port = None # English version
@result()
def get_directory(self, cur_dir):
@@ -351,34 +353,43 @@ class FastDeployServerApi(object):
version_filenames_dict_for_frontend)
return version_info_for_frontend
- def create_fastdeploy_client(self):
- if self.client_port is None:
-
- def get_free_tcp_port():
- tcp = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
- # tcp.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)
- tcp.bind(('localhost', 0))
- addr, port = tcp.getsockname()
- tcp.close()
- return port
-
- self.client_port = get_free_tcp_port()
- app = create_gradio_client_app()
- thread = Process(
- target=app.launch, kwargs={'server_port': self.client_port})
- thread.start()
-
- def check_alive():
- while True:
- try:
- requests.get('http://localhost:{}/'.format(
- self.client_port))
- break
- except Exception:
- time.sleep(1)
-
- check_alive()
- return self.client_port
+ def create_fastdeploy_client(self, lang='zh'):
+ def get_free_tcp_port():
+ tcp = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+ # tcp.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)
+ tcp.bind(('localhost', 0))
+ addr, port = tcp.getsockname()
+ tcp.close()
+ return port
+
+ def check_alive(client_port):
+ while True:
+ try:
+ requests.get('http://localhost:{}/'.format(client_port))
+ break
+ except Exception:
+ time.sleep(1)
+
+ if lang == 'en':
+ if self.client_en_port is None:
+ self.client_en_port = get_free_tcp_port()
+ app = create_gradio_client_app_en()
+ thread = Process(
+ target=app.launch,
+ kwargs={'server_port': self.client_en_port})
+ thread.start()
+ check_alive(self.client_en_port)
+ return self.client_en_port
+ else:
+ if self.client_port is None:
+ self.client_port = get_free_tcp_port()
+ app = create_gradio_client_app()
+ thread = Process(
+ target=app.launch,
+ kwargs={'server_port': self.client_port})
+ thread.start()
+ check_alive(self.client_port)
+ return self.client_port
def _poll_zombie_process(self):
# check if there are servers killed by other vdl app instance and become zoombie
@@ -410,7 +421,7 @@ def create_fastdeploy_api_call():
'start_server': (api.start_server, ['config']),
'stop_server': (api.stop_server, ['server_id']),
'get_server_output': (api.get_server_output, ['server_id', 'length']),
- 'create_fastdeploy_client': (api.create_fastdeploy_client, []),
+ 'create_fastdeploy_client': (api.create_fastdeploy_client, ['lang']),
'get_server_list': (api.get_server_list, []),
'get_server_metric': (api.get_server_metric, ['server_id']),
'get_server_config': (api.get_server_config, ['server_id']),
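
create_fastdeploy_client now launches at most one Gradio process per language and caches its port, so repeated page loads reuse the running app. A behavioural sketch (a hypothetical standalone driver; inside VisualDL the call is dispatched through the api table above, with lang taken from the request):

from visualdl.component.inference.fastdeploy_server import FastDeployServerApi

api = FastDeployServerApi()
zh_port = api.create_fastdeploy_client()           # first call spawns the zh app
en_port = api.create_fastdeploy_client(lang='en')  # separate process and port for en
assert en_port != zh_port                          # each language owns its own port
assert api.create_fastdeploy_client() == zh_port   # cached; no second zh process
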
diff --git a/visualdl/server/app.py b/visualdl/server/app.py
index e451c4e2188b16bce565f3b45ef8de6b4b4030c9..6e96ce02aaaca84e62cb4e2a78aab4c2a3e5e15c 100644
--- a/visualdl/server/app.py
+++ b/visualdl/server/app.py
@@ -181,6 +181,7 @@ def create_app(args): # noqa: C901
error_msg = '{}'.format(e)
return make_response(error_msg)
args = urllib.parse.urlencode(request_args)
+
if args:
return redirect(
api_path + "/fastdeploy/fastdeploy_client/app?{}".format(args),
@@ -201,14 +202,30 @@ def create_app(args): # noqa: C901
Returns:
Any thing from gradio server.
'''
+ lang = 'zh'
if request.method == 'POST':
+ if request.mimetype == 'application/json':
+ request_args = request.json
+ else:
+ request_args = request.form.to_dict()
+ if 'data' in request_args:
+ lang = request_args['data'][-1]
+ request_args['lang'] = lang
+ elif 'lang' in request_args:
+ lang = request_args['lang']
+
port = fastdeploy_api_call('create_fastdeploy_client',
- request.form)
- request_args = request.form
+ request_args)
else:
+ request_args = request.args.to_dict()
+ if 'data' in request_args:
+ lang = request_args['data'][-1]
+ request_args['lang'] = lang
+ elif 'lang' in request_args:
+ lang = request_args['lang']
port = fastdeploy_api_call('create_fastdeploy_client',
- request.args)
- request_args = request.args
+ request_args)
+
if path == 'app':
proxy_url = request.url.replace(
request.host_url.rstrip('/') + api_path +
@@ -239,38 +256,82 @@ def create_app(args): # noqa: C901
model_name = start_args.get('default_model_name', '')
content = content.decode()
try:
- default_server_addr = re.search(
- '"label": {}.*?"value": "".*?}}'.format(
- json.dumps("服务ip", ensure_ascii=True).replace(
- '\\', '\\\\')), content).group(0)
- cur_server_addr = default_server_addr.replace(
- '"value": ""', '"value": "localhost"')
- default_http_port = re.search(
- '"label": {}.*?"value": "".*?}}'.format(
- json.dumps("推理服务端口", ensure_ascii=True).replace(
- '\\', '\\\\')), content).group(0)
- cur_http_port = default_http_port.replace(
- '"value": ""', '"value": "{}"'.format(http_port))
- default_metrics_port = re.search(
- '"label": {}.*?"value": "".*?}}'.format(
- json.dumps("性能服务端口", ensure_ascii=True).replace(
- '\\', '\\\\')), content).group(0)
- cur_metrics_port = default_metrics_port.replace(
- '"value": ""', '"value": "{}"'.format(metrics_port))
- default_model_name = re.search(
- '"label": {}.*?"value": "".*?}}'.format(
- json.dumps("模型名称", ensure_ascii=True).replace(
- '\\', '\\\\')), content).group(0)
- cur_model_name = default_model_name.replace(
- '"value": ""', '"value": "{}"'.format(model_name))
- default_model_version = re.search(
- '"label": {}.*?"value": "".*?}}'.format(
- json.dumps("模型版本", ensure_ascii=True).replace(
- '\\', '\\\\')), content).group(0)
- cur_model_version = default_model_version.replace(
- '"value": ""', '"value": "{}"'.format('1'))
- content = content.replace(default_server_addr,
- cur_server_addr)
+ if request_args.get('lang', 'zh') == 'en':
+ default_server_addr = re.search(
+ '"label": {}.*?"value": "".*?}}'.format(
+ json.dumps(
+ "server ip", ensure_ascii=True).replace(
+ '\\', '\\\\')), content).group(0)
+ cur_server_addr = default_server_addr.replace(
+ '"value": ""', '"value": "localhost"')
+ default_http_port = re.search(
+ '"label": {}.*?"value": "".*?}}'.format(
+ json.dumps(
+ "server port", ensure_ascii=True).replace(
+ '\\', '\\\\')), content).group(0)
+ cur_http_port = default_http_port.replace(
+ '"value": ""', '"value": "{}"'.format(http_port))
+ default_metrics_port = re.search(
+ '"label": {}.*?"value": "".*?}}'.format(
+ json.dumps(
+ "metrics port", ensure_ascii=True).replace(
+ '\\', '\\\\')), content).group(0)
+ cur_metrics_port = default_metrics_port.replace(
+ '"value": ""',
+ '"value": "{}"'.format(metrics_port))
+ default_model_name = re.search(
+ '"label": {}.*?"value": "".*?}}'.format(
+ json.dumps(
+ "model name", ensure_ascii=True).replace(
+ '\\', '\\\\')), content).group(0)
+ cur_model_name = default_model_name.replace(
+ '"value": ""', '"value": "{}"'.format(model_name))
+ default_model_version = re.search(
+ '"label": {}.*?"value": "".*?}}'.format(
+ json.dumps("model version",
+ ensure_ascii=True).replace(
+ '\\', '\\\\')),
+ content).group(0)
+ cur_model_version = default_model_version.replace(
+ '"value": ""', '"value": "{}"'.format('1'))
+ content = content.replace(default_server_addr,
+ cur_server_addr)
+ else:
+ default_server_addr = re.search(
+ '"label": {}.*?"value": "".*?}}'.format(
+ json.dumps("服务ip", ensure_ascii=True).replace(
+ '\\', '\\\\')), content).group(0)
+ cur_server_addr = default_server_addr.replace(
+ '"value": ""', '"value": "localhost"')
+ default_http_port = re.search(
+ '"label": {}.*?"value": "".*?}}'.format(
+ json.dumps(
+ "推理服务端口", ensure_ascii=True).replace(
+ '\\', '\\\\')), content).group(0)
+ cur_http_port = default_http_port.replace(
+ '"value": ""', '"value": "{}"'.format(http_port))
+ default_metrics_port = re.search(
+ '"label": {}.*?"value": "".*?}}'.format(
+ json.dumps(
+ "性能服务端口", ensure_ascii=True).replace(
+ '\\', '\\\\')), content).group(0)
+ cur_metrics_port = default_metrics_port.replace(
+ '"value": ""',
+ '"value": "{}"'.format(metrics_port))
+ default_model_name = re.search(
+ '"label": {}.*?"value": "".*?}}'.format(
+ json.dumps("模型名称", ensure_ascii=True).replace(
+ '\\', '\\\\')), content).group(0)
+ cur_model_name = default_model_name.replace(
+ '"value": ""', '"value": "{}"'.format(model_name))
+ default_model_version = re.search(
+ '"label": {}.*?"value": "".*?}}'.format(
+ json.dumps("模型版本", ensure_ascii=True).replace(
+ '\\', '\\\\')), content).group(0)
+ cur_model_version = default_model_version.replace(
+ '"value": ""', '"value": "{}"'.format('1'))
+ content = content.replace(default_server_addr,
+ cur_server_addr)
if http_port:
content = content.replace(default_http_port,
cur_http_port)
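
Both branches above apply the same label-anchored rewrite to Gradio's serialized page config: locate a component by its "label" and inject a default into its empty "value" field. A minimal sketch with hypothetical content (the real code matches the zh or en labels chosen above, and doubles backslashes so that json-escaped non-ASCII labels remain a valid regex):

import json
import re

content = '{"label": "server port", "show_label": true, "value": ""}'
label = json.dumps("server port", ensure_ascii=True).replace('\\', '\\\\')
match = re.search('"label": {}.*?"value": ""'.format(label), content).group(0)
content = content.replace(match, match.replace('"value": ""', '"value": "8000"'))
print(content)  # {"label": "server port", "show_label": true, "value": "8000"}
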