Commit 1adab94b authored by zhangjun

update python code

Parent 6829588e
......@@ -63,11 +63,11 @@ endif()
 if (SERVER)
   if(CUDA_VERSION EQUAL 10.1)
-    set(SUFFIX 101)
+    set(VERSION_SUFFIX 101)
   elseif(CUDA_VERSION EQUAL 10.2)
-    set(SUFFIX 102)
+    set(VERSION_SUFFIX 102)
   elseif(CUDA_VERSION EQUAL 11.0)
-    set(SUFFIX 11)
+    set(VERSION_SUFFIX 11)
   endif()
 add_custom_command(
......
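The CMake change above renames SUFFIX to VERSION_SUFFIX, which the Python packaging code in the next hunk consumes. A minimal sketch of how such a suffix could be folded into the package version string, assuming the post-release form "X.Y.Z.post101"; the diff itself does not show the final composition:

def full_version(base_version, version_suffix=""):
    # e.g. full_version("0.0.0", "101") -> "0.0.0.post101" (assumed format)
    if version_suffix:
        return "{}.post{}".format(base_version, version_suffix)
    return base_version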
......@@ -35,7 +35,7 @@ def update_info(file_name, feature, info):
 if len(sys.argv) > 2:
-    update_info("paddle_serving_server/version.py", "cuda_version",
+    update_info("paddle_serving_server/version.py", "version_suffix",
                 sys.argv[2])
 path = "paddle_serving_" + sys.argv[1]
......
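The hunk header references update_info(file_name, feature, info), but its body is not shown here. A hedged sketch of a plausible implementation, given how version.py is parsed elsewhere in this commit: rewrite the matching `feature = "..."` assignment line in place. The regex and rewrite strategy are assumptions, not the repo's actual code.

import re

def update_info(file_name, feature, info):
    with open(file_name, "r") as f:
        lines = f.readlines()
    pattern = re.compile(r'^{} = '.format(re.escape(feature)))
    with open(file_name, "w") as f:
        for line in lines:
            if pattern.match(line):
                # replace the whole assignment line for this feature
                line = '{} = "{}"\n'.format(feature, info)
            f.write(line)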
......@@ -13,8 +13,19 @@
 # limitations under the License.
 # pylint: disable=doc-string-missing
-SERVER_VERSION = "0.0.0"
+from . import dag
+from . import monitor
+from . import rpc_service
+from . import serve
+from . import web_service
+from . import version
+from dag import *
+from monitor import *
+from rpc_service import *
+from serve import *
+from web_service import *
+from version import *
-__version__ = SERVER_VERSION
-cuda_version = "9"
-commit_id = ""
\ No newline at end of file
+SERVER_VERSION = "0.0.0"
+__version__ = SERVER_VERSION
\ No newline at end of file
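Note that the bare star imports added here (from dag import *, etc.) rely on Python 2's implicit relative imports; under Python 3 they raise ModuleNotFoundError unless top-level modules with those names happen to exist. A sketch of the package-relative spelling that works on both, if that was the intent:

# tail of __init__.py, Python 3-safe relative form (assumed intent)
from .dag import *
from .monitor import *
from .rpc_service import *
from .serve import *
from .web_service import *
from .version import *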
......@@ -23,7 +23,7 @@ import json
 import base64
 import time
 from multiprocessing import Pool, Process
-from paddle_serving_server_gpu import serve_args
+from paddle_serving_server import serve_args
 from flask import Flask, request
 import sys
 if sys.version_info.major == 2:
......
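This hunk, like several below, repoints imports from the old GPU-specific package to the unified paddle_serving_server. Downstream code pinned to the legacy name could bridge the transition with a guarded import; a hypothetical shim, not part of this commit:

try:
    from paddle_serving_server import serve_args
except ImportError:
    # older installs that only ship the split GPU package
    from paddle_serving_server_gpu import serve_args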
......@@ -2,7 +2,7 @@
 import os
 import tarfile
 import socket
-import paddle_serving_server_gpu as paddle_serving_server
+import paddle_serving_server as paddle_serving_server
 import time
 from .version import serving_server_version
 from contextlib import closing
......@@ -157,18 +157,19 @@ class Server(object):
if device == "arm":
engine.use_lite = self.use_lite
engine.use_xpu = self.use_xpu
if device == "cpu":
if use_encryption_model:
engine.type = "FLUID_CPU_ANALYSIS_ENCRPT"
else:
engine.type = "FLUID_CPU_ANALYSIS" + suffix
elif device == "gpu":
if use_encryption_model:
engine.type = "FLUID_GPU_ANALYSIS_ENCRPT"
else:
engine.type = "FLUID_GPU_ANALYSIS" + suffix
elif device == "arm":
engine.type = "FLUID_ARM_ANALYSIS" + suffix
engine.type = "PaddleInferenceEngine"
# if device == "cpu":
# if use_encryption_model:
# engine.type = "FLUID_CPU_ANALYSIS_ENCRPT"
# else:
# engine.type = "FLUID_CPU_ANALYSIS" + suffix
# elif device == "gpu":
# if use_encryption_model:
# engine.type = "FLUID_GPU_ANALYSIS_ENCRPT"
# else:
# engine.type = "FLUID_GPU_ANALYSIS" + suffix
# elif device == "arm":
# engine.type = "FLUID_ARM_ANALYSIS" + suffix
self.model_toolkit_conf.engines.extend([engine])
def _prepare_infer_service(self, port):
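With the branching on device and encryption commented out, every engine is now tagged "PaddleInferenceEngine", so device placement and encryption must travel as engine attributes instead of being encoded in the type string. A minimal sketch of that shape, assuming the proto exposes an encryption flag alongside use_lite/use_xpu; field names other than those shown above are hypothetical:

def configure_engine(engine, device, use_encryption_model,
                     use_lite=False, use_xpu=False):
    engine.type = "PaddleInferenceEngine"
    if device == "arm":
        engine.use_lite = use_lite
        engine.use_xpu = use_xpu
    # hypothetical field; the old code encoded encryption in the type suffix
    engine.encrypted = use_encryption_model
    return engine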
......@@ -290,6 +291,7 @@ class Server(object):
         version_file = open("{}/version.py".format(self.module_path), "r")
         import re
         for line in version_file.readlines():
+            # to add, version_suffix
             if re.match("cuda_version", line):
                 cuda_version = line.split("\"")[1]
                 if cuda_version == "101" or cuda_version == "102":
......
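The added "# to add, version_suffix" note marks where this loop still matches cuda_version even though the build now writes version_suffix. A hedged sketch of the parsing the note seems to call for, reusing the split-on-quotes idiom from the surrounding code:

import re

def read_version_suffix(module_path):
    # returns e.g. "101" for CUDA 10.1 builds, "" when no suffix was written
    with open("{}/version.py".format(module_path), "r") as version_file:
        for line in version_file.readlines():
            if re.match("version_suffix", line):
                return line.split("\"")[1]
    return ""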
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Paddle Serving Client version string """
serving_client_version = "0.0.0"
serving_server_version = "0.0.0"
module_proto_version = "0.0.0"
cuda_version = "9"
commit_id = ""
......@@ -18,15 +18,15 @@ from flask import Flask, request, abort
 from contextlib import closing
 from multiprocessing import Pool, Process, Queue
 from paddle_serving_client import Client
-from paddle_serving_server_gpu import OpMaker, OpSeqMaker, Server
-from paddle_serving_server_gpu.serve import start_multi_card
+from paddle_serving_server import OpMaker, OpSeqMaker, Server
+from paddle_serving_server.serve import start_multi_card
 import socket
 import sys
 import numpy as np
-import paddle_serving_server_gpu as serving
+import paddle_serving_server as serving
-from paddle_serving_server_gpu import pipeline
-from paddle_serving_server_gpu.pipeline import Op
+from paddle_serving_server import pipeline
+from paddle_serving_server.pipeline import Op

 def port_is_available(port):
     with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as sock:
......
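The hunk above cuts off inside port_is_available. A hedged completion consistent with the imports shown (socket, closing): treat the port as free when a probe connection fails.

import socket
from contextlib import closing

def port_is_available(port):
    with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as sock:
        sock.settimeout(2)
        result = sock.connect_ex(("0.0.0.0", port))
    # connect_ex returns 0 when something is already listening
    return result != 0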