diff --git a/cmake/paddlepaddle.cmake b/cmake/paddlepaddle.cmake
index 7670444ed1e021376fa44491973bb748cf611ecf..5a164e93437e59e9b93ad6472755adffea8421ae 100644
--- a/cmake/paddlepaddle.cmake
+++ b/cmake/paddlepaddle.cmake
@@ -31,7 +31,7 @@ message( "WITH_GPU = ${WITH_GPU}")
 # Paddle Version should be one of:
 # latest: latest develop build
 # version number like 1.5.2
-SET(PADDLE_VERSION "1.7.2")
+SET(PADDLE_VERSION "1.8.4")
 
 if (WITH_GPU)
     SET(PADDLE_LIB_VERSION "${PADDLE_VERSION}-gpu-cuda${CUDA_VERSION_MAJOR}-cudnn7-avx-mkl")
diff --git a/python/examples/ocr/README.md b/python/examples/ocr/README.md
index 43dcb30e1c8b86d5ec6f9f0d9ad2a6006a988d52..a0fc9f60160506183076233f33face1732a278c7 100644
--- a/python/examples/ocr/README.md
+++ b/python/examples/ocr/README.md
@@ -63,7 +63,7 @@ Dataset: RCTW 500 sample images
 | Serving web service | 8.69 | 13.41 | 109.97 | 2.82 | 87.76 | 4.29 | 3.98 | 78.51 | 3.66 | 4.12 | 181.02 | 136.49 | 317.51 |
 | Serving Debugger web service | 8.73 | 16.42 | 115.27 | 2.93 | 20.63 | 3.97 | 4.48 | 13.84 | 3.60 | 6.91 | 49.45 | 147.33 | 196.78 |
 
-## Appendix: Det or Rec only
+## Appendix: For Users who want to launch Det or Rec only
 
 if you are going to detect images not recognize it or directly recognize the words from images. We also provide Det and Rec server for you.
 ### Det Server
diff --git a/python/examples/ocr/det_debugger_server.py b/python/examples/ocr/det_debugger_server.py
index 78970af4d1a8a89f976f48f2c29ec97732afa0ce..913a0bb4c9a099cbef886beb3889337d024d10d6 100644
--- a/python/examples/ocr/det_debugger_server.py
+++ b/python/examples/ocr/det_debugger_server.py
@@ -23,7 +23,7 @@ from paddle_serving_app.reader import Div, Normalize, Transpose
 from paddle_serving_app.reader import DBPostProcess, FilterBoxes
 if sys.argv[1] == 'gpu':
     from paddle_serving_server_gpu.web_service import WebService
-elif sys.argv[1] == 'cpu'
+elif sys.argv[1] == 'cpu':
     from paddle_serving_server.web_service import WebService
 import time
 import re
@@ -69,7 +69,8 @@ ocr_service = OCRService(name="ocr")
 ocr_service.load_model_config("ocr_det_model")
 if sys.argv[1] == 'gpu':
     ocr_service.set_gpus("0")
-    ocr_service.prepare_server(workdir="workdir", port=9292, device="gpu", gpuid=0)
+    ocr_service.prepare_server(
+        workdir="workdir", port=9292, device="gpu", gpuid=0)
 elif sys.argv[1] == 'cpu':
     ocr_service.prepare_server(workdir="workdir", port=9292)
 ocr_service.init_det()
diff --git a/python/examples/ocr/det_web_server.py b/python/examples/ocr/det_web_server.py
index 14be74130dcb413c31a3e76c150d74f65575f451..38c6541c70e9871d13dd81751d4edb2bc771a904 100644
--- a/python/examples/ocr/det_web_server.py
+++ b/python/examples/ocr/det_web_server.py
@@ -70,7 +70,8 @@ ocr_service = OCRService(name="ocr")
 ocr_service.load_model_config("ocr_det_model")
 if sys.argv[1] == 'gpu':
     ocr_service.set_gpus("0")
-    ocr_service.prepare_server(workdir="workdir", port=9292, device="gpu", gpuid=0)
+    ocr_service.prepare_server(
+        workdir="workdir", port=9292, device="gpu", gpuid=0)
 elif sys.argv[1] == 'cpu':
     ocr_service.prepare_server(workdir="workdir", port=9292, device="cpu")
 ocr_service.init_det()
diff --git a/python/examples/ocr/ocr_web_server.py b/python/examples/ocr/ocr_web_server.py
index 6c0de44661958a6425f57039261969551ff552c5..de83ca94a4c1f55d886175d9a87b6a34db34c2a5 100644
--- a/python/examples/ocr/ocr_web_server.py
+++ b/python/examples/ocr/ocr_web_server.py
@@ -95,7 +95,8 @@ ocr_service = OCRService(name="ocr")
 ocr_service.load_model_config("ocr_rec_model")
 if sys.argv[1] == 'gpu':
     ocr_service.set_gpus("0")
-    ocr_service.prepare_server(workdir="workdir", port=9292, device="gpu", gpuid=0)
+    ocr_service.prepare_server(
+        workdir="workdir", port=9292, device="gpu", gpuid=0)
 elif sys.argv[1] == 'cpu':
     ocr_service.prepare_server(workdir="workdir", port=9292)
 ocr_service.init_det_client(
diff --git a/python/examples/ocr/rec_web_server.py b/python/examples/ocr/rec_web_server.py
index 0f4e9f6d264ed602f387bfaf0303cd59af7823fa..aae97fd9e3fbd1d29b6cf2ef160b92a522db2e22 100644
--- a/python/examples/ocr/rec_web_server.py
+++ b/python/examples/ocr/rec_web_server.py
@@ -70,7 +70,8 @@ ocr_service.load_model_config("ocr_rec_model")
 ocr_service.init_rec()
 if sys.argv[1] == 'gpu':
     ocr_service.set_gpus("0")
-    ocr_service.prepare_server(workdir="workdir", port=9292, device="gpu", gpuid=0)
+    ocr_service.prepare_server(
+        workdir="workdir", port=9292, device="gpu", gpuid=0)
 elif sys.argv[1] == 'cpu':
     ocr_service.prepare_server(workdir="workdir", port=9292, device="cpu")
 ocr_service.run_rpc_service()
diff --git a/python/paddle_serving_app/reader/image_reader.py b/python/paddle_serving_app/reader/image_reader.py
index 50c0753c27f845e784676b54ae7e029bec2a4ec4..38a1766433848c800ad40e1be7e79c2ac7989199 100644
--- a/python/paddle_serving_app/reader/image_reader.py
+++ b/python/paddle_serving_app/reader/image_reader.py
@@ -317,7 +317,7 @@ class RCNNPostprocess(object):
                 self.clip_bbox([xmin, ymin, xmax, ymax])
             w = xmax - xmin
             h = ymax - ymin
-            im_shape = t['im_shape'][0][i].tolist()
+            im_shape = t['im_shape'].tolist()
             im_height, im_width = int(im_shape[0]), int(im_shape[1])
             xmin *= im_width
             ymin *= im_height
@@ -420,7 +420,7 @@ class RCNNPostprocess(object):
         for key in image_with_bbox:
             if key == "image":
                 continue
-            if ".lod" in key:
+            if ".lod" in key or "im_shape" in key:
                 continue
             fetch_name = key
             bbox_result = self._get_bbox_result(image_with_bbox, fetch_name,
diff --git a/python/paddle_serving_server/web_service.py b/python/paddle_serving_server/web_service.py
index f576b49d1e83167ffdd3a73e94395da4ff991d72..86a77fac8433214262ee1cc14099f5772848d32d 100755
--- a/python/paddle_serving_server/web_service.py
+++ b/python/paddle_serving_server/web_service.py
@@ -91,6 +91,8 @@ class WebService(object):
                 request.json["fetch"])
             if isinstance(feed, dict) and "fetch" in feed:
                 del feed["fetch"]
+            if len(feed) == 0:
+                raise ValueError("empty input")
             fetch_map = self.client.predict(feed=feed, fetch=fetch)
             result = self.postprocess(
                 feed=request.json["feed"], fetch=fetch, fetch_map=fetch_map)
@@ -122,6 +124,32 @@ class WebService(object):
 
         self.app_instance = app_instance
 
+    def run_debugger_service(self):
+        import socket
+        localIP = socket.gethostbyname(socket.gethostname())
+        print("web service address:")
+        print("http://{}:{}/{}/prediction".format(localIP, self.port,
+                                                  self.name))
+        app_instance = Flask(__name__)
+
+        @app_instance.before_first_request
+        def init():
+            self._launch_local_predictor()
+
+        service_name = "/" + self.name + "/prediction"
+
+        @app_instance.route(service_name, methods=["POST"])
+        def run():
+            return self.get_prediction(request)
+
+        self.app_instance = app_instance
+
+    def _launch_local_predictor(self):
+        from paddle_serving_app.local_predict import Debugger
+        self.client = Debugger()
+        self.client.load_model_config(
+            "{}".format(self.model_config), gpu=False, profile=False)
+
     def run_web_service(self):
         self.app_instance.run(host="0.0.0.0",
                               port=self.port,
diff --git a/python/requirements.txt b/python/requirements.txt
index 697b24fd4db6aff6b30913d8a5d23416dc208c80..d3c7b1893fabdd448fa838d978bfd3269ccc1bae 100644
--- a/python/requirements.txt
+++ b/python/requirements.txt
@@ -1,5 +1,6 @@
 numpy>=1.12, <=1.16.4 ; python_version<"3.5"
 google>=2.0.3
+opencv-python==4.2.0.32
 protobuf>=3.12.2
 grpcio-tools>=1.28.1
 grpcio>=1.28.1
diff --git a/python/setup.py.app.in b/python/setup.py.app.in
index 1ee1cabb5a572536e6869852e3ab638cda6adcb8..2b7ae9303aa95eace7b0eab99d787ca43af48e00 100644
--- a/python/setup.py.app.in
+++ b/python/setup.py.app.in
@@ -42,7 +42,7 @@ if '${PACK}' == 'ON':
 
 
 REQUIRED_PACKAGES = [
-    'six >= 1.10.0', 'sentencepiece', 'opencv-python', 'pillow',
+    'six >= 1.10.0', 'sentencepiece', 'opencv-python<=4.2.0.32', 'pillow',
     'shapely', 'pyclipper'
 ]
 
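Usage note (not part of the patch): the sketch below shows how the new WebService.run_debugger_service() and _launch_local_predictor() hooks added above could be driven, following the same pattern as the det_debugger_server.py example touched in this diff. The DetService subclass name, its passthrough preprocess(), and the "ocr_det_model" config directory are illustrative assumptions, not definitions introduced by the patch.

    # Minimal sketch, assuming paddle_serving_server is installed and an
    # exported "ocr_det_model" config exists as in python/examples/ocr.
    from paddle_serving_server.web_service import WebService

    class DetService(WebService):
        def preprocess(self, feed=[], fetch=[]):
            # The real example decodes the base64 image here and applies the
            # ResizeByFactor / Normalize / Transpose reader ops before predict.
            return feed, fetch

    det_service = DetService(name="ocr")
    det_service.load_model_config("ocr_det_model")
    det_service.prepare_server(workdir="workdir", port=9292, device="cpu")
    # run_debugger_service() registers the Flask route /ocr/prediction and, on
    # the first request, loads the model in-process through
    # paddle_serving_app.local_predict.Debugger instead of an RPC client.
    det_service.run_debugger_service()
    det_service.run_web_service()

A client then POSTs JSON of the form {"feed": [...], "fetch": [...]} to http://<host>:9292/ocr/prediction; with the new check in get_prediction, an empty feed now raises ValueError("empty input") instead of being passed on to the predictor.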