diff --git a/README.md b/README.md index 86d7eb5d614729d7ec3a0816d2114e5273fd7aed..a95dd6a8697c77e47071a5b9ac6c86502aee98b2 100644 --- a/README.md +++ b/README.md @@ -88,7 +88,7 @@ Here, we use `curl` to send a HTTP POST request to the service we just started. ``` shell -curl -H "Content-Type:application/json" -X POST -d '{"x": [0.0137, -0.1136, 0.2553, -0.0692, 0.0582, -0.0727, -0.1583, -0.0584, 0.6283, 0.4919, 0.1856, 0.0795, -0.0332], "fetch":["price"]}' http://127.0.0.1:9292/uci/prediction +curl -H "Content-Type:application/json" -X POST -d '{"feed":[{"x": [0.0137, -0.1136, 0.2553, -0.0692, 0.0582, -0.0727, -0.1583, -0.0584, 0.6283, 0.4919, 0.1856, 0.0795, -0.0332]}], "fetch":["price"]}' http://127.0.0.1:9292/uci/prediction ``` ### RPC service @@ -133,7 +133,7 @@ python lac_web_service.py jieba_server_model/ lac_workdir 9292 ``` - **Request sample**: ``` shell -curl -H "Content-Type:application/json" -X POST -d '{"words": "我爱北京天安门", "fetch":["word_seg"]}' http://127.0.0.1:9292/lac/prediction +curl -H "Content-Type:application/json" -X POST -d '{"feed":[{"words": "我爱北京天安门"}], "fetch":["word_seg"]}' http://127.0.0.1:9292/lac/prediction ``` - **Request result**: ``` shell diff --git a/README_CN.md b/README_CN.md index 641f2eff5f8da6d513dcf5a8d0cefb851d65490a..2b91ab9e75bf6ffedc2df421b1cb40cc651bf8c7 100644 --- a/README_CN.md +++ b/README_CN.md @@ -92,7 +92,7 @@ python -m paddle_serving_server.serve --model uci_housing_model --thread 10 --po ``` shell -curl -H "Content-Type:application/json" -X POST -d '{"x": [0.0137, -0.1136, 0.2553, -0.0692, 0.0582, -0.0727, -0.1583, -0.0584, 0.6283, 0.4919, 0.1856, 0.0795, -0.0332], "fetch":["price"]}' http://127.0.0.1:9292/uci/prediction +curl -H "Content-Type:application/json" -X POST -d '{"feed":[{"x": [0.0137, -0.1136, 0.2553, -0.0692, 0.0582, -0.0727, -0.1583, -0.0584, 0.6283, 0.4919, 0.1856, 0.0795, -0.0332]}], "fetch":["price"]}' http://127.0.0.1:9292/uci/prediction ```

RPC服务

@@ -138,7 +138,7 @@ python lac_web_service.py jieba_server_model/ lac_workdir 9292 ``` - **客户端请求示例**: ``` shell -curl -H "Content-Type:application/json" -X POST -d '{"words": "我爱北京天安门", "fetch":["word_seg"]}' http://127.0.0.1:9292/lac/prediction +curl -H "Content-Type:application/json" -X POST -d '{"feed":[{"words": "我爱北京天安门"}], "fetch":["word_seg"]}' http://127.0.0.1:9292/lac/prediction ``` - **返回结果示例**: ``` shell diff --git a/python/examples/bert/README.md b/python/examples/bert/README.md index 0b9ec5649491165669579044e95def0e766bca1a..d598fc3b057c85d80e8d10549f7c5b0cf1e725fb 100644 --- a/python/examples/bert/README.md +++ b/python/examples/bert/README.md @@ -69,7 +69,7 @@ set environmental variable to specify which gpus are used, the command above mea ### HTTP Inference ``` -curl -H "Content-Type:application/json" -X POST -d '{"words": "hello", "fetch":["pooled_output"]}' http://127.0.0.1:9292/bert/prediction +curl -H "Content-Type:application/json" -X POST -d '{"feed":[{"words": "hello"}], "fetch":["pooled_output"]}' http://127.0.0.1:9292/bert/prediction ``` ### Benchmark diff --git a/python/examples/bert/README_CN.md b/python/examples/bert/README_CN.md index fb74b024113474f2ebc454f5ef341755135fea6b..7f1d2911ba4a5017137e659fe1f1367e64026de4 100644 --- a/python/examples/bert/README_CN.md +++ b/python/examples/bert/README_CN.md @@ -65,7 +65,7 @@ head data-c.txt | python bert_client.py --model bert_seq128_client/serving_clien ### 执行预测 ``` -curl -H "Content-Type:application/json" -X POST -d '{"words": "hello", "fetch":["pooled_output"]}' http://127.0.0.1:9292/bert/prediction +curl -H "Content-Type:application/json" -X POST -d '{"feed":[{"words": "hello"}], "fetch":["pooled_output"]}' http://127.0.0.1:9292/bert/prediction ``` ### Benchmark diff --git a/python/examples/fit_a_line/README.md b/python/examples/fit_a_line/README.md index 8ea146e9b7a8e781cbebd004bd54c6e0adfba7c2..acc51938e39fc7b758071292e06b195ecfa558cb 100644 --- a/python/examples/fit_a_line/README.md 
+++ b/python/examples/fit_a_line/README.md @@ -46,5 +46,5 @@ python -m paddle_serving_server.serve --model uci_housing_model --thread 10 --po ### Client prediction ``` shell -curl -H "Content-Type:application/json" -X POST -d '{"x": [0.0137, -0.1136, 0.2553, -0.0692, 0.0582, -0.0727, -0.1583, -0.0584, 0.6283, 0.4919, 0.1856, 0.0795, -0.0332], "fetch":["price"]}' http://127.0.0.1:9393/uci/prediction +curl -H "Content-Type:application/json" -X POST -d '{"feed":[{"x": [0.0137, -0.1136, 0.2553, -0.0692, 0.0582, -0.0727, -0.1583, -0.0584, 0.6283, 0.4919, 0.1856, 0.0795, -0.0332]}], "fetch":["price"]}' http://127.0.0.1:9393/uci/prediction ``` diff --git a/python/examples/fit_a_line/README_CN.md b/python/examples/fit_a_line/README_CN.md index 3b97005bce14f9794b831066a1be2750d895e4f6..b18b7204ef2c678ac2811c2bc78df611e0dc538b 100644 --- a/python/examples/fit_a_line/README_CN.md +++ b/python/examples/fit_a_line/README_CN.md @@ -47,5 +47,5 @@ python -m paddle_serving_server.serve --model uci_housing_model --thread 10 --po ### 客户端预测 ``` shell -curl -H "Content-Type:application/json" -X POST -d '{"x": [0.0137, -0.1136, 0.2553, -0.0692, 0.0582, -0.0727, -0.1583, -0.0584, 0.6283, 0.4919, 0.1856, 0.0795, -0.0332], "fetch":["price"]}' http://127.0.0.1:9393/uci/prediction +curl -H "Content-Type:application/json" -X POST -d '{"feed":[{"x": [0.0137, -0.1136, 0.2553, -0.0692, 0.0582, -0.0727, -0.1583, -0.0584, 0.6283, 0.4919, 0.1856, 0.0795, -0.0332]}], "fetch":["price"]}' http://127.0.0.1:9393/uci/prediction ``` diff --git a/python/examples/imagenet/image_http_client.py b/python/examples/imagenet/image_http_client.py index d920eccb06cc9ad4a87237792a1e688fd76b0d6e..61b021be246dc4b843e608dcea21418419731b49 100644 --- a/python/examples/imagenet/image_http_client.py +++ b/python/examples/imagenet/image_http_client.py @@ -27,7 +27,7 @@ def predict(image_path, server): image = base64.b64encode(open(image_path).read()) else: image = base64.b64encode(open(image_path, 
"rb").read()).decode("utf-8") - req = json.dumps({"image": image, "fetch": ["score"]}) + req = json.dumps({"feed": [{"image": image}], "fetch": ["score"]}) r = requests.post( server, data=req, headers={"Content-Type": "application/json"}) try: diff --git a/python/examples/imdb/README.md b/python/examples/imdb/README.md index 8867cc8fde00a59984d330439e3ee491e846df54..5f4d204d368a98cb47d4dac2ff3d481e519adb9d 100644 --- a/python/examples/imdb/README.md +++ b/python/examples/imdb/README.md @@ -28,7 +28,7 @@ python text_classify_service.py imdb_cnn_model/ workdir/ 9292 imdb.vocab ### HTTP Infer ``` -curl -H "Content-Type:application/json" -X POST -d '{"words": "i am very sad | 0", "fetch":["prediction"]}' http://127.0.0.1:9292/imdb/prediction +curl -H "Content-Type:application/json" -X POST -d '{"feed":[{"words": "i am very sad | 0"}], "fetch":["prediction"]}' http://127.0.0.1:9292/imdb/prediction ``` ### Benchmark diff --git a/python/examples/imdb/README_CN.md b/python/examples/imdb/README_CN.md index 06e3de7206f88f6e8f59aaca2215641805a9a5cb..2b79938bbf0625786033d13ec2960ad2bc73acda 100644 --- a/python/examples/imdb/README_CN.md +++ b/python/examples/imdb/README_CN.md @@ -27,7 +27,7 @@ python text_classify_service.py imdb_cnn_model/ workdir/ 9292 imdb.vocab ### 执行预测 ``` -curl -H "Content-Type:application/json" -X POST -d '{"words": "i am very sad | 0", "fetch":["prediction"]}' http://127.0.0.1:9292/imdb/prediction +curl -H "Content-Type:application/json" -X POST -d '{"feed":[{"words": "i am very sad | 0"}], "fetch":["prediction"]}' http://127.0.0.1:9292/imdb/prediction ``` ### Benchmark diff --git a/python/examples/lac/README.md b/python/examples/lac/README.md index a0553b24ab377ed7d274583ed84827f2f1a985af..bc420186a09dfd0066c1abf0c0d95063e9cb0699 100644 --- a/python/examples/lac/README.md +++ b/python/examples/lac/README.md @@ -28,5 +28,5 @@ python lac_web_service.py jieba_server_model/ lac_workdir 9292 ### HTTP Infer ``` -curl -H "Content-Type:application/json" -X 
POST -d '{"words": "我爱北京天安门", "fetch":["word_seg"]}' http://127.0.0.1:9292/lac/prediction +curl -H "Content-Type:application/json" -X POST -d '{"feed":[{"words": "我爱北京天安门"}], "fetch":["word_seg"]}' http://127.0.0.1:9292/lac/prediction ``` diff --git a/python/examples/lac/README_CN.md b/python/examples/lac/README_CN.md index 98f2d36497dbf5dea8e34de355ae96a7f529349a..449f474ca291053eb6880166c52814c9d4180f36 100644 --- a/python/examples/lac/README_CN.md +++ b/python/examples/lac/README_CN.md @@ -28,5 +28,5 @@ python lac_web_service.py jieba_server_model/ lac_workdir 9292 ### 执行HTTP预测 ``` -curl -H "Content-Type:application/json" -X POST -d '{"words": "我爱北京天安门", "fetch":["word_seg"]}' http://127.0.0.1:9292/lac/prediction +curl -H "Content-Type:application/json" -X POST -d '{"feed":[{"words": "我爱北京天安门"}], "fetch":["word_seg"]}' http://127.0.0.1:9292/lac/prediction ``` diff --git a/python/paddle_serving_server/web_service.py b/python/paddle_serving_server/web_service.py index a03649725b1c41ca94b8ef495a2fc80e8293aba0..7e69b241f50255aa69d34c1405b72eacb675be04 100755 --- a/python/paddle_serving_server/web_service.py +++ b/python/paddle_serving_server/web_service.py @@ -78,14 +78,15 @@ class WebService(object): if "fetch" not in request.json: abort(400) try: - feed, fetch = self.preprocess(request.json, request.json["fetch"]) + feed, fetch = self.preprocess(request.json["feed"], + request.json["fetch"]) if isinstance(feed, dict) and "fetch" in feed: del feed["fetch"] fetch_map = self.client.predict(feed=feed, fetch=fetch) - fetch_map = self.postprocess( - feed=request.json, fetch=fetch, fetch_map=fetch_map) for key in fetch_map: fetch_map[key] = fetch_map[key].tolist() + fetch_map = self.postprocess( + feed=feed, fetch=fetch, fetch_map=fetch_map) result = {"result": fetch_map} except ValueError: result = {"result": "Request Value Error"} @@ -118,8 +119,8 @@ class WebService(object): threaded=False, processes=4) - def preprocess(self, feed={}, fetch=[]): + def preprocess(self, 
feed=[], fetch=[]): return feed, fetch - def postprocess(self, feed={}, fetch=[], fetch_map=None): + def postprocess(self, feed=[], fetch=[], fetch_map=None): return fetch_map diff --git a/python/paddle_serving_server_gpu/web_service.py b/python/paddle_serving_server_gpu/web_service.py index eb1ecfd8faaf34a6bf2955af46d5a8cf09085ad7..2ec996b1db89bdff3c4550caa566bec5af2d9506 100644 --- a/python/paddle_serving_server_gpu/web_service.py +++ b/python/paddle_serving_server_gpu/web_service.py @@ -123,14 +123,15 @@ class WebService(object): if "fetch" not in request.json: abort(400) try: - feed, fetch = self.preprocess(request.json, request.json["fetch"]) + feed, fetch = self.preprocess(request.json["feed"], + request.json["fetch"]) if isinstance(feed, dict) and "fetch" in feed: del feed["fetch"] fetch_map = self.client.predict(feed=feed, fetch=fetch) - fetch_map = self.postprocess( - feed=request.json, fetch=fetch, fetch_map=fetch_map) for key in fetch_map: fetch_map[key] = fetch_map[key].tolist() + fetch_map = self.postprocess( + feed=feed, fetch=fetch, fetch_map=fetch_map) result = {"result": fetch_map} except ValueError: result = {"result": "Request Value Error"} @@ -167,8 +169,8 @@ class WebService(object): threaded=False, processes=4) - def preprocess(self, feed={}, fetch=[]): + def preprocess(self, feed=[], fetch=[]): return feed, fetch - def postprocess(self, feed={}, fetch=[], fetch_map=None): + def postprocess(self, feed=[], fetch=[], fetch_map=None): return fetch_map diff --git a/tools/serving_build.sh b/tools/serving_build.sh index 1e47b8f4fe26c689b5d6680c1478740201b335b9..a39e5a63f91ba694a20973bba9e3fe440446be20 100644 --- a/tools/serving_build.sh +++ b/tools/serving_build.sh @@ -154,9 +154,9 @@ function python_test_fit_a_line() { unsetproxy # maybe the proxy is used on iPipe, which makes web-test failed. 
check_cmd "python -m paddle_serving_server.serve --model uci_housing_model --name uci --port 9393 --thread 4 --name uci > /dev/null &" sleep 5 # wait for the server to start - check_cmd "curl -H \"Content-Type:application/json\" -X POST -d '{\"x\": [0.0137, -0.1136, 0.2553, -0.0692, 0.0582, -0.0727, -0.1583, -0.0584, 0.6283, 0.4919, 0.1856, 0.0795, -0.0332], \"fetch\":[\"price\"]}' http://127.0.0.1:9393/uci/prediction" + check_cmd "curl -H \"Content-Type:application/json\" -X POST -d '{\"feed\":[{\"x\": [0.0137, -0.1136, 0.2553, -0.0692, 0.0582, -0.0727, -0.1583, -0.0584, 0.6283, 0.4919, 0.1856, 0.0795, -0.0332]}], \"fetch\":[\"price\"]}' http://127.0.0.1:9393/uci/prediction" # check http code - http_code=`curl -H "Content-Type:application/json" -X POST -d '{"x": [0.0137, -0.1136, 0.2553, -0.0692, 0.0582, -0.0727, -0.1583, -0.0584, 0.6283, 0.4919, 0.1856, 0.0795, -0.0332], "fetch":["price"]}' -s -w "%{http_code}" -o /dev/null http://127.0.0.1:9393/uci/prediction` + http_code=`curl -H "Content-Type:application/json" -X POST -d '{"feed":[{"x": [0.0137, -0.1136, 0.2553, -0.0692, 0.0582, -0.0727, -0.1583, -0.0584, 0.6283, 0.4919, 0.1856, 0.0795, -0.0332]}], "fetch":["price"]}' -s -w "%{http_code}" -o /dev/null http://127.0.0.1:9393/uci/prediction` setproxy # recover proxy state kill_server_process if [ ${http_code} -ne 200 ]; then @@ -171,14 +171,14 @@ function python_test_fit_a_line() { sleep 5 # wait for the server to start check_cmd "python test_client.py uci_housing_client/serving_client_conf.prototxt > /dev/null" kill_server_process - + # test web unsetproxy # maybe the proxy is used on iPipe, which makes web-test failed. 
check_cmd "python -m paddle_serving_server_gpu.serve --model uci_housing_model --port 9393 --thread 2 --gpu_ids 0 --name uci > /dev/null &" sleep 5 # wait for the server to start - check_cmd "curl -H \"Content-Type:application/json\" -X POST -d '{\"x\": [0.0137, -0.1136, 0.2553, -0.0692, 0.0582, -0.0727, -0.1583, -0.0584, 0.6283, 0.4919, 0.1856, 0.0795, -0.0332], \"fetch\":[\"price\"]}' http://127.0.0.1:9393/uci/prediction" + check_cmd "curl -H \"Content-Type:application/json\" -X POST -d '{\"feed\":[{\"x\": [0.0137, -0.1136, 0.2553, -0.0692, 0.0582, -0.0727, -0.1583, -0.0584, 0.6283, 0.4919, 0.1856, 0.0795, -0.0332]}], \"fetch\":[\"price\"]}' http://127.0.0.1:9393/uci/prediction" # check http code - http_code=`curl -H "Content-Type:application/json" -X POST -d '{"x": [0.0137, -0.1136, 0.2553, -0.0692, 0.0582, -0.0727, -0.1583, -0.0584, 0.6283, 0.4919, 0.1856, 0.0795, -0.0332], "fetch":["price"]}' -s -w "%{http_code}" -o /dev/null http://127.0.0.1:9393/uci/prediction` + http_code=`curl -H "Content-Type:application/json" -X POST -d '{"feed":[{"x": [0.0137, -0.1136, 0.2553, -0.0692, 0.0582, -0.0727, -0.1583, -0.0584, 0.6283, 0.4919, 0.1856, 0.0795, -0.0332]}], "fetch":["price"]}' -s -w "%{http_code}" -o /dev/null http://127.0.0.1:9393/uci/prediction` setproxy # recover proxy state kill_server_process if [ ${http_code} -ne 200 ]; then @@ -211,11 +211,11 @@ function python_run_criteo_ctr_with_cube() { check_cmd "mv models/ctr_serving_model_kv ./" check_cmd "mv models/data ./cube/" check_cmd "mv models/ut_data ./" - cp ../../../build-server-$TYPE/output/bin/cube* ./cube/ + cp ../../../build-server-$TYPE/output/bin/cube* ./cube/ mkdir -p $PYTHONROOT/lib/python2.7/site-packages/paddle_serving_server/serving-cpu-avx-openblas-0.1.3/ yes | cp ../../../build-server-$TYPE/output/demo/serving/bin/serving $PYTHONROOT/lib/python2.7/site-packages/paddle_serving_server/serving-cpu-avx-openblas-0.1.3/ sh cube_prepare.sh & - check_cmd "mkdir work_dir1 && cp cube/conf/cube.conf 
./work_dir1/" + check_cmd "mkdir work_dir1 && cp cube/conf/cube.conf ./work_dir1/" python test_server.py ctr_serving_model_kv & sleep 5 check_cmd "python test_client.py ctr_client_conf/serving_client_conf.prototxt ./ut_data >score" @@ -297,7 +297,7 @@ function python_test_bert() { # kill_server_process # ps -ef | grep "paddle_serving_server" | grep -v grep | awk '{print $2}' | xargs kill # ps -ef | grep "serving" | grep -v grep | awk '{print $2}' | xargs kill - echo "bert RPC inference pass" + echo "bert RPC inference pass" ;; GPU) export CUDA_VISIBLE_DEVICES=0 @@ -350,7 +350,7 @@ function python_test_imdb() { check_cmd "python text_classify_service.py imdb_cnn_model/workdir/9292 imdb.vocab &" sleep 5 - check_cmd "curl -H "Content-Type:application/json" -X POST -d '{"words": "i am very sad | 0", "fetch":["prediction"]}' http://127.0.0.1:9292/imdb/prediction" + check_cmd "curl -H \"Content-Type:application/json\" -X POST -d '{\"feed\":[{\"words\": \"i am very sad | 0\"}], \"fetch\":[\"prediction\"]}' http://127.0.0.1:9292/imdb/prediction" kill_server_process ps -ef | grep "paddle_serving_server" | grep -v grep | awk '{print $2}' | xargs kill ps -ef | grep "text_classify_service.py" | grep -v grep | awk '{print $2}' | xargs kill @@ -385,7 +385,7 @@ function python_test_lac() { check_cmd "python lac_web_service.py jieba_server_model/ lac_workdir 9292 &" sleep 5 - check_cmd "curl -H "Content-Type:application/json" -X POST -d '{"words": "我爱北京天安门", "fetch":["word_seg"]}' http://127.0.0.1:9292/lac/prediction" + check_cmd "curl -H \"Content-Type:application/json\" -X POST -d '{\"feed\":[{\"words\": \"我爱北京天安门\"}], \"fetch\":[\"word_seg\"]}' http://127.0.0.1:9292/lac/prediction" kill_server_process ps -ef | grep "paddle_serving_server" | grep -v grep | awk '{print $2}' | xargs kill ps -ef | grep "lac_web_service" | grep -v grep | awk '{print $2}' | xargs kill @@ -411,8 +411,8 @@ function python_run_test() { python_test_fit_a_line $TYPE # pwd: 
/Serving/python/examples python_run_criteo_ctr_with_cube $TYPE # pwd: /Serving/python/examples python_test_bert $TYPE # pwd: /Serving/python/examples - python_test_imdb $TYPE # pwd: /Serving/python/examples - python_test_lac $TYPE + python_test_imdb $TYPE # pwd: /Serving/python/examples + python_test_lac $TYPE echo "test python $TYPE part finished as expected." cd ../.. # pwd: /Serving }