diff --git a/examples/Cpp/PaddleClas/imagenet/README.md b/examples/Cpp/PaddleClas/imagenet/README.md deleted file mode 100755 index eaff522a5ae31eab08786489cbce0fa83f85e91d..0000000000000000000000000000000000000000 --- a/examples/Cpp/PaddleClas/imagenet/README.md +++ /dev/null @@ -1,41 +0,0 @@ -## Image Classification - -([简体中文](./README_CN.md)|English) - -This example uses the ResNet50_vd model to perform the ImageNet 1000-class classification task. - -### Get the model config and sample dataset -``` -sh get_model.sh -``` - -### Install the preprocessing module - -``` -pip3 install paddle_serving_app -``` - - -### Inference Service (supports BRPC-Client/GRPC-Client/Http-Client) - -Launch the server side: -``` -python3 -m paddle_serving_server.serve --model ResNet50_vd_model --port 9696 #cpu inference service -``` - -``` -python3 -m paddle_serving_server.serve --model ResNet50_vd_model --port 9696 --gpu_ids 0 #gpu inference service -``` - -### BRPC-Client -The client sends an inference request: -``` -python3 resnet50_rpc_client.py ResNet50_vd_client_config/serving_client_conf.prototxt -``` -*The server port in this example is 9696. - -### GRPC-Client/Http-Client -The client sends an inference request: -``` -python3 resnet50_http_client.py ResNet50_vd_client_config/serving_client_conf.prototxt -``` diff --git a/examples/Cpp/PaddleClas/imagenet/README_CN.md b/examples/Cpp/PaddleClas/imagenet/README_CN.md deleted file mode 100755 index 642bee3d0cbab98a48f2f09284ea887751752667..0000000000000000000000000000000000000000 --- a/examples/Cpp/PaddleClas/imagenet/README_CN.md +++ /dev/null @@ -1,41 +0,0 @@ -## 图像分类示例 - -(简体中文|[English](./README.md)) - -示例中采用ResNet50_vd模型执行imagenet 1000分类任务。 - -### 获取模型配置文件和样例数据 -``` -sh get_model.sh -``` - -### 安装数据预处理模块 - -``` -pip3 install paddle_serving_app -``` - -### 启动服务端(支持BRPC-Client、GRPC-Client、Http-Client) - -启动server端 -``` -python3 -m paddle_serving_server.serve --model ResNet50_vd_model --port 9696 #cpu预测服务 -``` - -``` -python3 -m paddle_serving_server.serve --model ResNet50_vd_model --port 9696 --gpu_ids 0 #gpu预测服务 -``` - -### BRPC-Client预测 -client端进行预测 -``` -python3 resnet50_rpc_client.py ResNet50_vd_client_config/serving_client_conf.prototxt -``` -*server端示例中服务端口为9696端口 - - -### GRPC-Client/Http-Client预测 -client端进行预测 -``` -python3 resnet50_http_client.py ResNet50_vd_client_config/serving_client_conf.prototxt -``` diff --git a/examples/Cpp/PaddleClas/imagenet/benchmark.py b/examples/Cpp/PaddleClas/imagenet/benchmark.py deleted file mode 100644 index 12b013bd2554f24430ad1810f971a340c4b6903e..0000000000000000000000000000000000000000 --- a/examples/Cpp/PaddleClas/imagenet/benchmark.py +++ /dev/null @@ -1,127 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License.
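-# This benchmark client preprocesses ImageNet sample images from
-# ./image_data/n01440764 and sends them to a Paddle Serving endpoint, either
-# over RPC (paddle_serving_client) or raw HTTP depending on --request;
-# per-request latency is recorded when FLAGS_serving_latency is set.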
-# pylint: disable=doc-string-missing - -from __future__ import unicode_literals, absolute_import -import os -import sys -import time -import requests -import json -import base64 -from paddle_serving_client import Client -from paddle_serving_client.utils import MultiThreadRunner -from paddle_serving_client.utils import benchmark_args, show_latency -from paddle_serving_app.reader import Sequential, File2Image, Resize -from paddle_serving_app.reader import CenterCrop, RGB2BGR, Transpose, Div, Normalize - -args = benchmark_args() - -seq_preprocess = Sequential([ - File2Image(), Resize(256), CenterCrop(224), RGB2BGR(), Transpose((2, 0, 1)), - Div(255), Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225], True) -]) - - -def single_func(idx, resource): - file_list = [] - turns = resource["turns"] - latency_flags = False - if os.getenv("FLAGS_serving_latency"): - latency_flags = True - latency_list = [] - for file_name in os.listdir("./image_data/n01440764"): - file_list.append(file_name) - img_list = [] - for i in range(1000): - img_list.append("./image_data/n01440764/" + file_list[i]) - profile_flags = False - if "FLAGS_profile_client" in os.environ and os.environ[ - "FLAGS_profile_client"]: - profile_flags = True - if args.request == "rpc": - fetch = ["score"] - client = Client() - client.load_client_config(args.model) - client.connect([resource["endpoint"][idx % len(resource["endpoint"])]]) - start = time.time() - for i in range(turns): - if args.batch_size >= 1: - l_start = time.time() - feed_batch = [] - i_start = time.time() - for bi in range(args.batch_size): - img = seq_preprocess(img_list[i]) - feed_batch.append({"image": img}) - i_end = time.time() - if profile_flags: - print("PROFILE\tpid:{}\timage_pre_0:{} image_pre_1:{}". - format(os.getpid(), - int(round(i_start * 1000000)), - int(round(i_end * 1000000)))) - - result = client.predict(feed=feed_batch, fetch=fetch) - l_end = time.time() - if latency_flags: - latency_list.append(l_end * 1000 - l_start * 1000) - else: - print("unsupported batch size {}".format(args.batch_size)) - - elif args.request == "http": - py_version = sys.version_info[0] - server = "http://" + resource["endpoint"][idx % len(resource[ - "endpoint"])] + "/image/prediction" - start = time.time() - for i in range(turns): - if py_version == 2: - image = base64.b64encode( - open("./image_data/n01440764/" + file_list[i]).read()) - else: - image_path = "./image_data/n01440764/" + file_list[i] - image = base64.b64encode(open(image_path, "rb").read()).decode( - "utf-8") - req = json.dumps({"feed": [{"image": image}], "fetch": ["score"]}) - r = requests.post( - server, data=req, headers={"Content-Type": "application/json"}) - end = time.time() - if latency_flags: - return [[end - start], latency_list] - return [[end - start]] - - -if __name__ == '__main__': - multi_thread_runner = MultiThreadRunner() - endpoint_list = [ - "127.0.0.1:9292", "127.0.0.1:9293", "127.0.0.1:9294", "127.0.0.1:9295" - ] - turns = 100 - start = time.time() - result = multi_thread_runner.run( - single_func, args.thread, {"endpoint": endpoint_list, - "turns": turns}) - #result = single_func(0, {"endpoint": endpoint_list}) - end = time.time() - total_cost = end - start - avg_cost = 0 - for i in range(args.thread): - avg_cost += result[0][i] - avg_cost = avg_cost / args.thread - print("total cost: {}s".format(end - start)) - print("each thread cost: {}s.".format(avg_cost)) - print("qps: {}samples/s".format(args.batch_size * args.thread * turns / - total_cost)) - if os.getenv("FLAGS_serving_latency"):
- show_latency(result[1]) diff --git a/examples/Cpp/PaddleClas/imagenet/benchmark.sh b/examples/Cpp/PaddleClas/imagenet/benchmark.sh deleted file mode 100644 index 99bda3c84b862032a574579d97ef03b59fee0b62..0000000000000000000000000000000000000000 --- a/examples/Cpp/PaddleClas/imagenet/benchmark.sh +++ /dev/null @@ -1,50 +0,0 @@ -rm profile_log* -export CUDA_VISIBLE_DEVICES=0,1,2,3 -export FLAGS_profile_server=1 -export FLAGS_profile_client=1 -python -m paddle_serving_server.serve --model $1 --port 9292 --thread 4 --gpu_ids 0,1,2,3 --mem_optim --ir_optim 2> elog > stdlog & - -sleep 5 -gpu_id=0 -# Save CPU and GPU utilization logs -if [ -d utilization ];then - rm -rf utilization -else - mkdir utilization -fi - -# Warm up -$PYTHONROOT/bin/python3 benchmark.py --thread 4 --batch_size 1 --model $2/serving_client_conf.prototxt --request rpc > profile 2>&1 -echo -e "import psutil\ncpu_utilization=psutil.cpu_percent(1,False)\nprint('CPU_UTILIZATION:', cpu_utilization)\n" > cpu_utilization.py - -for thread_num in 1 4 8 16 -do -for batch_size in 1 4 16 64 -do - job_bt=`date '+%Y%m%d%H%M%S'` - nvidia-smi --id=0 --query-compute-apps=used_memory --format=csv -lms 100 > gpu_use.log 2>&1 & - nvidia-smi --id=0 --query-gpu=utilization.gpu --format=csv -lms 100 > gpu_utilization.log 2>&1 & - gpu_memory_pid=$! - $PYTHONROOT/bin/python3 benchmark.py --thread $thread_num --batch_size $batch_size --model $2/serving_client_conf.prototxt --request rpc > profile 2>&1 - kill ${gpu_memory_pid} - kill `ps -ef|grep used_memory|awk '{print $2}'` - echo "model name :" $1 - echo "thread num :" $thread_num - echo "batch size :" $batch_size - echo "=================Done====================" - echo "model name :$1" >> profile_log_$1 - echo "batch size :$batch_size" >> profile_log_$1 - job_et=`date '+%Y%m%d%H%M%S'` - awk 'BEGIN {max = 0} {if(NR>1){if ($1 > max) max=$1}} END {print "MAX_GPU_MEMORY:", max}' gpu_use.log >> profile_log_$1 - awk 'BEGIN {max = 0} {if(NR>1){if ($1 > max) max=$1}} END {print "GPU_UTILIZATION:", max}' gpu_utilization.log >> profile_log_$1 - rm -rf gpu_use.log gpu_utilization.log - $PYTHONROOT/bin/python3 ../util/show_profile.py profile $thread_num >> profile_log_$1 - tail -n 8 profile >> profile_log_$1 - echo "" >> profile_log_$1 -done -done - -# Split the profile log per run -awk 'BEGIN{RS="\n\n"}{i++}{print > "ResNet_log_"i}' profile_log_$1 -mkdir $1_log && mv ResNet_log_* $1_log -ps -ef|grep 'serving'|grep -v grep|cut -c 9-15 | xargs kill -9 diff --git a/examples/Cpp/PaddleClas/imagenet/daisy.jpg b/examples/Cpp/PaddleClas/imagenet/daisy.jpg deleted file mode 100644 index 7edeca63e5f32e68550ef720d81f59df58a8eabc..0000000000000000000000000000000000000000 Binary files a/examples/Cpp/PaddleClas/imagenet/daisy.jpg and /dev/null differ diff --git a/examples/Cpp/PaddleClas/imagenet/data/n01440764_10026.JPEG b/examples/Cpp/PaddleClas/imagenet/data/n01440764_10026.JPEG deleted file mode 100644 index b985769e1c1f09585e67291a4926537186a40e49..0000000000000000000000000000000000000000 Binary files a/examples/Cpp/PaddleClas/imagenet/data/n01440764_10026.JPEG and /dev/null differ diff --git a/examples/Cpp/PaddleClas/imagenet/flower.jpg b/examples/Cpp/PaddleClas/imagenet/flower.jpg deleted file mode 100644 index 903f812c4ad87e7f608e895a8e6d26d596cc0b48..0000000000000000000000000000000000000000 Binary files a/examples/Cpp/PaddleClas/imagenet/flower.jpg and /dev/null differ diff --git a/examples/Cpp/PaddleClas/imagenet/get_model.sh b/examples/Cpp/PaddleClas/imagenet/get_model.sh deleted file mode 100644 index
e017cc5101771c30f0c83e17f203ac5bff8d8570..0000000000000000000000000000000000000000 --- a/examples/Cpp/PaddleClas/imagenet/get_model.sh +++ /dev/null @@ -1,7 +0,0 @@ -wget --no-check-certificate https://paddle-serving.bj.bcebos.com/imagenet-example/ResNet50_vd.tar.gz -tar -xzvf ResNet50_vd.tar.gz -wget --no-check-certificate https://paddle-serving.bj.bcebos.com/imagenet-example/ResNet101_vd.tar.gz -tar -xzvf ResNet101_vd.tar.gz - -wget --no-check-certificate https://paddle-serving.bj.bcebos.com/imagenet-example/image_data.tar.gz -tar -xzvf image_data.tar.gz diff --git a/examples/Cpp/PaddleClas/imagenet/imagenet.label b/examples/Cpp/PaddleClas/imagenet/imagenet.label deleted file mode 100644 index d7146735146ea1894173d6d0e20fb90af36be849..0000000000000000000000000000000000000000 --- a/examples/Cpp/PaddleClas/imagenet/imagenet.label +++ /dev/null @@ -1,1000 +0,0 @@ -tench, Tinca tinca, -goldfish, Carassius auratus, -great white shark, white shark, man-eater, man-eating shark, Carcharodon carcharias, -tiger shark, Galeocerdo cuvieri, -hammerhead, hammerhead shark, -electric ray, crampfish, numbfish, torpedo, -stingray, -cock, -hen, -ostrich, Struthio camelus, -brambling, Fringilla montifringilla, -goldfinch, Carduelis carduelis, -house finch, linnet, Carpodacus mexicanus, -junco, snowbird, -indigo bunting, indigo finch, indigo bird, Passerina cyanea, -robin, American robin, Turdus migratorius, -bulbul, -jay, -magpie, -chickadee, -water ouzel, dipper, -kite, -bald eagle, American eagle, Haliaeetus leucocephalus, -vulture, -great grey owl, great gray owl, Strix nebulosa, -European fire salamander, Salamandra salamandra, -common newt, Triturus vulgaris, -eft, -spotted salamander, Ambystoma maculatum, -axolotl, mud puppy, Ambystoma mexicanum, -bullfrog, Rana catesbeiana, -tree frog, tree-frog, -tailed frog, bell toad, ribbed toad, tailed toad, Ascaphus trui, -loggerhead, loggerhead turtle, Caretta caretta, -leatherback turtle, leatherback, leathery turtle, Dermochelys coriacea, -mud turtle, -terrapin, -box turtle, box tortoise, -banded gecko, -common iguana, iguana, Iguana iguana, -American chameleon, anole, Anolis carolinensis, -whiptail, whiptail lizard, -agama, -frilled lizard, Chlamydosaurus kingi, -alligator lizard, -Gila monster, Heloderma suspectum, -green lizard, Lacerta viridis, -African chameleon, Chamaeleo chamaeleon, -Komodo dragon, Komodo lizard, dragon lizard, giant lizard, Varanus komodoensis, -African crocodile, Nile crocodile, Crocodylus niloticus, -American alligator, Alligator mississipiensis, -triceratops, -thunder snake, worm snake, Carphophis amoenus, -ringneck snake, ring-necked snake, ring snake, -hognose snake, puff adder, sand viper, -green snake, grass snake, -king snake, kingsnake, -garter snake, grass snake, -water snake, -vine snake, -night snake, Hypsiglena torquata, -boa constrictor, Constrictor constrictor, -rock python, rock snake, Python sebae, -Indian cobra, Naja naja, -green mamba, -sea snake, -horned viper, cerastes, sand viper, horned asp, Cerastes cornutus, -diamondback, diamondback rattlesnake, Crotalus adamanteus, -sidewinder, horned rattlesnake, Crotalus cerastes, -trilobite, -harvestman, daddy longlegs, Phalangium opilio, -scorpion, -black and gold garden spider, Argiope aurantia, -barn spider, Araneus cavaticus, -garden spider, Aranea diademata, -black widow, Latrodectus mactans, -tarantula, -wolf spider, hunting spider, -tick, -centipede, -black grouse, -ptarmigan, -ruffed grouse, partridge, Bonasa umbellus, -prairie chicken, prairie grouse, prairie 
fowl, -peacock, -quail, -partridge, -African grey, African gray, Psittacus erithacus, -macaw, -sulphur-crested cockatoo, Kakatoe galerita, Cacatua galerita, -lorikeet, -coucal, -bee eater, -hornbill, -hummingbird, -jacamar, -toucan, -drake, -red-breasted merganser, Mergus serrator, -goose, -black swan, Cygnus atratus, -tusker, -echidna, spiny anteater, anteater, -platypus, duckbill, duckbilled platypus, duck-billed platypus, Ornithorhynchus anatinus, -wallaby, brush kangaroo, -koala, koala bear, kangaroo bear, native bear, Phascolarctos cinereus, -wombat, -jellyfish, -sea anemone, anemone, -brain coral, -flatworm, platyhelminth, -nematode, nematode worm, roundworm, -conch, -snail, -slug, -sea slug, nudibranch, -chiton, coat-of-mail shell, sea cradle, polyplacophore, -chambered nautilus, pearly nautilus, nautilus, -Dungeness crab, Cancer magister, -rock crab, Cancer irroratus, -fiddler crab, -king crab, Alaska crab, Alaskan king crab, Alaska king crab, Paralithodes camtschatica, -American lobster, Northern lobster, Maine lobster, Homarus americanus, -spiny lobster, langouste, rock lobster, crawfish, crayfish, sea crawfish, -crayfish, crawfish, crawdad, crawdaddy, -hermit crab, -isopod, -white stork, Ciconia ciconia, -black stork, Ciconia nigra, -spoonbill, -flamingo, -little blue heron, Egretta caerulea, -American egret, great white heron, Egretta albus, -bittern, -crane, -limpkin, Aramus pictus, -European gallinule, Porphyrio porphyrio, -American coot, marsh hen, mud hen, water hen, Fulica americana, -bustard, -ruddy turnstone, Arenaria interpres, -red-backed sandpiper, dunlin, Erolia alpina, -redshank, Tringa totanus, -dowitcher, -oystercatcher, oyster catcher, -pelican, -king penguin, Aptenodytes patagonica, -albatross, mollymawk, -grey whale, gray whale, devilfish, Eschrichtius gibbosus, Eschrichtius robustus, -killer whale, killer, orca, grampus, sea wolf, Orcinus orca, -dugong, Dugong dugon, -sea lion, -Chihuahua, -Japanese spaniel, -Maltese dog, Maltese terrier, Maltese, -Pekinese, Pekingese, Peke, -Shih-Tzu, -Blenheim spaniel, -papillon, -toy terrier, -Rhodesian ridgeback, -Afghan hound, Afghan, -basset, basset hound, -beagle, -bloodhound, sleuthhound, -bluetick, -black-and-tan coonhound, -Walker hound, Walker foxhound, -English foxhound, -redbone, -borzoi, Russian wolfhound, -Irish wolfhound, -Italian greyhound, -whippet, -Ibizan hound, Ibizan Podenco, -Norwegian elkhound, elkhound, -otterhound, otter hound, -Saluki, gazelle hound, -Scottish deerhound, deerhound, -Weimaraner, -Staffordshire bullterrier, Staffordshire bull terrier, -American Staffordshire terrier, Staffordshire terrier, American pit bull terrier, pit bull terrier, -Bedlington terrier, -Border terrier, -Kerry blue terrier, -Irish terrier, -Norfolk terrier, -Norwich terrier, -Yorkshire terrier, -wire-haired fox terrier, -Lakeland terrier, -Sealyham terrier, Sealyham, -Airedale, Airedale terrier, -cairn, cairn terrier, -Australian terrier, -Dandie Dinmont, Dandie Dinmont terrier, -Boston bull, Boston terrier, -miniature schnauzer, -giant schnauzer, -standard schnauzer, -Scotch terrier, Scottish terrier, Scottie, -Tibetan terrier, chrysanthemum dog, -silky terrier, Sydney silky, -soft-coated wheaten terrier, -West Highland white terrier, -Lhasa, Lhasa apso, -flat-coated retriever, -curly-coated retriever, -golden retriever, -Labrador retriever, -Chesapeake Bay retriever, -German short-haired pointer, -vizsla, Hungarian pointer, -English setter, -Irish setter, red setter, -Gordon setter, -Brittany spaniel, -clumber, 
clumber spaniel, -English springer, English springer spaniel, -Welsh springer spaniel, -cocker spaniel, English cocker spaniel, cocker, -Sussex spaniel, -Irish water spaniel, -kuvasz, -schipperke, -groenendael, -malinois, -briard, -kelpie, -komondor, -Old English sheepdog, bobtail, -Shetland sheepdog, Shetland sheep dog, Shetland, -collie, -Border collie, -Bouvier des Flandres, Bouviers des Flandres, -Rottweiler, -German shepherd, German shepherd dog, German police dog, alsatian, -Doberman, Doberman pinscher, -miniature pinscher, -Greater Swiss Mountain dog, -Bernese mountain dog, -Appenzeller, -EntleBucher, -boxer, -bull mastiff, -Tibetan mastiff, -French bulldog, -Great Dane, -Saint Bernard, St Bernard, -Eskimo dog, husky, -malamute, malemute, Alaskan malamute, -Siberian husky, -dalmatian, coach dog, carriage dog, -affenpinscher, monkey pinscher, monkey dog, -basenji, -pug, pug-dog, -Leonberg, -Newfoundland, Newfoundland dog, -Great Pyrenees, -Samoyed, Samoyede, -Pomeranian, -chow, chow chow, -keeshond, -Brabancon griffon, -Pembroke, Pembroke Welsh corgi, -Cardigan, Cardigan Welsh corgi, -toy poodle, -miniature poodle, -standard poodle, -Mexican hairless, -timber wolf, grey wolf, gray wolf, Canis lupus, -white wolf, Arctic wolf, Canis lupus tundrarum, -red wolf, maned wolf, Canis rufus, Canis niger, -coyote, prairie wolf, brush wolf, Canis latrans, -dingo, warrigal, warragal, Canis dingo, -dhole, Cuon alpinus, -African hunting dog, hyena dog, Cape hunting dog, Lycaon pictus, -hyena, hyaena, -red fox, Vulpes vulpes, -kit fox, Vulpes macrotis, -Arctic fox, white fox, Alopex lagopus, -grey fox, gray fox, Urocyon cinereoargenteus, -tabby, tabby cat, -tiger cat, -Persian cat, -Siamese cat, Siamese, -Egyptian cat, -cougar, puma, catamount, mountain lion, painter, panther, Felis concolor, -lynx, catamount, -leopard, Panthera pardus, -snow leopard, ounce, Panthera uncia, -jaguar, panther, Panthera onca, Felis onca, -lion, king of beasts, Panthera leo, -tiger, Panthera tigris, -cheetah, chetah, Acinonyx jubatus, -brown bear, bruin, Ursus arctos, -American black bear, black bear, Ursus americanus, Euarctos americanus, -ice bear, polar bear, Ursus Maritimus, Thalarctos maritimus, -sloth bear, Melursus ursinus, Ursus ursinus, -mongoose, -meerkat, mierkat, -tiger beetle, -ladybug, ladybeetle, lady beetle, ladybird, ladybird beetle, -ground beetle, carabid beetle, -long-horned beetle, longicorn, longicorn beetle, -leaf beetle, chrysomelid, -dung beetle, -rhinoceros beetle, -weevil, -fly, -bee, -ant, emmet, pismire, -grasshopper, hopper, -cricket, -walking stick, walkingstick, stick insect, -cockroach, roach, -mantis, mantid, -cicada, cicala, -leafhopper, -lacewing, lacewing fly, -"dragonfly, darning needle, devils darning needle, sewing needle, snake feeder, snake doctor, mosquito hawk, skeeter hawk", -damselfly, -admiral, -ringlet, ringlet butterfly, -monarch, monarch butterfly, milkweed butterfly, Danaus plexippus, -cabbage butterfly, -sulphur butterfly, sulfur butterfly, -lycaenid, lycaenid butterfly, -starfish, sea star, -sea urchin, -sea cucumber, holothurian, -wood rabbit, cottontail, cottontail rabbit, -hare, -Angora, Angora rabbit, -hamster, -porcupine, hedgehog, -fox squirrel, eastern fox squirrel, Sciurus niger, -marmot, -beaver, -guinea pig, Cavia cobaya, -sorrel, -zebra, -hog, pig, grunter, squealer, Sus scrofa, -wild boar, boar, Sus scrofa, -warthog, -hippopotamus, hippo, river horse, Hippopotamus amphibius, -ox, -water buffalo, water ox, Asiatic buffalo, Bubalus bubalis, -bison, -ram, 
tup, -bighorn, bighorn sheep, cimarron, Rocky Mountain bighorn, Rocky Mountain sheep, Ovis canadensis, -ibex, Capra ibex, -hartebeest, -impala, Aepyceros melampus, -gazelle, -Arabian camel, dromedary, Camelus dromedarius, -llama, -weasel, -mink, -polecat, fitch, foulmart, foumart, Mustela putorius, -black-footed ferret, ferret, Mustela nigripes, -otter, -skunk, polecat, wood pussy, -badger, -armadillo, -three-toed sloth, ai, Bradypus tridactylus, -orangutan, orang, orangutang, Pongo pygmaeus, -gorilla, Gorilla gorilla, -chimpanzee, chimp, Pan troglodytes, -gibbon, Hylobates lar, -siamang, Hylobates syndactylus, Symphalangus syndactylus, -guenon, guenon monkey, -patas, hussar monkey, Erythrocebus patas, -baboon, -macaque, -langur, -colobus, colobus monkey, -proboscis monkey, Nasalis larvatus, -marmoset, -capuchin, ringtail, Cebus capucinus, -howler monkey, howler, -titi, titi monkey, -spider monkey, Ateles geoffroyi, -squirrel monkey, Saimiri sciureus, -Madagascar cat, ring-tailed lemur, Lemur catta, -indri, indris, Indri indri, Indri brevicaudatus, -Indian elephant, Elephas maximus, -African elephant, Loxodonta africana, -lesser panda, red panda, panda, bear cat, cat bear, Ailurus fulgens, -giant panda, panda, panda bear, coon bear, Ailuropoda melanoleuca, -barracouta, snoek, -eel, -coho, cohoe, coho salmon, blue jack, silver salmon, Oncorhynchus kisutch, -rock beauty, Holocanthus tricolor, -anemone fish, -sturgeon, -gar, garfish, garpike, billfish, Lepisosteus osseus, -lionfish, -puffer, pufferfish, blowfish, globefish, -abacus, -abaya, -"academic gown, academic robe, judges robe", -accordion, piano accordion, squeeze box, -acoustic guitar, -aircraft carrier, carrier, flattop, attack aircraft carrier, -airliner, -airship, dirigible, -altar, -ambulance, -amphibian, amphibious vehicle, -analog clock, -apiary, bee house, -apron, -ashcan, trash can, garbage can, wastebin, ash bin, ash-bin, ashbin, dustbin, trash barrel, trash bin, -assault rifle, assault gun, -backpack, back pack, knapsack, packsack, rucksack, haversack, -bakery, bakeshop, bakehouse, -balance beam, beam, -balloon, -ballpoint, ballpoint pen, ballpen, Biro, -Band Aid, -banjo, -bannister, banister, balustrade, balusters, handrail, -barbell, -barber chair, -barbershop, -barn, -barometer, -barrel, cask, -barrow, garden cart, lawn cart, wheelbarrow, -baseball, -basketball, -bassinet, -bassoon, -bathing cap, swimming cap, -bath towel, -bathtub, bathing tub, bath, tub, -beach wagon, station wagon, wagon, estate car, beach waggon, station waggon, waggon, -beacon, lighthouse, beacon light, pharos, -beaker, -bearskin, busby, shako, -beer bottle, -beer glass, -bell cote, bell cot, -bib, -bicycle-built-for-two, tandem bicycle, tandem, -bikini, two-piece, -binder, ring-binder, -binoculars, field glasses, opera glasses, -birdhouse, -boathouse, -bobsled, bobsleigh, bob, -bolo tie, bolo, bola tie, bola, -bonnet, poke bonnet, -bookcase, -bookshop, bookstore, bookstall, -bottlecap, -bow, -bow tie, bow-tie, bowtie, -brass, memorial tablet, plaque, -brassiere, bra, bandeau, -breakwater, groin, groyne, mole, bulwark, seawall, jetty, -breastplate, aegis, egis, -broom, -bucket, pail, -buckle, -bulletproof vest, -bullet train, bullet, -butcher shop, meat market, -cab, hack, taxi, taxicab, -caldron, cauldron, -candle, taper, wax light, -cannon, -canoe, -can opener, tin opener, -cardigan, -car mirror, -carousel, carrousel, merry-go-round, roundabout, whirligig, -"carpenters kit, tool kit", -carton, -car wheel, -cash machine, cash dispenser, automated 
teller machine, automatic teller machine, automated teller, automatic teller, ATM, -cassette, -cassette player, -castle, -catamaran, -CD player, -cello, violoncello, -cellular telephone, cellular phone, cellphone, cell, mobile phone, -chain, -chainlink fence, -chain mail, ring mail, mail, chain armor, chain armour, ring armor, ring armour, -chain saw, chainsaw, -chest, -chiffonier, commode, -chime, bell, gong, -china cabinet, china closet, -Christmas stocking, -church, church building, -cinema, movie theater, movie theatre, movie house, picture palace, -cleaver, meat cleaver, chopper, -cliff dwelling, -cloak, -clog, geta, patten, sabot, -cocktail shaker, -coffee mug, -coffeepot, -coil, spiral, volute, whorl, helix, -combination lock, -computer keyboard, keypad, -confectionery, confectionary, candy store, -container ship, containership, container vessel, -convertible, -corkscrew, bottle screw, -cornet, horn, trumpet, trump, -cowboy boot, -cowboy hat, ten-gallon hat, -cradle, -crane, -crash helmet, -crate, -crib, cot, -Crock Pot, -croquet ball, -crutch, -cuirass, -dam, dike, dyke, -desk, -desktop computer, -dial telephone, dial phone, -diaper, nappy, napkin, -digital clock, -digital watch, -dining table, board, -dishrag, dishcloth, -dishwasher, dish washer, dishwashing machine, -disk brake, disc brake, -dock, dockage, docking facility, -dogsled, dog sled, dog sleigh, -dome, -doormat, welcome mat, -drilling platform, offshore rig, -drum, membranophone, tympan, -drumstick, -dumbbell, -Dutch oven, -electric fan, blower, -electric guitar, -electric locomotive, -entertainment center, -envelope, -espresso maker, -face powder, -feather boa, boa, -file, file cabinet, filing cabinet, -fireboat, -fire engine, fire truck, -fire screen, fireguard, -flagpole, flagstaff, -flute, transverse flute, -folding chair, -football helmet, -forklift, -fountain, -fountain pen, -four-poster, -freight car, -French horn, horn, -frying pan, frypan, skillet, -fur coat, -garbage truck, dustcart, -gasmask, respirator, gas helmet, -gas pump, gasoline pump, petrol pump, island dispenser, -goblet, -go-kart, -golf ball, -golfcart, golf cart, -gondola, -gong, tam-tam, -gown, -grand piano, grand, -greenhouse, nursery, glasshouse, -grille, radiator grille, -grocery store, grocery, food market, market, -guillotine, -hair slide, -hair spray, -half track, -hammer, -hamper, -hand blower, blow dryer, blow drier, hair dryer, hair drier, -hand-held computer, hand-held microcomputer, -handkerchief, hankie, hanky, hankey, -hard disc, hard disk, fixed disk, -harmonica, mouth organ, harp, mouth harp, -harp, -harvester, reaper, -hatchet, -holster, -home theater, home theatre, -honeycomb, -hook, claw, -hoopskirt, crinoline, -horizontal bar, high bar, -horse cart, horse-cart, -hourglass, -iPod, -iron, smoothing iron, -"jack-o-lantern", -jean, blue jean, denim, -jeep, landrover, -jersey, T-shirt, tee shirt, -jigsaw puzzle, -jinrikisha, ricksha, rickshaw, -joystick, -kimono, -knee pad, -knot, -lab coat, laboratory coat, -ladle, -lampshade, lamp shade, -laptop, laptop computer, -lawn mower, mower, -lens cap, lens cover, -letter opener, paper knife, paperknife, -library, -lifeboat, -lighter, light, igniter, ignitor, -limousine, limo, -liner, ocean liner, -lipstick, lip rouge, -Loafer, -lotion, -loudspeaker, speaker, speaker unit, loudspeaker system, speaker system, -"loupe, jewelers loupe", -lumbermill, sawmill, -magnetic compass, -mailbag, postbag, -mailbox, letter box, -maillot, -maillot, tank suit, -manhole cover, -maraca, -marimba, xylophone, 
-mask, -matchstick, -maypole, -maze, labyrinth, -measuring cup, -medicine chest, medicine cabinet, -megalith, megalithic structure, -microphone, mike, -microwave, microwave oven, -military uniform, -milk can, -minibus, -miniskirt, mini, -minivan, -missile, -mitten, -mixing bowl, -mobile home, manufactured home, -Model T, -modem, -monastery, -monitor, -moped, -mortar, -mortarboard, -mosque, -mosquito net, -motor scooter, scooter, -mountain bike, all-terrain bike, off-roader, -mountain tent, -mouse, computer mouse, -mousetrap, -moving van, -muzzle, -nail, -neck brace, -necklace, -nipple, -notebook, notebook computer, -obelisk, -oboe, hautboy, hautbois, -ocarina, sweet potato, -odometer, hodometer, mileometer, milometer, -oil filter, -organ, pipe organ, -oscilloscope, scope, cathode-ray oscilloscope, CRO, -overskirt, -oxcart, -oxygen mask, -packet, -paddle, boat paddle, -paddlewheel, paddle wheel, -padlock, -paintbrush, -"pajama, pyjama, pjs, jammies", -palace, -panpipe, pandean pipe, syrinx, -paper towel, -parachute, chute, -parallel bars, bars, -park bench, -parking meter, -passenger car, coach, carriage, -patio, terrace, -pay-phone, pay-station, -pedestal, plinth, footstall, -pencil box, pencil case, -pencil sharpener, -perfume, essence, -Petri dish, -photocopier, -pick, plectrum, plectron, -pickelhaube, -picket fence, paling, -pickup, pickup truck, -pier, -piggy bank, penny bank, -pill bottle, -pillow, -ping-pong ball, -pinwheel, -pirate, pirate ship, -pitcher, ewer, -"plane, carpenters plane, woodworking plane", -planetarium, -plastic bag, -plate rack, -plow, plough, -"plunger, plumbers helper", -Polaroid camera, Polaroid Land camera, -pole, -police van, police wagon, paddy wagon, patrol wagon, wagon, black Maria, -poncho, -pool table, billiard table, snooker table, -pop bottle, soda bottle, -pot, flowerpot, -"potters wheel", -power drill, -prayer rug, prayer mat, -printer, -prison, prison house, -projectile, missile, -projector, -puck, hockey puck, -punching bag, punch bag, punching ball, punchball, -purse, -quill, quill pen, -quilt, comforter, comfort, puff, -racer, race car, racing car, -racket, racquet, -radiator, -radio, wireless, -radio telescope, radio reflector, -rain barrel, -recreational vehicle, RV, R.V., -reel, -reflex camera, -refrigerator, icebox, -remote control, remote, -restaurant, eating house, eating place, eatery, -revolver, six-gun, six-shooter, -rifle, -rocking chair, rocker, -rotisserie, -rubber eraser, rubber, pencil eraser, -rugby ball, -rule, ruler, -running shoe, -safe, -safety pin, -saltshaker, salt shaker, -sandal, -sarong, -sax, saxophone, -scabbard, -scale, weighing machine, -school bus, -schooner, -scoreboard, -screen, CRT screen, -screw, -screwdriver, -seat belt, seatbelt, -sewing machine, -shield, buckler, -shoe shop, shoe-shop, shoe store, -shoji, -shopping basket, -shopping cart, -shovel, -shower cap, -shower curtain, -ski, -ski mask, -sleeping bag, -slide rule, slipstick, -sliding door, -slot, one-armed bandit, -snorkel, -snowmobile, -snowplow, snowplough, -soap dispenser, -soccer ball, -sock, -solar dish, solar collector, solar furnace, -sombrero, -soup bowl, -space bar, -space heater, -space shuttle, -spatula, -speedboat, -"spider web, spiders web", -spindle, -sports car, sport car, -spotlight, spot, -stage, -steam locomotive, -steel arch bridge, -steel drum, -stethoscope, -stole, -stone wall, -stopwatch, stop watch, -stove, -strainer, -streetcar, tram, tramcar, trolley, trolley car, -stretcher, -studio couch, day bed, -stupa, tope, -submarine, 
pigboat, sub, U-boat, -suit, suit of clothes, -sundial, -sunglass, -sunglasses, dark glasses, shades, -sunscreen, sunblock, sun blocker, -suspension bridge, -swab, swob, mop, -sweatshirt, -swimming trunks, bathing trunks, -swing, -switch, electric switch, electrical switch, -syringe, -table lamp, -tank, army tank, armored combat vehicle, armoured combat vehicle, -tape player, -teapot, -teddy, teddy bear, -television, television system, -tennis ball, -thatch, thatched roof, -theater curtain, theatre curtain, -thimble, -thresher, thrasher, threshing machine, -throne, -tile roof, -toaster, -tobacco shop, tobacconist shop, tobacconist, -toilet seat, -torch, -totem pole, -tow truck, tow car, wrecker, -toyshop, -tractor, -trailer truck, tractor trailer, trucking rig, rig, articulated lorry, semi, -tray, -trench coat, -tricycle, trike, velocipede, -trimaran, -tripod, -triumphal arch, -trolleybus, trolley coach, trackless trolley, -trombone, -tub, vat, -turnstile, -typewriter keyboard, -umbrella, -unicycle, monocycle, -upright, upright piano, -vacuum, vacuum cleaner, -vase, -vault, -velvet, -vending machine, -vestment, -viaduct, -violin, fiddle, -volleyball, -waffle iron, -wall clock, -wallet, billfold, notecase, pocketbook, -wardrobe, closet, press, -warplane, military plane, -washbasin, handbasin, washbowl, lavabo, wash-hand basin, -washer, automatic washer, washing machine, -water bottle, -water jug, -water tower, -whiskey jug, -whistle, -wig, -window screen, -window shade, -Windsor tie, -wine bottle, -wing, -wok, -wooden spoon, -wool, woolen, woollen, -worm fence, snake fence, snake-rail fence, Virginia fence, -wreck, -yawl, -yurt, -web site, website, internet site, site, -comic book, -crossword puzzle, crossword, -street sign, -traffic light, traffic signal, stoplight, -book jacket, dust cover, dust jacket, dust wrapper, -menu, -plate, -guacamole, -consomme, -hot pot, hotpot, -trifle, -ice cream, icecream, -ice lolly, lolly, lollipop, popsicle, -French loaf, -bagel, beigel, -pretzel, -cheeseburger, -hotdog, hot dog, red hot, -mashed potato, -head cabbage, -broccoli, -cauliflower, -zucchini, courgette, -spaghetti squash, -acorn squash, -butternut squash, -cucumber, cuke, -artichoke, globe artichoke, -bell pepper, -cardoon, -mushroom, -Granny Smith, -strawberry, -orange, -lemon, -fig, -pineapple, ananas, -banana, -jackfruit, jak, jack, -custard apple, -pomegranate, -hay, -carbonara, -chocolate sauce, chocolate syrup, -dough, -meat loaf, meatloaf, -pizza, pizza pie, -potpie, -burrito, -red wine, -espresso, -cup, -eggnog, -alp, -bubble, -cliff, drop, drop-off, -coral reef, -geyser, -lakeside, lakeshore, -promontory, headland, head, foreland, -sandbar, sand bar, -seashore, coast, seacoast, sea-coast, -valley, vale, -volcano, -ballplayer, baseball player, -groom, bridegroom, -scuba diver, -rapeseed, -daisy, -"yellow ladys slipper, yellow lady-slipper, Cypripedium calceolus, Cypripedium parviflorum", -corn, -acorn, -hip, rose hip, rosehip, -buckeye, horse chestnut, conker, -coral fungus, -agaric, -gyromitra, -stinkhorn, carrion fungus, -earthstar, -hen-of-the-woods, hen of the woods, Polyporus frondosus, Grifola frondosa, -bolete, -ear, spike, capitulum, -toilet tissue, toilet paper, bathroom tissue diff --git a/examples/Cpp/PaddleClas/imagenet/resnet50_http_client.py b/examples/Cpp/PaddleClas/imagenet/resnet50_http_client.py deleted file mode 100644 index 77782671b72a1fa41e65ca02b3edeb2a7753face..0000000000000000000000000000000000000000 --- a/examples/Cpp/PaddleClas/imagenet/resnet50_http_client.py 
+++ /dev/null @@ -1,67 +0,0 @@ -# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import sys -from paddle_serving_client import HttpClient -from paddle_serving_app.reader import Sequential, URL2Image, Resize -from paddle_serving_app.reader import CenterCrop, RGB2BGR, Transpose, Div, Normalize -import time - -client = HttpClient() -client.load_client_config(sys.argv[1]) -''' -To use the GRPC client, call set_use_grpc_client(True), -or directly call client.grpc_client_predict(...). -For the HTTP client, call set_use_grpc_client(False) (the default), -or directly call client.http_client_predict(...). -''' -#client.set_use_grpc_client(True) -''' -To enable the encryption module, uncomment the following line. -''' -#client.use_key("./key") -''' -To enable compression, uncomment the following lines. -''' -#client.set_response_compress(True) -#client.set_request_compress(True) -''' -We recommend the Proto data format in the HTTP body: set True (the default). -To use the JSON data format in the HTTP body, set False. -''' -#client.set_http_proto(True) -client.connect(["127.0.0.1:9696"]) - -label_dict = {} -label_idx = 0 -with open("imagenet.label") as fin: - for line in fin: - label_dict[label_idx] = line.strip() - label_idx += 1 - -seq = Sequential([ - URL2Image(), Resize(256), CenterCrop(224), RGB2BGR(), Transpose((2, 0, 1)), - Div(255), Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225], True) -]) - -start = time.time() -image_file = "https://paddle-serving.bj.bcebos.com/imagenet-example/daisy.jpg" -for i in range(10): - img = seq(image_file) - fetch_map = client.predict( - feed={"image": img}, fetch=["score"], batch=False) - print(fetch_map) - -end = time.time() -print(end - start) diff --git a/examples/Cpp/PaddleClas/imagenet/resnet50_rpc_client.py b/examples/Cpp/PaddleClas/imagenet/resnet50_rpc_client.py deleted file mode 100644 index b23f99175b97a011c3b1c72d3b7358b646c54e68..0000000000000000000000000000000000000000 --- a/examples/Cpp/PaddleClas/imagenet/resnet50_rpc_client.py +++ /dev/null @@ -1,49 +0,0 @@ -# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License.
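-# This client sends ten RPC requests to the ResNet50_vd service on port 9696:
-# it fetches and preprocesses the daisy image via URL2Image, then maps the
-# returned "score" vector to a human-readable ImageNet label.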
- -import sys -from paddle_serving_client import Client -from paddle_serving_app.reader import Sequential, URL2Image, Resize -from paddle_serving_app.reader import CenterCrop, RGB2BGR, Transpose, Div, Normalize -import time - -client = Client() -client.load_client_config(sys.argv[1]) -client.connect(["127.0.0.1:9696"]) - -label_dict = {} -label_idx = 0 -with open("imagenet.label") as fin: - for line in fin: - label_dict[label_idx] = line.strip() - label_idx += 1 - -seq = Sequential([ - URL2Image(), Resize(256), CenterCrop(224), RGB2BGR(), Transpose((2, 0, 1)), - Div(255), Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225], True) -]) - -start = time.time() -image_file = "https://paddle-serving.bj.bcebos.com/imagenet-example/daisy.jpg" -for i in range(10): - img = seq(image_file) - fetch_map = client.predict( - feed={"image": img}, fetch=["score"], batch=False) - prob = max(fetch_map["score"][0]) - label = label_dict[fetch_map["score"][0].tolist().index(prob)].strip( - ).replace(",", "") - print("prediction: {}, probability: {}".format(label, prob)) - -end = time.time() -print(end - start) diff --git a/examples/Cpp/PaddleClas/imagenet/test_image_reader.py b/examples/Cpp/PaddleClas/imagenet/test_image_reader.py deleted file mode 100644 index b3e1aac786360838304e03ec284076ea834ae888..0000000000000000000000000000000000000000 --- a/examples/Cpp/PaddleClas/imagenet/test_image_reader.py +++ /dev/null @@ -1,37 +0,0 @@ -# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
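-# Smoke tests for the image readers: both should decode daisy.jpg into a
-# 563x500x3 ndarray, once from a raw byte string (String2Image) and once
-# from base64-encoded input (Base64ToImage).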
- -from paddle_serving_app.reader.image_reader import String2Image, Base64ToImage, Sequential -import base64 - - -def test_String2Image(): - with open("./daisy.jpg", "rb") as f: - img_str = f.read() - seq = Sequential([String2Image()]) - img = seq(img_str) - assert (img.shape == (563, 500, 3)) - - -def test_Base64ToImage(): - with open("./daisy.jpg", "rb") as f: - img_str = f.read() - seq = Sequential([Base64ToImage()]) - img = seq(base64.b64encode(img_str)) - assert (img.shape == (563, 500, 3)) - - -if __name__ == "__main__": - test_String2Image() - test_Base64ToImage() diff --git a/examples/Cpp/PaddleClas/mobilenet/README.md b/examples/Cpp/PaddleClas/mobilenet/README.md deleted file mode 100644 index 1a16b749220bdf8e6db0dd8950fc505620cbc8fc..0000000000000000000000000000000000000000 --- a/examples/Cpp/PaddleClas/mobilenet/README.md +++ /dev/null @@ -1,22 +0,0 @@ -# Image Classification - -## Get Model - -``` -python3 -m paddle_serving_app.package --get_model mobilenet_v2_imagenet -tar -xzvf mobilenet_v2_imagenet.tar.gz -``` - -## RPC Service - -### Start Service - -``` -python3 -m paddle_serving_server.serve --model mobilenet_v2_imagenet_model --gpu_ids 0 --port 9393 -``` - -### Client Prediction - -``` -python3 mobilenet_tutorial.py -``` diff --git a/examples/Cpp/PaddleClas/mobilenet/README_CN.md b/examples/Cpp/PaddleClas/mobilenet/README_CN.md deleted file mode 100644 index 68474e5d80afdec183cb5bac0e9ebfc13a7f9ac6..0000000000000000000000000000000000000000 --- a/examples/Cpp/PaddleClas/mobilenet/README_CN.md +++ /dev/null @@ -1,22 +0,0 @@ -# 图像分类 - -## 获取模型 - -``` -python3 -m paddle_serving_app.package --get_model mobilenet_v2_imagenet -tar -xzvf mobilenet_v2_imagenet.tar.gz -``` - -## RPC 服务 - -### 启动服务端 - -``` -python3 -m paddle_serving_server.serve --model mobilenet_v2_imagenet_model --gpu_ids 0 --port 9393 -``` - -### 客户端预测 - -``` -python3 mobilenet_tutorial.py -``` diff --git a/examples/Cpp/PaddleClas/mobilenet/daisy.jpg b/examples/Cpp/PaddleClas/mobilenet/daisy.jpg deleted file mode 100644 index 7edeca63e5f32e68550ef720d81f59df58a8eabc..0000000000000000000000000000000000000000 Binary files a/examples/Cpp/PaddleClas/mobilenet/daisy.jpg and /dev/null differ diff --git a/examples/Cpp/PaddleClas/mobilenet/mobilenet_tutorial.py b/examples/Cpp/PaddleClas/mobilenet/mobilenet_tutorial.py deleted file mode 100644 index 9550a5ff705d23d3f6a97d8498d5a8b1e4f152b7..0000000000000000000000000000000000000000 --- a/examples/Cpp/PaddleClas/mobilenet/mobilenet_tutorial.py +++ /dev/null @@ -1,32 +0,0 @@ -# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License.
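-# Minimal RPC client for the mobilenet_v2_imagenet service on port 9393:
-# applies the standard ImageNet preprocessing pipeline to daisy.jpg and
-# prints the flattened "feature_map" output.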
- -from paddle_serving_client import Client -from paddle_serving_app.reader import Sequential, File2Image, Resize -from paddle_serving_app.reader import CenterCrop, RGB2BGR, Transpose, Div, Normalize - -client = Client() -client.load_client_config( - "mobilenet_v2_imagenet_client/serving_client_conf.prototxt") -client.connect(["127.0.0.1:9393"]) - -seq = Sequential([ - File2Image(), Resize(256), CenterCrop(224), RGB2BGR(), Transpose((2, 0, 1)), - Div(255), Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225], True) -]) - -image_file = "daisy.jpg" -img = seq(image_file) -fetch_map = client.predict(feed={"image": img}, fetch=["feature_map"]) -print(fetch_map["feature_map"].reshape(-1)) diff --git a/examples/Cpp/PaddleClas/resnet_v2_50/README.md b/examples/Cpp/PaddleClas/resnet_v2_50/README.md deleted file mode 100644 index 12144b0ea9836c9eb647fa6482db244f1030354b..0000000000000000000000000000000000000000 --- a/examples/Cpp/PaddleClas/resnet_v2_50/README.md +++ /dev/null @@ -1,22 +0,0 @@ -# Image Classification - -## Get Model - -``` -python3 -m paddle_serving_app.package --get_model resnet_v2_50_imagenet -tar -xzvf resnet_v2_50_imagenet.tar.gz -``` - -## RPC Service - -### Start Service - -``` -python3 -m paddle_serving_server.serve --model resnet_v2_50_imagenet_model --gpu_ids 0 --port 9393 -``` - -### Client Prediction - -``` -python3 resnet50_v2_tutorial.py -``` diff --git a/examples/Cpp/PaddleClas/resnet_v2_50/README_CN.md b/examples/Cpp/PaddleClas/resnet_v2_50/README_CN.md deleted file mode 100644 index fee0e01f3cbac29052e4ae931027574ab6f778a0..0000000000000000000000000000000000000000 --- a/examples/Cpp/PaddleClas/resnet_v2_50/README_CN.md +++ /dev/null @@ -1,22 +0,0 @@ -# 图像分类 - -## 获取模型 - -``` -python3 -m paddle_serving_app.package --get_model resnet_v2_50_imagenet -tar -xzvf resnet_v2_50_imagenet.tar.gz -``` - -## RPC 服务 - -### 启动服务端 - -``` -python3 -m paddle_serving_server.serve --model resnet_v2_50_imagenet_model --gpu_ids 0 --port 9393 -``` - -### 客户端预测 - -``` -python3 resnet50_v2_tutorial.py -``` diff --git a/examples/Cpp/PaddleClas/resnet_v2_50/benchmark.py b/examples/Cpp/PaddleClas/resnet_v2_50/benchmark.py deleted file mode 100644 index c232d84ed603d441885fdccb5230581632b3daa4..0000000000000000000000000000000000000000 --- a/examples/Cpp/PaddleClas/resnet_v2_50/benchmark.py +++ /dev/null @@ -1,107 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
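-# Multi-threaded RPC benchmark for the resnet_v2_50 service: each thread
-# repeatedly preprocesses daisy.jpg, expands it to the requested batch size,
-# times every predict() call, and reports aggregate throughput and latency.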
-# pylint: disable=doc-string-missing - -from __future__ import unicode_literals, absolute_import -import os -import sys -import time -import json -import requests -import numpy as np -from paddle_serving_client import Client -from paddle_serving_client.utils import MultiThreadRunner -from paddle_serving_client.utils import benchmark_args, show_latency -from paddle_serving_app.reader import Sequential, File2Image, Resize, CenterCrop -from paddle_serving_app.reader import RGB2BGR, Transpose, Div, Normalize - -args = benchmark_args() - - -def single_func(idx, resource): - total_number = 0 - turns = resource["turns"] - profile_flags = False - latency_flags = False - if os.getenv("FLAGS_profile_client"): - profile_flags = True - if os.getenv("FLAGS_serving_latency"): - latency_flags = True - latency_list = [] - - if args.request == "rpc": - client = Client() - client.load_client_config(args.model) - client.connect([resource["endpoint"][idx % len(resource["endpoint"])]]) - start = time.time() - for i in range(turns): - if args.batch_size >= 1: - l_start = time.time() - seq = Sequential([ - File2Image(), Resize(256), CenterCrop(224), RGB2BGR(), - Transpose((2, 0, 1)), Div(255), Normalize( - [0.485, 0.456, 0.406], [0.229, 0.224, 0.225], True) - ]) - image_file = "daisy.jpg" - img = seq(image_file) - feed_data = np.array(img) - feed_data = np.expand_dims(feed_data, 0).repeat( - args.batch_size, axis=0) - result = client.predict( - feed={"image": feed_data}, - fetch=["save_infer_model/scale_0.tmp_0"], - batch=True) - l_end = time.time() - if latency_flags: - latency_list.append(l_end * 1000 - l_start * 1000) - total_number = total_number + 1 - else: - print("unsupported batch size {}".format(args.batch_size)) - - else: - raise ValueError("not implemented {} request".format(args.request)) - end = time.time() - if latency_flags: - return [[end - start], latency_list, [total_number]] - else: - return [[end - start]] - - -if __name__ == '__main__': - multi_thread_runner = MultiThreadRunner() - endpoint_list = ["127.0.0.1:9393"] - turns = 1 - start = time.time() - result = multi_thread_runner.run( - single_func, args.thread, {"endpoint": endpoint_list, - "turns": turns}) - end = time.time() - total_cost = end - start - total_number = 0 - avg_cost = 0 - for i in range(args.thread): - avg_cost += result[0][i] - total_number += result[2][i] - avg_cost = avg_cost / args.thread - - print("total cost (including init): {}s".format(total_cost)) - print("each thread cost: {}s.".format(avg_cost)) - print("qps: {}samples/s".format(args.batch_size * total_number / ( - avg_cost * args.thread))) - print("qps(request): {}samples/s".format(total_number / (avg_cost * - args.thread))) - print("total count: {} ".format(total_number)) - if os.getenv("FLAGS_serving_latency"): - show_latency(result[1]) diff --git a/examples/Cpp/PaddleClas/resnet_v2_50/benchmark.sh b/examples/Cpp/PaddleClas/resnet_v2_50/benchmark.sh deleted file mode 100644 index 0f94276f4ad70e27addfadd2fe65b2056f2d30b3..0000000000000000000000000000000000000000 --- a/examples/Cpp/PaddleClas/resnet_v2_50/benchmark.sh +++ /dev/null @@ -1,58 +0,0 @@ -rm profile_log* -rm -rf resnet_log* -export CUDA_VISIBLE_DEVICES=0,1,2,3 -export FLAGS_profile_server=1 -export FLAGS_profile_client=1 -export FLAGS_serving_latency=1 -gpu_id=3 -# Save CPU and GPU utilization logs -if [ -d utilization ];then - rm -rf utilization -else - mkdir utilization -fi -# Start the server -python3.6 -m paddle_serving_server.serve --model $1 --port 9393 --thread 10 --gpu_ids $gpu_id --use_trt --ir_optim > elog 2>&1 & -sleep 15 - -# Warm up -python3.6 benchmark.py --thread 1 --batch_size 1 --model $2/serving_client_conf.prototxt --request rpc > profile 2>&1 -echo -e "import psutil\nimport time\nwhile True:\n\tcpu_res = psutil.cpu_percent()\n\twith open('cpu.txt', 'a+') as f:\n\t\tf.write(f'{cpu_res}\\\n')\n\ttime.sleep(0.1)" > cpu.py -for thread_num in 1 2 4 8 16 -do -for batch_size in 1 4 8 16 32 -do - job_bt=`date '+%Y%m%d%H%M%S'` - nvidia-smi --id=$gpu_id --query-compute-apps=used_memory --format=csv -lms 100 > gpu_memory_use.log 2>&1 & - nvidia-smi --id=$gpu_id --query-gpu=utilization.gpu --format=csv -lms 100 > gpu_utilization.log 2>&1 & - rm -rf cpu.txt - python3.6 cpu.py & - gpu_memory_pid=$! - python3.6 benchmark.py --thread $thread_num --batch_size $batch_size --model $2/serving_client_conf.prototxt --request rpc > profile 2>&1 - kill `ps -ef|grep used_memory|awk '{print $2}'` > /dev/null - kill `ps -ef|grep utilization.gpu|awk '{print $2}'` > /dev/null - kill `ps -ef|grep cpu.py|awk '{print $2}'` > /dev/null - echo "model_name:" $1 - echo "thread_num:" $thread_num - echo "batch_size:" $batch_size - echo "=================Done====================" - echo "model_name:$1" >> profile_log_$1 - echo "batch_size:$batch_size" >> profile_log_$1 - job_et=`date '+%Y%m%d%H%M%S'` - awk 'BEGIN {max = 0} {if(NR>1){if ($1 > max) max=$1}} END {print "CPU_UTILIZATION:", max}' cpu.txt >> profile_log_$1 - #awk 'BEGIN {max = 0} {if(NR>1){if ($1 > max) max=$1}} END {print "MAX_GPU_MEMORY:", max}' gpu_memory_use.log >> profile_log_$1 - #awk 'BEGIN {max = 0} {if(NR>1){if ($1 > max) max=$1}} END {print "GPU_UTILIZATION:", max}' gpu_utilization.log >> profile_log_$1 - grep -av '^0 %' gpu_utilization.log > gpu_utilization.log.tmp - awk 'BEGIN {max = 0} {if(NR>1){if ($1 > max) max=$1}} END {print "MAX_GPU_MEMORY:", max}' gpu_memory_use.log >> profile_log_$1 - awk -F" " '{sum+=$1} END {print "GPU_UTILIZATION:", sum/NR, sum, NR }' gpu_utilization.log.tmp >> profile_log_$1 - rm -rf gpu_memory_use.log gpu_utilization.log gpu_utilization.log.tmp - python3.6 ../util/show_profile.py profile $thread_num >> profile_log_$1 - tail -n 10 profile >> profile_log_$1 - echo "" >> profile_log_$1 -done -done - -# Split the profile log per run -awk 'BEGIN{RS="\n\n"}{i++}{print > "resnet_log_"i}' profile_log_$1 -mkdir resnet_log && mv resnet_log_* resnet_log -ps -ef|grep 'serving'|grep -v grep|cut -c 9-15 | xargs kill -9 diff --git a/examples/Cpp/PaddleClas/resnet_v2_50/daisy.jpg
b/examples/Cpp/PaddleClas/resnet_v2_50/daisy.jpg deleted file mode 100644 index 7edeca63e5f32e68550ef720d81f59df58a8eabc..0000000000000000000000000000000000000000 Binary files a/examples/Cpp/PaddleClas/resnet_v2_50/daisy.jpg and /dev/null differ diff --git a/examples/Cpp/PaddleClas/resnet_v2_50/resnet50_debug.py b/examples/Cpp/PaddleClas/resnet_v2_50/resnet50_debug.py deleted file mode 100644 index 6919b4903686817cdfbb89932396e6db28552ab3..0000000000000000000000000000000000000000 --- a/examples/Cpp/PaddleClas/resnet_v2_50/resnet50_debug.py +++ /dev/null @@ -1,31 +0,0 @@ -# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from paddle_serving_app.reader import Sequential, File2Image, Resize, CenterCrop -from paddle_serving_app.reader import RGB2BGR, Transpose, Div, Normalize -from paddle_serving_app.local_predict import LocalPredictor -import sys - -debugger = LocalPredictor() -debugger.load_model_config(sys.argv[1], gpu=True) - -seq = Sequential([ - File2Image(), Resize(256), CenterCrop(224), RGB2BGR(), Transpose((2, 0, 1)), - Div(255), Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225], True) -]) - -image_file = "daisy.jpg" -img = seq(image_file) -fetch_map = debugger.predict(feed={"image": img}, fetch=["feature_map"]) -print(fetch_map["feature_map"].reshape(-1)) diff --git a/examples/Cpp/PaddleClas/resnet_v2_50/resnet50_v2_tutorial.py b/examples/Cpp/PaddleClas/resnet_v2_50/resnet50_v2_tutorial.py deleted file mode 100644 index b249d2a6df85f87258f66c96aaa779eb2e299613..0000000000000000000000000000000000000000 --- a/examples/Cpp/PaddleClas/resnet_v2_50/resnet50_v2_tutorial.py +++ /dev/null @@ -1,32 +0,0 @@ -# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
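-# RPC client for the resnet_v2_50_imagenet service on port 9393: preprocesses
-# daisy.jpg with the standard ImageNet pipeline and prints the flattened
-# "score" output.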
- -from paddle_serving_client import Client -from paddle_serving_app.reader import Sequential, File2Image, Resize, CenterCrop -from paddle_serving_app.reader import RGB2BGR, Transpose, Div, Normalize - -client = Client() -client.load_client_config( - "resnet_v2_50_imagenet_client/serving_client_conf.prototxt") -client.connect(["127.0.0.1:9393"]) - -seq = Sequential([ - File2Image(), Resize(256), CenterCrop(224), RGB2BGR(), Transpose((2, 0, 1)), - Div(255), Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225], True) -]) - -image_file = "daisy.jpg" -img = seq(image_file) -fetch_map = client.predict(feed={"image": img}, fetch=["score"]) -print(fetch_map["score"].reshape(-1)) diff --git a/examples/Cpp/PaddleClas/resnet_v2_50/run_benchmark.sh b/examples/Cpp/PaddleClas/resnet_v2_50/run_benchmark.sh deleted file mode 100644 index e63be7ed4153c7008ecdb7758d88f73f0444fbdd..0000000000000000000000000000000000000000 --- a/examples/Cpp/PaddleClas/resnet_v2_50/run_benchmark.sh +++ /dev/null @@ -1,6 +0,0 @@ -if [ ! -f "ResNet50.tar.gz" ]; then - wget https://paddle-inference-dist.bj.bcebos.com/AI-Rank/models/Paddle/ResNet50.tar.gz -fi -tar -xzvf ResNet50.tar.gz -python3.6 -m paddle_serving_client.convert --dirname ./ResNet50 --model_filename model --params_filename params -bash benchmark.sh serving_server serving_client diff --git a/examples/Cpp/PaddleDetection/README.md b/examples/Cpp/PaddleDetection/README.md deleted file mode 100644 index e6d4da87aadd005756c151358869ba8632333118..0000000000000000000000000000000000000000 --- a/examples/Cpp/PaddleDetection/README.md +++ /dev/null @@ -1,23 +0,0 @@ -# Serve models from Paddle Detection - -(English|[简体中文](./README_CN.md)) - -### Introduction - -PaddleDetection, the PaddlePaddle object detection development kit, is designed to help developers complete the whole workflow of building, training, optimizing, and deploying detection models faster and better. For details, see [Github](https://github.com/PaddlePaddle/PaddleDetection/tree/master) - -This document mainly introduces how to deploy PaddleDetection's dynamic-graph models on Serving. - -Paddle Detection provides a large [Model Zoo](https://github.com/PaddlePaddle/PaddleDetection/blob/master/docs/MODEL_ZOO_cn.md); these models can be exported with the export tool and then used in Paddle Serving. For the export tutorial, please refer to [Paddle Detection Export Model Tutorial (Simplified Chinese)](https://github.com/PaddlePaddle/PaddleDetection/blob/master/deploy/EXPORT_MODEL.md). - -### Serving example -Several examples of PaddleDetection models used in Serving are given in this folder. -All examples support TensorRT.
-
-- [Faster RCNN](./faster_rcnn_r50_fpn_1x_coco)
-- [PPYOLO](./ppyolo_r50vd_dcn_1x_coco)
-- [TTFNet](./ttfnet_darknet53_1x_coco)
-- [YOLOv3](./yolov3_darknet53_270e_coco)
-- [HRNet](./faster_rcnn_hrnetv2p_w18_1x)
-- [Fcos](./fcos_dcn_r50_fpn_1x_coco)
-- [SSD](./ssd_vgg16_300_240e_voc/)
diff --git a/examples/Cpp/PaddleDetection/README_CN.md b/examples/Cpp/PaddleDetection/README_CN.md
deleted file mode 100644
index f5e62d7a7a0f682aa6e8b6a07ff9b304328d222e..0000000000000000000000000000000000000000
--- a/examples/Cpp/PaddleDetection/README_CN.md
+++ /dev/null
@@ -1,24 +0,0 @@
-## 使用Paddle Detection模型
-
-([English](./README.md)|简体中文)
-
-### 简介
-
-PaddleDetection飞桨目标检测开发套件,旨在帮助开发者更快更好地完成检测模型的组建、训练、优化及部署等全开发流程。详情参见[Github](https://github.com/PaddlePaddle/PaddleDetection/tree/master)
-
-本文主要是介绍Paddle Detection的动态图模型在Serving上的部署。
-
-### 导出模型
-
-Paddle Detection提供了大量的[模型库](https://github.com/PaddlePaddle/PaddleDetection/blob/master/docs/MODEL_ZOO_cn.md), 这些模型库配合导出工具都可以得到可以用于Paddle Serving的模型。导出教程参见[Paddle Detection模型导出教程](https://github.com/PaddlePaddle/PaddleDetection/blob/master/deploy/EXPORT_MODEL.md)。
-
-### Serving示例
-本文件夹下给出了多个PaddleDetection模型用于Serving的范例
-
-- [Faster RCNN](./faster_rcnn_r50_fpn_1x_coco)
-- [PPYOLO](./ppyolo_r50vd_dcn_1x_coco)
-- [TTFNet](./ttfnet_darknet53_1x_coco)
-- [YOLOv3](./yolov3_darknet53_270e_coco)
-- [HRNet](./faster_rcnn_hrnetv2p_w18_1x)
-- [Fcos](./fcos_dcn_r50_fpn_1x_coco)
-- [SSD](./ssd_vgg16_300_240e_voc/)
diff --git a/examples/Cpp/PaddleDetection/blazeface/README.md b/examples/Cpp/PaddleDetection/blazeface/README.md
deleted file mode 100644
index 29e3026b4d972e141eabcc1a180d7a5cdb804a52..0000000000000000000000000000000000000000
--- a/examples/Cpp/PaddleDetection/blazeface/README.md
+++ /dev/null
@@ -1,23 +0,0 @@
-# Blazeface
-
-## Get Model
-```
-python3 -m paddle_serving_app.package --get_model blazeface
-tar -xf blazeface.tar.gz
-```
-
-## RPC Service
-
-### Start Service
-
-```
-python3 -m paddle_serving_server.serve --model serving_server --port 9494
-```
-
-### Client Prediction
-
-```
-python3 test_client.py serving_client/serving_client_conf.prototxt test.jpg
-```
-
-The results are saved in the `output` folder, including a JSON file and an image file with bounding boxes.
diff --git a/examples/Cpp/PaddleDetection/blazeface/test_client.py b/examples/Cpp/PaddleDetection/blazeface/test_client.py
deleted file mode 100644
index 5e22cb866e34cba9fbd38c415215b8985b1584b2..0000000000000000000000000000000000000000
--- a/examples/Cpp/PaddleDetection/blazeface/test_client.py
+++ /dev/null
@@ -1,39 +0,0 @@
-# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from paddle_serving_client import Client
-from paddle_serving_app.reader import *
-import sys
-import numpy as np
-from paddle_serving_app.reader import BlazeFacePostprocess
-
-preprocess = Sequential([
-    File2Image(),
-    Normalize([104, 117, 123], [127.502231, 127.502231, 127.502231], False)
-])
-
-postprocess = BlazeFacePostprocess("label_list.txt", "output")
-client = Client()
-
-client.load_client_config(sys.argv[1])
-client.connect(['127.0.0.1:9494'])
-
-im_0 = preprocess(sys.argv[2])
-tmp = Transpose((2, 0, 1))
-im = tmp(im_0)
-fetch_map = client.predict(
-    feed={"image": im}, fetch=["detection_output_0.tmp_0"])
-fetch_map["image"] = sys.argv[2]
-fetch_map["im_shape"] = im_0.shape
-postprocess(fetch_map)
diff --git a/examples/Cpp/PaddleDetection/cascade_rcnn/000000570688.jpg b/examples/Cpp/PaddleDetection/cascade_rcnn/000000570688.jpg
deleted file mode 100644
index cb304bd56c4010c08611a30dcca58ea9140cea54..0000000000000000000000000000000000000000
Binary files a/examples/Cpp/PaddleDetection/cascade_rcnn/000000570688.jpg and /dev/null differ
diff --git a/examples/Cpp/PaddleDetection/cascade_rcnn/README.md b/examples/Cpp/PaddleDetection/cascade_rcnn/README.md
deleted file mode 100644
index 8029f39a11fcbadefe7f7c77ad709b4a0080707e..0000000000000000000000000000000000000000
--- a/examples/Cpp/PaddleDetection/cascade_rcnn/README.md
+++ /dev/null
@@ -1,21 +0,0 @@
-# Cascade RCNN model on Paddle Serving
-
-([简体中文](./README_CN.md)|English)
-
-### Get The Cascade RCNN Model
-```
-sh get_data.sh
-```
-If you want more detection models, please refer to the [Paddle Detection Model Zoo](https://github.com/PaddlePaddle/PaddleDetection/blob/release/0.2/docs/MODEL_ZOO_cn.md)
-
-### Start the service
-```
-python3 -m paddle_serving_server.serve --model serving_server --port 9292 --gpu_id 0
-```
-
-### Perform prediction
-```
-python3 test_client.py 000000570688.jpg
-```
-
-The image with bounding boxes and the JSON result will be saved in the `output` folder.
diff --git a/examples/Cpp/PaddleDetection/cascade_rcnn/README_CN.md b/examples/Cpp/PaddleDetection/cascade_rcnn/README_CN.md deleted file mode 100644 index 828aba8a9546465c89ef673625b8b2b5140f96a6..0000000000000000000000000000000000000000 --- a/examples/Cpp/PaddleDetection/cascade_rcnn/README_CN.md +++ /dev/null @@ -1,21 +0,0 @@ -# 使用Paddle Serving部署Cascade RCNN模型 - -(简体中文|[English](./README.md)) - -## 获得Cascade RCNN模型 -``` -sh get_data.sh -``` -如果你想要更多的检测模型,请参考[Paddle检测模型库](https://github.com/PaddlePaddle/PaddleDetection/blob/release/0.2/docs/MODEL_ZOO_cn.md) - -### 启动服务 -``` -python3 -m paddle_serving_server.serve --model serving_server --port 9292 --gpu_id 0 -``` - -### 执行预测 -``` -python3 test_client.py 000000570688.jpg -``` - -客户端已经为图片做好了后处理,在`output`文件夹下存放各个框的json格式信息还有后处理结果图片。 diff --git a/examples/Cpp/PaddleDetection/cascade_rcnn/get_data.sh b/examples/Cpp/PaddleDetection/cascade_rcnn/get_data.sh deleted file mode 100644 index 204ae1a269e00a0156141946db7cfed37475564f..0000000000000000000000000000000000000000 --- a/examples/Cpp/PaddleDetection/cascade_rcnn/get_data.sh +++ /dev/null @@ -1,2 +0,0 @@ -wget --no-check-certificate https://paddle-serving.bj.bcebos.com/pddet_demo/cascade_mask_rcnn_r50_vd_fpn_ssld_2x_coco_serving.tar.gz -tar xf cascade_mask_rcnn_r50_vd_fpn_ssld_2x_coco_serving.tar.gz diff --git a/examples/Cpp/PaddleDetection/cascade_rcnn/label_list.txt b/examples/Cpp/PaddleDetection/cascade_rcnn/label_list.txt deleted file mode 100644 index 941cb4e1392266f6a6c09b1fdc5f79503b2e5df6..0000000000000000000000000000000000000000 --- a/examples/Cpp/PaddleDetection/cascade_rcnn/label_list.txt +++ /dev/null @@ -1,80 +0,0 @@ -person -bicycle -car -motorcycle -airplane -bus -train -truck -boat -traffic light -fire hydrant -stop sign -parking meter -bench -bird -cat -dog -horse -sheep -cow -elephant -bear -zebra -giraffe -backpack -umbrella -handbag -tie -suitcase -frisbee -skis -snowboard -sports ball -kite -baseball bat -baseball glove -skateboard -surfboard -tennis racket -bottle -wine glass -cup -fork -knife -spoon -bowl -banana -apple -sandwich -orange -broccoli -carrot -hot dog -pizza -donut -cake -chair -couch -potted plant -bed -dining table -toilet -tv -laptop -mouse -remote -keyboard -cell phone -microwave -oven -toaster -sink -refrigerator -book -clock -vase -scissors -teddy bear -hair drier -toothbrush diff --git a/examples/Cpp/PaddleDetection/cascade_rcnn/test_client.py b/examples/Cpp/PaddleDetection/cascade_rcnn/test_client.py deleted file mode 100644 index aac9f67216863c5f4ecb6bd45dc57dfc8c50ab32..0000000000000000000000000000000000000000 --- a/examples/Cpp/PaddleDetection/cascade_rcnn/test_client.py +++ /dev/null @@ -1,46 +0,0 @@ -# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-
-import sys
-import numpy as np
-from paddle_serving_client import Client
-from paddle_serving_app.reader import *
-import cv2
-
-preprocess = DetectionSequential([
-    DetectionFile2Image(),
-    DetectionResize((800, 1333), True, interpolation=2),
-    DetectionNormalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225], True),
-    DetectionTranspose((2,0,1)),
-    DetectionPadStride(32)
-])
-
-postprocess = RCNNPostprocess("label_list.txt", "output")
-client = Client()
-
-client.load_client_config("serving_client/serving_client_conf.prototxt")
-client.connect(['127.0.0.1:9292'])
-
-im, im_info = preprocess(sys.argv[1])
-fetch_map = client.predict(
-    feed={
-        "image": im,
-        "im_shape": np.array(list(im.shape[1:])).reshape(-1),
-        "scale_factor": im_info['scale_factor'],
-    },
-    fetch=["save_infer_model/scale_0.tmp_1"],
-    batch=False)
-print(fetch_map)
-fetch_map["image"] = sys.argv[1]
-postprocess(fetch_map)
diff --git a/examples/Cpp/PaddleDetection/faster_rcnn_hrnetv2p_w18_1x/000000570688.jpg b/examples/Cpp/PaddleDetection/faster_rcnn_hrnetv2p_w18_1x/000000570688.jpg
deleted file mode 100644
index cb304bd56c4010c08611a30dcca58ea9140cea54..0000000000000000000000000000000000000000
Binary files a/examples/Cpp/PaddleDetection/faster_rcnn_hrnetv2p_w18_1x/000000570688.jpg and /dev/null differ
diff --git a/examples/Cpp/PaddleDetection/faster_rcnn_hrnetv2p_w18_1x/README.md b/examples/Cpp/PaddleDetection/faster_rcnn_hrnetv2p_w18_1x/README.md
deleted file mode 100644
index 3c0fb8dbee6c0d6eac7b09cb16428679cb8b9e5d..0000000000000000000000000000000000000000
--- a/examples/Cpp/PaddleDetection/faster_rcnn_hrnetv2p_w18_1x/README.md
+++ /dev/null
@@ -1,23 +0,0 @@
-# Faster RCNN HRNet model on Paddle Serving
-
-([简体中文](./README_CN.md)|English)
-
-### Get The Faster RCNN HRNet Model
-```
-wget --no-check-certificate https://paddle-serving.bj.bcebos.com/pddet_demo/faster_rcnn_hrnetv2p_w18_1x.tar.gz
-```
-
-### Start the service
-```
-tar xf faster_rcnn_hrnetv2p_w18_1x.tar.gz
-python3 -m paddle_serving_server.serve --model serving_server --port 9494 --gpu_ids 0
-```
-
-This model supports TensorRT. If you want faster inference, add the `--use_trt` flag, but some extra work is needed: you must also set the minimum, maximum, and optimal shapes of the TensorRT subgraph's variable-length inputs.
-Please refer to https://github.com/PaddlePaddle/Paddle-Inference-Demo/blob/master/c%2B%2B/paddle-trt/trt_dynamic_shape_test.cc#L40
-
-
-### Prediction
-```
-python3 test_client.py 000000570688.jpg
-```
diff --git a/examples/Cpp/PaddleDetection/faster_rcnn_hrnetv2p_w18_1x/README_CN.md b/examples/Cpp/PaddleDetection/faster_rcnn_hrnetv2p_w18_1x/README_CN.md
deleted file mode 100644
index 11dcbd85fe62f4dae5a4714ad3996424499024c0..0000000000000000000000000000000000000000
--- a/examples/Cpp/PaddleDetection/faster_rcnn_hrnetv2p_w18_1x/README_CN.md
+++ /dev/null
@@ -1,22 +0,0 @@
-# 使用Paddle Serving部署Faster RCNN HRNet模型
-
-(简体中文|[English](./README.md))
-
-## 获得Faster RCNN HRNet模型
-```
-wget --no-check-certificate https://paddle-serving.bj.bcebos.com/pddet_demo/faster_rcnn_hrnetv2p_w18_1x.tar.gz
-```
-
-
-### 启动服务
-```
-tar xf faster_rcnn_hrnetv2p_w18_1x.tar.gz
-python3 -m paddle_serving_server.serve --model serving_server --port 9494 --gpu_ids 0
-```
-该模型支持TensorRT,如果想要更快的预测速度,可以开启`--use_trt`选项,但此时需要额外设置子图的TRT变长最大最小最优shape.
-请参考https://github.com/PaddlePaddle/Paddle-Inference-Demo/blob/master/c%2B%2B/paddle-trt/trt_dynamic_shape_test.cc#L40 - -### 执行预测 -``` -python3 test_client.py 000000570688.jpg -``` diff --git a/examples/Cpp/PaddleDetection/faster_rcnn_hrnetv2p_w18_1x/label_list.txt b/examples/Cpp/PaddleDetection/faster_rcnn_hrnetv2p_w18_1x/label_list.txt deleted file mode 100644 index 941cb4e1392266f6a6c09b1fdc5f79503b2e5df6..0000000000000000000000000000000000000000 --- a/examples/Cpp/PaddleDetection/faster_rcnn_hrnetv2p_w18_1x/label_list.txt +++ /dev/null @@ -1,80 +0,0 @@ -person -bicycle -car -motorcycle -airplane -bus -train -truck -boat -traffic light -fire hydrant -stop sign -parking meter -bench -bird -cat -dog -horse -sheep -cow -elephant -bear -zebra -giraffe -backpack -umbrella -handbag -tie -suitcase -frisbee -skis -snowboard -sports ball -kite -baseball bat -baseball glove -skateboard -surfboard -tennis racket -bottle -wine glass -cup -fork -knife -spoon -bowl -banana -apple -sandwich -orange -broccoli -carrot -hot dog -pizza -donut -cake -chair -couch -potted plant -bed -dining table -toilet -tv -laptop -mouse -remote -keyboard -cell phone -microwave -oven -toaster -sink -refrigerator -book -clock -vase -scissors -teddy bear -hair drier -toothbrush diff --git a/examples/Cpp/PaddleDetection/faster_rcnn_hrnetv2p_w18_1x/test_client.py b/examples/Cpp/PaddleDetection/faster_rcnn_hrnetv2p_w18_1x/test_client.py deleted file mode 100644 index 329f6effb4cb8a8a163cada106f6aaacc1cc3857..0000000000000000000000000000000000000000 --- a/examples/Cpp/PaddleDetection/faster_rcnn_hrnetv2p_w18_1x/test_client.py +++ /dev/null @@ -1,46 +0,0 @@ -# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-
-import sys
-import numpy as np
-from paddle_serving_client import Client
-from paddle_serving_app.reader import *
-import cv2
-
-preprocess = DetectionSequential([
-    DetectionFile2Image(),
-    DetectionResize((800, 1333), True, interpolation=2),
-    DetectionNormalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225], True),
-    DetectionTranspose((2,0,1)),
-    DetectionPadStride(32)
-])
-
-postprocess = RCNNPostprocess("label_list.txt", "output")
-client = Client()
-
-client.load_client_config("serving_client/serving_client_conf.prototxt")
-client.connect(['127.0.0.1:9494'])
-
-im, im_info = preprocess(sys.argv[1])
-fetch_map = client.predict(
-    feed={
-        "image": im,
-        "im_shape": np.array(list(im.shape[1:])).reshape(-1),
-        "scale_factor": im_info['scale_factor'],
-    },
-    fetch=["save_infer_model/scale_0.tmp_1"],
-    batch=False)
-print(fetch_map)
-fetch_map["image"] = sys.argv[1]
-postprocess(fetch_map)
diff --git a/examples/Cpp/PaddleDetection/faster_rcnn_r50_fpn_1x_coco/000000570688.jpg b/examples/Cpp/PaddleDetection/faster_rcnn_r50_fpn_1x_coco/000000570688.jpg
deleted file mode 100644
index cb304bd56c4010c08611a30dcca58ea9140cea54..0000000000000000000000000000000000000000
Binary files a/examples/Cpp/PaddleDetection/faster_rcnn_r50_fpn_1x_coco/000000570688.jpg and /dev/null differ
diff --git a/examples/Cpp/PaddleDetection/faster_rcnn_r50_fpn_1x_coco/README.md b/examples/Cpp/PaddleDetection/faster_rcnn_r50_fpn_1x_coco/README.md
deleted file mode 100644
index d56aa416b9e54114646f9271c27f6afde7d41259..0000000000000000000000000000000000000000
--- a/examples/Cpp/PaddleDetection/faster_rcnn_r50_fpn_1x_coco/README.md
+++ /dev/null
@@ -1,39 +0,0 @@
-# Faster RCNN model on Paddle Serving
-
-([简体中文](./README_CN.md)|English)
-
-### Get The Faster RCNN Model
-```
-wget --no-check-certificate https://paddle-serving.bj.bcebos.com/pddet_demo/2.0/faster_rcnn_r50_fpn_1x_coco.tar
-```
-
-### Start the service
-```
-tar xf faster_rcnn_r50_fpn_1x_coco.tar
-python3 -m paddle_serving_server.serve --model serving_server --port 9494 --gpu_ids 0
-```
-
-This model supports TensorRT. If you want faster inference, add the `--use_trt` flag, but some extra work is needed: you must also set the minimum, maximum, and optimal shapes of the TensorRT subgraph's variable-length inputs.
-Please refer to https://github.com/PaddlePaddle/Paddle-Inference-Demo/blob/master/c%2B%2B/paddle-trt/trt_dynamic_shape_test.cc#L40
-
-
-### Perform prediction
-```
-python3 test_client.py 000000570688.jpg
-```
-
-## 3. Result analysis
-
-This is the input picture.
-
-This is the picture after the bounding boxes are added. You can see that the client has post-processed the image; in addition, output/bbox.json contains the label index and coordinates of each box.
diff --git a/examples/Cpp/PaddleDetection/faster_rcnn_r50_fpn_1x_coco/README_CN.md b/examples/Cpp/PaddleDetection/faster_rcnn_r50_fpn_1x_coco/README_CN.md
deleted file mode 100644
index f8475daf029ae2230432871237281970052fe3e3..0000000000000000000000000000000000000000
--- a/examples/Cpp/PaddleDetection/faster_rcnn_r50_fpn_1x_coco/README_CN.md
+++ /dev/null
@@ -1,37 +0,0 @@
-# 使用Paddle Serving部署Faster RCNN模型
-
-(简体中文|[English](./README.md))
-
-## 获得Faster RCNN模型
-```
-wget --no-check-certificate https://paddle-serving.bj.bcebos.com/pddet_demo/2.0/faster_rcnn_r50_fpn_1x_coco.tar
-```
-
-
-### 启动服务
-```
-tar xf faster_rcnn_r50_fpn_1x_coco.tar
-python3 -m paddle_serving_server.serve --model serving_server --port 9494 --gpu_ids 0
-```
-该模型支持TensorRT,如果想要更快的预测速度,可以开启`--use_trt`选项,但此时需要额外设置子图的TRT变长最大最小最优shape.
-请参考https://github.com/PaddlePaddle/Paddle-Inference-Demo/blob/master/c%2B%2B/paddle-trt/trt_dynamic_shape_test.cc#L40
-
-### 执行预测
-```
-python3 test_client.py 000000570688.jpg
-```
-
-## 3. 结果分析
-
-这是输入图片
-
-这是添加了bbox之后的图片,可以看到客户端已经为图片做好了后处理,此外在output/bbox.json中也有各个框的编号和坐标信息。
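Both result sections above point at output/bbox.json. As a quick illustration, here is a minimal sketch of how the saved boxes could be inspected; the record fields (category_id, bbox, score) are assumptions about the RCNNPostprocess output, and the exact schema depends on the paddle_serving_app version:

```python
# Hypothetical sketch: inspect the boxes written by RCNNPostprocess.
# Assumes output/bbox.json holds a list of records shaped like
# {"category_id": int, "bbox": [x, y, w, h], "score": float}.
import json

with open("output/bbox.json") as f:
    boxes = json.load(f)

for box in boxes:
    print(box["category_id"], box["bbox"], box["score"])
```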
diff --git a/examples/Cpp/PaddleDetection/faster_rcnn_r50_fpn_1x_coco/label_list.txt b/examples/Cpp/PaddleDetection/faster_rcnn_r50_fpn_1x_coco/label_list.txt
deleted file mode 100644
index 941cb4e1392266f6a6c09b1fdc5f79503b2e5df6..0000000000000000000000000000000000000000
--- a/examples/Cpp/PaddleDetection/faster_rcnn_r50_fpn_1x_coco/label_list.txt
+++ /dev/null
@@ -1,80 +0,0 @@
-person
-bicycle
-car
-motorcycle
-airplane
-bus
-train
-truck
-boat
-traffic light
-fire hydrant
-stop sign
-parking meter
-bench
-bird
-cat
-dog
-horse
-sheep
-cow
-elephant
-bear
-zebra
-giraffe
-backpack
-umbrella
-handbag
-tie
-suitcase
-frisbee
-skis
-snowboard
-sports ball
-kite
-baseball bat
-baseball glove
-skateboard
-surfboard
-tennis racket
-bottle
-wine glass
-cup
-fork
-knife
-spoon
-bowl
-banana
-apple
-sandwich
-orange
-broccoli
-carrot
-hot dog
-pizza
-donut
-cake
-chair
-couch
-potted plant
-bed
-dining table
-toilet
-tv
-laptop
-mouse
-remote
-keyboard
-cell phone
-microwave
-oven
-toaster
-sink
-refrigerator
-book
-clock
-vase
-scissors
-teddy bear
-hair drier
-toothbrush
diff --git a/examples/Cpp/PaddleDetection/faster_rcnn_r50_fpn_1x_coco/test_client.py b/examples/Cpp/PaddleDetection/faster_rcnn_r50_fpn_1x_coco/test_client.py
deleted file mode 100644
index b6b2c534b0609692fea34bafcf4059222738debd..0000000000000000000000000000000000000000
--- a/examples/Cpp/PaddleDetection/faster_rcnn_r50_fpn_1x_coco/test_client.py
+++ /dev/null
@@ -1,46 +0,0 @@
-# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import sys
-import numpy as np
-from paddle_serving_client import Client
-from paddle_serving_app.reader import *
-import cv2
-
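-# Preprocessing pipeline: read the image, normalize with ImageNet mean/std,
-# resize toward the (800, 1333) target keeping aspect ratio, convert HWC to
-# CHW, and pad height/width up to a multiple of 128 (DetectionPadStride).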
-preprocess = DetectionSequential([
- DetectionFile2Image(),
- DetectionNormalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225], True),
- DetectionResize(
- (800, 1333), True, interpolation=cv2.INTER_LINEAR),
- DetectionTranspose((2,0,1)),
- DetectionPadStride(128)
-])
-
-postprocess = RCNNPostprocess("label_list.txt", "output")
-client = Client()
-
-client.load_client_config("serving_client/serving_client_conf.prototxt")
-client.connect(['127.0.0.1:9494'])
-
-im, im_info = preprocess(sys.argv[1])
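-# The exported detection model takes three inputs: the image tensor itself,
-# the resized image shape (im_shape, derived from the tensor), and the
-# rescale factor relative to the original image (scale_factor, from im_info).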
-fetch_map = client.predict(
- feed={
- "image": im,
- "im_shape": np.array(list(im.shape[1:])).reshape(-1),
- "scale_factor": im_info['scale_factor'],
- },
- fetch=["save_infer_model/scale_0.tmp_1"],
- batch=False)
-fetch_map["image"] = sys.argv[1]
-postprocess(fetch_map)
diff --git a/examples/Cpp/PaddleDetection/fcos_dcn_r50_fpn_1x_coco/000000014439.jpg b/examples/Cpp/PaddleDetection/fcos_dcn_r50_fpn_1x_coco/000000014439.jpg
deleted file mode 100644
index 0abbdab06eb5950b93908cc91adfa640e8a3ac78..0000000000000000000000000000000000000000
Binary files a/examples/Cpp/PaddleDetection/fcos_dcn_r50_fpn_1x_coco/000000014439.jpg and /dev/null differ
diff --git a/examples/Cpp/PaddleDetection/fcos_dcn_r50_fpn_1x_coco/README.md b/examples/Cpp/PaddleDetection/fcos_dcn_r50_fpn_1x_coco/README.md
deleted file mode 100644
index 58d13e53fe9ac3b177a3b6e6661a1370efa796b9..0000000000000000000000000000000000000000
--- a/examples/Cpp/PaddleDetection/fcos_dcn_r50_fpn_1x_coco/README.md
+++ /dev/null
@@ -1,20 +0,0 @@
-# FCOS model on Paddle Serving
-
-([简体中文](./README_CN.md)|English)
-
-### Get Model
-```
-wget --no-check-certificate https://paddle-serving.bj.bcebos.com/pddet_demo/2.0/fcos_dcn_r50_fpn_1x_coco.tar
-```
-
-### Start the service
-```
-tar xf fcos_dcn_r50_fpn_1x_coco.tar
-python3 -m paddle_serving_server.serve --model serving_server --port 9494 --gpu_ids 0
-```
-This model supports TensorRT. If you want faster inference, please add the `--use_trt` flag.
-
-### Perform prediction
-```
-python3 test_client.py 000000014439.jpg
-```
diff --git a/examples/Cpp/PaddleDetection/fcos_dcn_r50_fpn_1x_coco/README_CN.md b/examples/Cpp/PaddleDetection/fcos_dcn_r50_fpn_1x_coco/README_CN.md
deleted file mode 100644
index af2fd8753cc56ef9c732c21020712674313ac4fa..0000000000000000000000000000000000000000
--- a/examples/Cpp/PaddleDetection/fcos_dcn_r50_fpn_1x_coco/README_CN.md
+++ /dev/null
@@ -1,22 +0,0 @@
-# 使用Paddle Serving部署FCOS模型
-
-(简体中文|[English](./README.md))
-
-## 获得模型
-```
-wget --no-check-certificate https://paddle-serving.bj.bcebos.com/pddet_demo/2.0/fcos_dcn_r50_fpn_1x_coco.tar
-```
-
-
-### 启动服务
-```
-tar xf fcos_dcn_r50_fpn_1x_coco.tar
-python3 -m paddle_serving_server.serve --model serving_server --port 9494 --gpu_ids 0
-```
-
-该模型支持TensorRT,如果想要更快的预测速度,可以开启`--use_trt`选项。
-
-### 执行预测
-```
-python3 test_client.py 000000014439.jpg
-```
diff --git a/examples/Cpp/PaddleDetection/fcos_dcn_r50_fpn_1x_coco/label_list.txt b/examples/Cpp/PaddleDetection/fcos_dcn_r50_fpn_1x_coco/label_list.txt
deleted file mode 100644
index 941cb4e1392266f6a6c09b1fdc5f79503b2e5df6..0000000000000000000000000000000000000000
--- a/examples/Cpp/PaddleDetection/fcos_dcn_r50_fpn_1x_coco/label_list.txt
+++ /dev/null
@@ -1,80 +0,0 @@
-person
-bicycle
-car
-motorcycle
-airplane
-bus
-train
-truck
-boat
-traffic light
-fire hydrant
-stop sign
-parking meter
-bench
-bird
-cat
-dog
-horse
-sheep
-cow
-elephant
-bear
-zebra
-giraffe
-backpack
-umbrella
-handbag
-tie
-suitcase
-frisbee
-skis
-snowboard
-sports ball
-kite
-baseball bat
-baseball glove
-skateboard
-surfboard
-tennis racket
-bottle
-wine glass
-cup
-fork
-knife
-spoon
-bowl
-banana
-apple
-sandwich
-orange
-broccoli
-carrot
-hot dog
-pizza
-donut
-cake
-chair
-couch
-potted plant
-bed
-dining table
-toilet
-tv
-laptop
-mouse
-remote
-keyboard
-cell phone
-microwave
-oven
-toaster
-sink
-refrigerator
-book
-clock
-vase
-scissors
-teddy bear
-hair drier
-toothbrush
diff --git a/examples/Cpp/PaddleDetection/fcos_dcn_r50_fpn_1x_coco/test_client.py b/examples/Cpp/PaddleDetection/fcos_dcn_r50_fpn_1x_coco/test_client.py
deleted file mode 100644
index 7ad59d75b84cad081449df31393e06a26d7441dd..0000000000000000000000000000000000000000
--- a/examples/Cpp/PaddleDetection/fcos_dcn_r50_fpn_1x_coco/test_client.py
+++ /dev/null
@@ -1,46 +0,0 @@
-# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import sys
-import numpy as np
-from paddle_serving_client import Client
-from paddle_serving_app.reader import *
-import cv2
-
-preprocess = DetectionSequential([
- DetectionFile2Image(),
- DetectionNormalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225], True),
- DetectionResize(
- (800, 1333), True, interpolation=cv2.INTER_LINEAR),
- DetectionTranspose((2,0,1)),
- DetectionPadStride(128)
-])
-
-postprocess = RCNNPostprocess("label_list.txt", "output")
-client = Client()
-
-client.load_client_config("serving_client/serving_client_conf.prototxt")
-client.connect(['127.0.0.1:9494'])
-
-im, im_info = preprocess(sys.argv[1])
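-# Note: unlike the RCNN examples, this FCOS client feeds only the image
-# tensor and scale_factor; no im_shape input is passed.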
-fetch_map = client.predict(
- feed={
- "image": im,
- "scale_factor": im_info['scale_factor'],
- },
- fetch=["save_infer_model/scale_0.tmp_1"],
- batch=False)
-print(fetch_map)
-fetch_map["image"] = sys.argv[1]
-postprocess(fetch_map)
diff --git a/examples/Cpp/PaddleDetection/ppyolo_r50vd_dcn_1x_coco/000000570688.jpg b/examples/Cpp/PaddleDetection/ppyolo_r50vd_dcn_1x_coco/000000570688.jpg
deleted file mode 100644
index cb304bd56c4010c08611a30dcca58ea9140cea54..0000000000000000000000000000000000000000
Binary files a/examples/Cpp/PaddleDetection/ppyolo_r50vd_dcn_1x_coco/000000570688.jpg and /dev/null differ
diff --git a/examples/Cpp/PaddleDetection/ppyolo_r50vd_dcn_1x_coco/README.md b/examples/Cpp/PaddleDetection/ppyolo_r50vd_dcn_1x_coco/README.md
deleted file mode 100644
index 8060e087107e54bc401849fd576497e9fc9cd421..0000000000000000000000000000000000000000
--- a/examples/Cpp/PaddleDetection/ppyolo_r50vd_dcn_1x_coco/README.md
+++ /dev/null
@@ -1,21 +0,0 @@
-# PP-YOLO model on Paddle Serving
-
-([简体中文](./README_CN.md)|English)
-
-### Get The Model
-```
-wget --no-check-certificate https://paddle-serving.bj.bcebos.com/pddet_demo/2.0/ppyolo_r50vd_dcn_1x_coco.tar
-```
-
-### Start the service
-```
-tar xf ppyolo_r50vd_dcn_1x_coco.tar
-python3 -m paddle_serving_server.serve --model serving_server --port 9494 --gpu_ids 0
-```
-
-This model supports TensorRT. If you want faster inference, please add the `--use_trt` flag.
-
-### Perform prediction
-```
-python3 test_client.py 000000570688.jpg
-```
diff --git a/examples/Cpp/PaddleDetection/ppyolo_r50vd_dcn_1x_coco/README_CN.md b/examples/Cpp/PaddleDetection/ppyolo_r50vd_dcn_1x_coco/README_CN.md
deleted file mode 100644
index 3071db7b124fd998d15901be7a78a67018d0de0f..0000000000000000000000000000000000000000
--- a/examples/Cpp/PaddleDetection/ppyolo_r50vd_dcn_1x_coco/README_CN.md
+++ /dev/null
@@ -1,22 +0,0 @@
-# 使用Paddle Serving部署PP-YOLO模型
-
-(简体中文|[English](./README.md))
-
-## 获得模型
-```
-wget --no-check-certificate https://paddle-serving.bj.bcebos.com/pddet_demo/2.0/ppyolo_r50vd_dcn_1x_coco.tar
-```
-
-
-### 启动服务
-```
-tar xf ppyolo_r50vd_dcn_1x_coco.tar
-python3 -m paddle_serving_server.serve --model serving_server --port 9494 --gpu_ids 0
-```
-
-该模型支持TensorRT,如果想要更快的预测速度,可以开启`--use_trt`选项。
-
-### 执行预测
-```
-python3 test_client.py 000000570688.jpg
-```
diff --git a/examples/Cpp/PaddleDetection/ppyolo_r50vd_dcn_1x_coco/label_list.txt b/examples/Cpp/PaddleDetection/ppyolo_r50vd_dcn_1x_coco/label_list.txt
deleted file mode 100644
index 941cb4e1392266f6a6c09b1fdc5f79503b2e5df6..0000000000000000000000000000000000000000
--- a/examples/Cpp/PaddleDetection/ppyolo_r50vd_dcn_1x_coco/label_list.txt
+++ /dev/null
@@ -1,80 +0,0 @@
-person
-bicycle
-car
-motorcycle
-airplane
-bus
-train
-truck
-boat
-traffic light
-fire hydrant
-stop sign
-parking meter
-bench
-bird
-cat
-dog
-horse
-sheep
-cow
-elephant
-bear
-zebra
-giraffe
-backpack
-umbrella
-handbag
-tie
-suitcase
-frisbee
-skis
-snowboard
-sports ball
-kite
-baseball bat
-baseball glove
-skateboard
-surfboard
-tennis racket
-bottle
-wine glass
-cup
-fork
-knife
-spoon
-bowl
-banana
-apple
-sandwich
-orange
-broccoli
-carrot
-hot dog
-pizza
-donut
-cake
-chair
-couch
-potted plant
-bed
-dining table
-toilet
-tv
-laptop
-mouse
-remote
-keyboard
-cell phone
-microwave
-oven
-toaster
-sink
-refrigerator
-book
-clock
-vase
-scissors
-teddy bear
-hair drier
-toothbrush
diff --git a/examples/Cpp/PaddleDetection/ppyolo_r50vd_dcn_1x_coco/test_client.py b/examples/Cpp/PaddleDetection/ppyolo_r50vd_dcn_1x_coco/test_client.py
deleted file mode 100644
index f40f2d5c87bdd64f588620d2d6f6ebf98a3894a7..0000000000000000000000000000000000000000
--- a/examples/Cpp/PaddleDetection/ppyolo_r50vd_dcn_1x_coco/test_client.py
+++ /dev/null
@@ -1,45 +0,0 @@
-# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import sys
-import numpy as np
-from paddle_serving_client import Client
-from paddle_serving_app.reader import *
-import cv2
-
-preprocess = DetectionSequential([
- DetectionFile2Image(),
- DetectionNormalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225], True),
- DetectionResize(
- (608, 608), False, interpolation=2),
- DetectionTranspose((2,0,1))
-])
-
-postprocess = RCNNPostprocess("label_list.txt", "output")
-client = Client()
-
-client.load_client_config("serving_client/serving_client_conf.prototxt")
-client.connect(['127.0.0.1:9494'])
-
-im, im_info = preprocess(sys.argv[1])
-fetch_map = client.predict(
- feed={
- "image": im,
- "im_shape": np.array(list(im.shape[1:])).reshape(-1),
- "scale_factor": im_info['scale_factor'],
- },
- fetch=["save_infer_model/scale_0.tmp_1"],
- batch=False)
-fetch_map["image"] = sys.argv[1]
-postprocess(fetch_map)
diff --git a/examples/Cpp/PaddleDetection/ssd_vgg16_300_240e_voc/000000014439.jpg b/examples/Cpp/PaddleDetection/ssd_vgg16_300_240e_voc/000000014439.jpg
deleted file mode 100644
index 0abbdab06eb5950b93908cc91adfa640e8a3ac78..0000000000000000000000000000000000000000
Binary files a/examples/Cpp/PaddleDetection/ssd_vgg16_300_240e_voc/000000014439.jpg and /dev/null differ
diff --git a/examples/Cpp/PaddleDetection/ssd_vgg16_300_240e_voc/README.md b/examples/Cpp/PaddleDetection/ssd_vgg16_300_240e_voc/README.md
deleted file mode 100644
index 8a9a766c7b24d8468cbc72d6affd90263e86b013..0000000000000000000000000000000000000000
--- a/examples/Cpp/PaddleDetection/ssd_vgg16_300_240e_voc/README.md
+++ /dev/null
@@ -1,20 +0,0 @@
-# SSD model on Paddle Serving
-
-([简体中文](./README_CN.md)|English)
-
-### Get Model
-```
-wget --no-check-certificate https://paddle-serving.bj.bcebos.com/pddet_demo/2.0/ssd_vgg16_300_240e_voc.tar
-```
-
-### Start the service
-```
-tar xf ssd_vgg16_300_240e_voc.tar
-python3 -m paddle_serving_server.serve --model serving_server --port 9494 --gpu_ids 0
-```
-This model supports TensorRT. If you want faster inference, please add the `--use_trt` flag.
-
-### Perform prediction
-```
-python3 test_client.py 000000014439.jpg
-```
diff --git a/examples/Cpp/PaddleDetection/ssd_vgg16_300_240e_voc/README_CN.md b/examples/Cpp/PaddleDetection/ssd_vgg16_300_240e_voc/README_CN.md
deleted file mode 100644
index d3df37d774bd1a478af0a41a9fca9f238ca69aac..0000000000000000000000000000000000000000
--- a/examples/Cpp/PaddleDetection/ssd_vgg16_300_240e_voc/README_CN.md
+++ /dev/null
@@ -1,22 +0,0 @@
-# 使用Paddle Serving部署SSD模型
-
-(简体中文|[English](./README.md))
-
-## 获得模型
-```
-wget --no-check-certificate https://paddle-serving.bj.bcebos.com/pddet_demo/2.0/ssd_vgg16_300_240e_voc.tar
-```
-
-
-### 启动服务
-```
-tar xf ssd_vgg16_300_240e_voc.tar
-python3 -m paddle_serving_server.serve --model serving_server --port 9494 --gpu_ids 0
-```
-
-该模型支持TensorRT,如果想要更快的预测速度,可以开启`--use_trt`选项。
-
-### 执行预测
-```
-python3 test_client.py 000000014439.jpg
-```
diff --git a/examples/Cpp/PaddleDetection/ssd_vgg16_300_240e_voc/label_list.txt b/examples/Cpp/PaddleDetection/ssd_vgg16_300_240e_voc/label_list.txt
deleted file mode 100644
index 8420ab35ede7400974f25836a6bb543024686a0e..0000000000000000000000000000000000000000
--- a/examples/Cpp/PaddleDetection/ssd_vgg16_300_240e_voc/label_list.txt
+++ /dev/null
@@ -1,20 +0,0 @@
-aeroplane
-bicycle
-bird
-boat
-bottle
-bus
-car
-cat
-chair
-cow
-diningtable
-dog
-horse
-motorbike
-person
-pottedplant
-sheep
-sofa
-train
-tvmonitor
diff --git a/examples/Cpp/PaddleDetection/ssd_vgg16_300_240e_voc/test_client.py b/examples/Cpp/PaddleDetection/ssd_vgg16_300_240e_voc/test_client.py
deleted file mode 100644
index 1df635c89d7228d10d8b08d4e011713200e9c828..0000000000000000000000000000000000000000
--- a/examples/Cpp/PaddleDetection/ssd_vgg16_300_240e_voc/test_client.py
+++ /dev/null
@@ -1,46 +0,0 @@
-# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import sys
-import numpy as np
-from paddle_serving_client import Client
-from paddle_serving_app.reader import *
-import cv2
-
-preprocess = DetectionSequential([
- DetectionFile2Image(),
- DetectionResize(
- (300, 300), False, interpolation=cv2.INTER_LINEAR),
- DetectionNormalize([104.0, 117.0, 123.0], [1.0, 1.0, 1.0], False),
- DetectionTranspose((2,0,1)),
-])
-
-postprocess = RCNNPostprocess("label_list.txt", "output")
-client = Client()
-
-client.load_client_config("serving_client/serving_client_conf.prototxt")
-client.connect(['127.0.0.1:9494'])
-
-im, im_info = preprocess(sys.argv[1])
-fetch_map = client.predict(
- feed={
- "image": im,
- "im_shape": np.array(list(im.shape[1:])).reshape(-1),
- "scale_factor": im_info['scale_factor'],
- },
- fetch=["save_infer_model/scale_0.tmp_1"],
- batch=False)
-print(fetch_map)
-fetch_map["image"] = sys.argv[1]
-postprocess(fetch_map)
diff --git a/examples/Cpp/PaddleDetection/ttfnet_darknet53_1x_coco/000000570688.jpg b/examples/Cpp/PaddleDetection/ttfnet_darknet53_1x_coco/000000570688.jpg
deleted file mode 100644
index cb304bd56c4010c08611a30dcca58ea9140cea54..0000000000000000000000000000000000000000
Binary files a/examples/Cpp/PaddleDetection/ttfnet_darknet53_1x_coco/000000570688.jpg and /dev/null differ
diff --git a/examples/Cpp/PaddleDetection/ttfnet_darknet53_1x_coco/README.md b/examples/Cpp/PaddleDetection/ttfnet_darknet53_1x_coco/README.md
deleted file mode 100644
index adf5de2abd39c3b440ac43ab9b1c1c58aba69c51..0000000000000000000000000000000000000000
--- a/examples/Cpp/PaddleDetection/ttfnet_darknet53_1x_coco/README.md
+++ /dev/null
@@ -1,20 +0,0 @@
-# TTF-Net model on Paddle Serving
-
-([简体中文](./README_CN.md)|English)
-
-### Get Model
-```
-wget --no-check-certificate https://paddle-serving.bj.bcebos.com/pddet_demo/ttfnet_darknet53_1x_coco.tar
-```
-
-### Start the service
-```
-tar xf ttfnet_darknet53_1x_coco.tar
-python3 -m paddle_serving_server.serve --model serving_server --port 9494 --gpu_ids 0
-```
-This model supports TensorRT. If you want faster inference, please add the `--use_trt` flag.
-
-### Perform prediction
-```
-python3 test_client.py 000000570688.jpg
-```
diff --git a/examples/Cpp/PaddleDetection/ttfnet_darknet53_1x_coco/README_CN.md b/examples/Cpp/PaddleDetection/ttfnet_darknet53_1x_coco/README_CN.md
deleted file mode 100644
index 7a2c860967643585023ce0f644a36e9c056c21a2..0000000000000000000000000000000000000000
--- a/examples/Cpp/PaddleDetection/ttfnet_darknet53_1x_coco/README_CN.md
+++ /dev/null
@@ -1,22 +0,0 @@
-# 使用Paddle Serving部署TTF-Net模型
-
-(简体中文|[English](./README.md))
-
-## 获得模型
-```
-wget --no-check-certificate https://paddle-serving.bj.bcebos.com/pddet_demo/ttfnet_darknet53_1x_coco.tar
-```
-
-
-### 启动服务
-```
-tar xf ttfnet_darknet53_1x_coco.tar
-python3 -m paddle_serving_server.serve --model serving_server --port 9494 --gpu_ids 0
-```
-
-该模型支持TensorRT,如果想要更快的预测速度,可以开启`--use_trt`选项。
-
-### 执行预测
-```
-python3 test_client.py 000000570688.jpg
-```
diff --git a/examples/Cpp/PaddleDetection/ttfnet_darknet53_1x_coco/label_list.txt b/examples/Cpp/PaddleDetection/ttfnet_darknet53_1x_coco/label_list.txt
deleted file mode 100644
index 941cb4e1392266f6a6c09b1fdc5f79503b2e5df6..0000000000000000000000000000000000000000
--- a/examples/Cpp/PaddleDetection/ttfnet_darknet53_1x_coco/label_list.txt
+++ /dev/null
@@ -1,80 +0,0 @@
-person
-bicycle
-car
-motorcycle
-airplane
-bus
-train
-truck
-boat
-traffic light
-fire hydrant
-stop sign
-parking meter
-bench
-bird
-cat
-dog
-horse
-sheep
-cow
-elephant
-bear
-zebra
-giraffe
-backpack
-umbrella
-handbag
-tie
-suitcase
-frisbee
-skis
-snowboard
-sports ball
-kite
-baseball bat
-baseball glove
-skateboard
-surfboard
-tennis racket
-bottle
-wine glass
-cup
-fork
-knife
-spoon
-bowl
-banana
-apple
-sandwich
-orange
-broccoli
-carrot
-hot dog
-pizza
-donut
-cake
-chair
-couch
-potted plant
-bed
-dining table
-toilet
-tv
-laptop
-mouse
-remote
-keyboard
-cell phone
-microwave
-oven
-toaster
-sink
-refrigerator
-book
-clock
-vase
-scissors
-teddy bear
-hair drier
-toothbrush
diff --git a/examples/Cpp/PaddleDetection/ttfnet_darknet53_1x_coco/test_client.py b/examples/Cpp/PaddleDetection/ttfnet_darknet53_1x_coco/test_client.py
deleted file mode 100644
index f735c01bc52db529a6823dbff4e72eb236525344..0000000000000000000000000000000000000000
--- a/examples/Cpp/PaddleDetection/ttfnet_darknet53_1x_coco/test_client.py
+++ /dev/null
@@ -1,45 +0,0 @@
-# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-import sys
-import numpy as np
-from paddle_serving_client import Client
-from paddle_serving_app.reader import *
-import cv2
-
-preprocess = DetectionSequential([
- DetectionFile2Image(),
- DetectionResize(
- (512, 512), False, interpolation=cv2.INTER_LINEAR),
- DetectionNormalize([123.675, 116.28, 103.53], [58.395, 57.12, 57.375], False),
- DetectionTranspose((2,0,1))
-])
-
-postprocess = RCNNPostprocess("label_list.txt", "output")
-client = Client()
-
-client.load_client_config("serving_client/serving_client_conf.prototxt")
-client.connect(['127.0.0.1:9494'])
-
-im, im_info = preprocess(sys.argv[1])
-
-
-fetch_map = client.predict(
- feed={
- "image": im,
- "im_shape": np.array(list(im.shape[1:])).reshape(-1),
- "scale_factor": im_info['scale_factor'],
- },
- fetch=["save_infer_model/scale_0.tmp_1"],
- batch=False)
-print(fetch_map)
-fetch_map["image"] = sys.argv[1]
-postprocess(fetch_map)
diff --git a/examples/Cpp/PaddleDetection/yolov3_darknet53_270e_coco/000000570688.jpg b/examples/Cpp/PaddleDetection/yolov3_darknet53_270e_coco/000000570688.jpg
deleted file mode 100644
index cb304bd56c4010c08611a30dcca58ea9140cea54..0000000000000000000000000000000000000000
Binary files a/examples/Cpp/PaddleDetection/yolov3_darknet53_270e_coco/000000570688.jpg and /dev/null differ
diff --git a/examples/Cpp/PaddleDetection/yolov3_darknet53_270e_coco/README.md b/examples/Cpp/PaddleDetection/yolov3_darknet53_270e_coco/README.md
deleted file mode 100644
index 32670748db42336053d01e61bf087d00c03c7e06..0000000000000000000000000000000000000000
--- a/examples/Cpp/PaddleDetection/yolov3_darknet53_270e_coco/README.md
+++ /dev/null
@@ -1,21 +0,0 @@
-# YOLOv3 model on Paddle Serving
-
-([简体中文](./README_CN.md)|English)
-
-### Get Model
-```
-wget --no-check-certificate https://paddle-serving.bj.bcebos.com/pddet_demo/2.0/yolov3_darknet53_270e_coco.tar
-```
-
-### Start the service
-```
-tar xf yolov3_darknet53_270e_coco.tar
-python3 -m paddle_serving_server.serve --model serving_server --port 9494 --gpu_ids 0
-```
-
-This model supports TensorRT. If you want faster inference, please add the `--use_trt` flag.
-
-### Perform prediction
-```
-python3 test_client.py 000000570688.jpg
-```
diff --git a/examples/Cpp/PaddleDetection/yolov3_darknet53_270e_coco/README_CN.md b/examples/Cpp/PaddleDetection/yolov3_darknet53_270e_coco/README_CN.md
deleted file mode 100644
index 4185e0fe4963113ed0f9c0ea865705fd33226d1b..0000000000000000000000000000000000000000
--- a/examples/Cpp/PaddleDetection/yolov3_darknet53_270e_coco/README_CN.md
+++ /dev/null
@@ -1,22 +0,0 @@
-# 使用Paddle Serving部署YOLOv3模型
-
-(简体中文|[English](./README.md))
-
-## 获得模型
-```
-wget --no-check-certificate https://paddle-serving.bj.bcebos.com/pddet_demo/2.0/yolov3_darknet53_270e_coco.tar
-```
-
-
-### 启动服务
-```
-tar xf yolov3_darknet53_270e_coco.tar
-python3 -m paddle_serving_server.serve --model serving_server --port 9494 --gpu_ids 0
-```
-
-该模型支持TensorRT,如果想要更快的预测速度,可以开启`--use_trt`选项。
-
-### 执行预测
-```
-python3 test_client.py 000000570688.jpg
-```
diff --git a/examples/Cpp/PaddleDetection/yolov3_darknet53_270e_coco/label_list.txt b/examples/Cpp/PaddleDetection/yolov3_darknet53_270e_coco/label_list.txt
deleted file mode 100644
index 941cb4e1392266f6a6c09b1fdc5f79503b2e5df6..0000000000000000000000000000000000000000
--- a/examples/Cpp/PaddleDetection/yolov3_darknet53_270e_coco/label_list.txt
+++ /dev/null
@@ -1,80 +0,0 @@
-person
-bicycle
-car
-motorcycle
-airplane
-bus
-train
-truck
-boat
-traffic light
-fire hydrant
-stop sign
-parking meter
-bench
-bird
-cat
-dog
-horse
-sheep
-cow
-elephant
-bear
-zebra
-giraffe
-backpack
-umbrella
-handbag
-tie
-suitcase
-frisbee
-skis
-snowboard
-sports ball
-kite
-baseball bat
-baseball glove
-skateboard
-surfboard
-tennis racket
-bottle
-wine glass
-cup
-fork
-knife
-spoon
-bowl
-banana
-apple
-sandwich
-orange
-broccoli
-carrot
-hot dog
-pizza
-donut
-cake
-chair
-couch
-potted plant
-bed
-dining table
-toilet
-tv
-laptop
-mouse
-remote
-keyboard
-cell phone
-microwave
-oven
-toaster
-sink
-refrigerator
-book
-clock
-vase
-scissors
-teddy bear
-hair drier
-toothbrush
diff --git a/examples/Cpp/PaddleDetection/yolov3_darknet53_270e_coco/test_client.py b/examples/Cpp/PaddleDetection/yolov3_darknet53_270e_coco/test_client.py
deleted file mode 100644
index 04f21b32aebbf83694fa37aa30193ec5d5b7dbac..0000000000000000000000000000000000000000
--- a/examples/Cpp/PaddleDetection/yolov3_darknet53_270e_coco/test_client.py
+++ /dev/null
@@ -1,45 +0,0 @@
-# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import sys
-import numpy as np
-from paddle_serving_client import Client
-from paddle_serving_app.reader import *
-import cv2
-
-preprocess = DetectionSequential([
- DetectionFile2Image(),
- DetectionResize(
- (608, 608), False, interpolation=2),
- DetectionNormalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225], True),
- DetectionTranspose((2,0,1)),
-])
-
-postprocess = RCNNPostprocess("label_list.txt", "output")
-client = Client()
-
-client.load_client_config("serving_client/serving_client_conf.prototxt")
-client.connect(['127.0.0.1:9494'])
-
-im, im_info = preprocess(sys.argv[1])
-fetch_map = client.predict(
- feed={
- "image": im,
- "im_shape": np.array(list(im.shape[1:])).reshape(-1),
- "scale_factor": im_info['scale_factor'],
- },
- fetch=["save_infer_model/scale_0.tmp_1"],
- batch=False)
-fetch_map["image"] = sys.argv[1]
-postprocess(fetch_map)
diff --git a/examples/Cpp/PaddleDetection/yolov4/000000570688.jpg b/examples/Cpp/PaddleDetection/yolov4/000000570688.jpg
deleted file mode 100644
index cb304bd56c4010c08611a30dcca58ea9140cea54..0000000000000000000000000000000000000000
Binary files a/examples/Cpp/PaddleDetection/yolov4/000000570688.jpg and /dev/null differ
diff --git a/examples/Cpp/PaddleDetection/yolov4/README.md b/examples/Cpp/PaddleDetection/yolov4/README.md
deleted file mode 100644
index 0c7cfa7c0ffb4938456aa908015aff2daf367727..0000000000000000000000000000000000000000
--- a/examples/Cpp/PaddleDetection/yolov4/README.md
+++ /dev/null
@@ -1,23 +0,0 @@
-# Yolov4 Detection Service
-
-([简体中文](README_CN.md)|English)
-
-## Get Model
-
-```
-python3 -m paddle_serving_app.package --get_model yolov4
-tar -xzvf yolov4.tar.gz
-```
-
-## Start RPC Service
-
-```
-python3 -m paddle_serving_server.serve --model yolov4_model --port 9393 --gpu_ids 0
-```
-
-## Prediction
-
-```
-python3 test_client.py 000000570688.jpg
-```
-After the prediction is completed, a JSON file with the prediction result and a picture with the detection boxes drawn will be generated in the `./output` folder.
diff --git a/examples/Cpp/PaddleDetection/yolov4/README_CN.md b/examples/Cpp/PaddleDetection/yolov4/README_CN.md
deleted file mode 100644
index 1c773033418b9d072a7096a91d47b665b465c322..0000000000000000000000000000000000000000
--- a/examples/Cpp/PaddleDetection/yolov4/README_CN.md
+++ /dev/null
@@ -1,24 +0,0 @@
-# Yolov4 检测服务
-
-(简体中文|[English](README.md))
-
-## 获取模型
-
-```
-python3 -m paddle_serving_app.package --get_model yolov4
-tar -xzvf yolov4.tar.gz
-```
-
-## 启动RPC服务
-
-```
-python3 -m paddle_serving_server.serve --model yolov4_model --port 9393 --gpu_ids 0
-```
-
-## 预测
-
-```
-python3 test_client.py 000000570688.jpg
-```
-
-预测完成会在`./output`文件夹下生成保存预测结果的json文件以及标出检测结果框的图片。
diff --git a/examples/Cpp/PaddleDetection/yolov4/label_list.txt b/examples/Cpp/PaddleDetection/yolov4/label_list.txt
deleted file mode 100644
index 941cb4e1392266f6a6c09b1fdc5f79503b2e5df6..0000000000000000000000000000000000000000
--- a/examples/Cpp/PaddleDetection/yolov4/label_list.txt
+++ /dev/null
@@ -1,80 +0,0 @@
-person
-bicycle
-car
-motorcycle
-airplane
-bus
-train
-truck
-boat
-traffic light
-fire hydrant
-stop sign
-parking meter
-bench
-bird
-cat
-dog
-horse
-sheep
-cow
-elephant
-bear
-zebra
-giraffe
-backpack
-umbrella
-handbag
-tie
-suitcase
-frisbee
-skis
-snowboard
-sports ball
-kite
-baseball bat
-baseball glove
-skateboard
-surfboard
-tennis racket
-bottle
-wine glass
-cup
-fork
-knife
-spoon
-bowl
-banana
-apple
-sandwich
-orange
-broccoli
-carrot
-hot dog
-pizza
-donut
-cake
-chair
-couch
-potted plant
-bed
-dining table
-toilet
-tv
-laptop
-mouse
-remote
-keyboard
-cell phone
-microwave
-oven
-toaster
-sink
-refrigerator
-book
-clock
-vase
-scissors
-teddy bear
-hair drier
-toothbrush
diff --git a/examples/Cpp/PaddleDetection/yolov4/test_client.py b/examples/Cpp/PaddleDetection/yolov4/test_client.py
deleted file mode 100644
index dfcd58610c3b8df1a1579350c6bb756119cf6940..0000000000000000000000000000000000000000
--- a/examples/Cpp/PaddleDetection/yolov4/test_client.py
+++ /dev/null
@@ -1,41 +0,0 @@
-# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import sys
-import numpy as np
-from paddle_serving_client import Client
-from paddle_serving_app.reader import *
-import cv2
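-# Preprocessing: read the image, convert BGR to RGB, resize to the fixed
-# 608x608 network input, scale pixel values to [0, 1], and convert HWC to CHW.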
-preprocess = Sequential([
- File2Image(), BGR2RGB(), Resize(
- (608, 608), interpolation=cv2.INTER_LINEAR), Div(255.0), Transpose(
- (2, 0, 1))
-])
-
-postprocess = RCNNPostprocess("label_list.txt", "output", [608, 608])
-client = Client()
-
-client.load_client_config("yolov4_client/serving_client_conf.prototxt")
-client.connect(['127.0.0.1:9393'])
-
-im = preprocess(sys.argv[1])
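-# Note: unlike the dynamic-graph detection examples above, this model is fed
-# "im_size" (the network input height/width) instead of im_shape/scale_factor.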
-fetch_map = client.predict(
- feed={
- "image": im,
- "im_size": np.array(list(im.shape[1:])),
- },
- fetch=["save_infer_model/scale_0.tmp_0"],
- batch=False)
-fetch_map["image"] = sys.argv[1]
-postprocess(fetch_map)
diff --git a/examples/Cpp/PaddleNLP/bert/README.md b/examples/Cpp/PaddleNLP/bert/README.md
deleted file mode 100755
index 5d3242837f6d8be08f321d68890587e4bba725e8..0000000000000000000000000000000000000000
--- a/examples/Cpp/PaddleNLP/bert/README.md
+++ /dev/null
@@ -1,80 +0,0 @@
-## Bert as service
-
-([简体中文](./README_CN.md)|English)
-
-In the example, a BERT model is used for semantic understanding prediction, and the text is represented as a vector, which can be used for further analysis and prediction.
-If your Python version is 3.X, replace 'pip' with 'pip3' and 'python' with 'python3' in the following commands.
-
-### Getting Model
-Method 1:
-This example uses the [BERT Chinese Model](https://www.paddlepaddle.org.cn/hubdetail?name=bert_chinese_L-12_H-768_A-12&en_category=SemanticModel) from [PaddleHub](https://github.com/PaddlePaddle/PaddleHub).
-
-Install paddlehub first
-```
-pip3 install paddlehub
-```
-
-Run
-```
-python3 prepare_model.py 128
-```
-
-**PaddleHub only supports Python 3.5+**
-
-The 128 in the command above is the max_seq_len of the BERT model, i.e. the sample length after preprocessing.
-The config file and model file for the server side are saved in the bert_seq128_model folder.
-The config file generated for the client side is saved in the bert_seq128_client folder.
-
-Method 2:
-You can also download the above model from BOS (max_seq_len=128). After decompression, the config file and model file for the server side are stored in the bert_chinese_L-12_H-768_A-12_model folder, and the config file generated for the client side is stored in the bert_chinese_L-12_H-768_A-12_client folder:
-```shell
-wget https://paddle-serving.bj.bcebos.com/paddle_hub_models/text/SemanticModel/bert_chinese_L-12_H-768_A-12.tar.gz
-tar -xzf bert_chinese_L-12_H-768_A-12.tar.gz
-mv bert_chinese_L-12_H-768_A-12_model bert_seq128_model
-mv bert_chinese_L-12_H-768_A-12_client bert_seq128_client
-```
-If your model is bert_chinese_L-12_H-768_A-12_model, replace the 'bert_seq128_model' field in the following commands with 'bert_chinese_L-12_H-768_A-12_model' and the 'bert_seq128_client' field with 'bert_chinese_L-12_H-768_A-12_client'.
-
-### Getting Dict and Sample Dataset
-
-```
-sh get_data.sh
-```
-This script downloads the Chinese dictionary file vocab.txt and the Chinese sample data data-c.txt.
-
-### Inference Service (Supports BRPC-Client, GRPC-Client, HTTP-Client)
-To start the CPU inference service, run
-```
-python3 -m paddle_serving_server.serve --model bert_seq128_model/ --port 9292 #cpu inference service
-```
-Or, to start the GPU inference service, run
-```
-python3 -m paddle_serving_server.serve --model bert_seq128_model/ --port 9292 --gpu_ids 0 #launch gpu inference service at GPU 0
-```
-
-### BRPC-Client Inference
-
-Before prediction we need to install paddle_serving_app, which provides data preprocessing for the BERT model.
-```
-pip3 install paddle_serving_app
-```
-Run
-```
-head data-c.txt | python3 bert_client.py --model bert_seq128_client/serving_client_conf.prototxt
-```
-
-The client reads data from data-c.txt and sends prediction requests; the prediction result is the word-vector representation of the text. (Because the word vector contains a large amount of data, it is not printed.)
-
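For reference, a minimal sketch of such a BRPC client, assuming the ChineseBertReader preprocessor with max_seq_len=128, (128, 1) feed shapes, and a `pooled_output` fetch name; check these names against the client config generated above:

```python
# Minimal BRPC client sketch for the service above (names to be verified).
import sys
import numpy as np
from paddle_serving_client import Client
from paddle_serving_app.reader import ChineseBertReader

# Tokenize and pad each line of Chinese text to max_seq_len=128.
reader = ChineseBertReader({"max_seq_len": 128})
client = Client()
client.load_client_config("bert_seq128_client/serving_client_conf.prototxt")
client.connect(["127.0.0.1:9292"])

for line in sys.stdin:
    feed_dict = reader.process(line)
    # Reshape every feed field to the (seq_len, 1) layout the model expects.
    for key in feed_dict:
        feed_dict[key] = np.array(feed_dict[key]).reshape((128, 1))
    # "pooled_output" is assumed to be the sentence-vector output name.
    result = client.predict(feed=feed_dict, fetch=["pooled_output"], batch=False)
```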
-### GRPC-Client/HTTP-Client
-Run
-```
-head data-c.txt | python3 bert_httpclient.py --model bert_seq128_client/serving_client_conf.prototxt
-
-```
-
-
-## Benchmark
-``` shell
-bash benchmark.sh bert_seq128_model bert_seq128_client
-```
-The benchmark output log file is named `profile_log_bert_seq128_model`.
diff --git a/examples/Cpp/PaddleNLP/bert/README_CN.md b/examples/Cpp/PaddleNLP/bert/README_CN.md
deleted file mode 100755
index 42bc3ffab0ad51e304b11a78634b5a90415d1ace..0000000000000000000000000000000000000000
--- a/examples/Cpp/PaddleNLP/bert/README_CN.md
+++ /dev/null
@@ -1,85 +0,0 @@
-## 语义理解预测服务
-
-(简体中文|[English](./README.md))
-
-示例中采用BERT模型进行语义理解预测,将文本表示为向量的形式,可以用来做进一步的分析和预测。
-
-若使用python的版本为3.X, 将以下命令中的pip 替换为pip3, python替换为python3.
-### 获取模型
-方法1:
-示例中采用[Paddlehub](https://github.com/PaddlePaddle/PaddleHub)中的[BERT中文模型](https://www.paddlepaddle.org.cn/hubdetail?name=bert_chinese_L-12_H-768_A-12&en_category=SemanticModel)。
-请先安装paddlehub
-```
-pip3 install paddlehub
-```
-执行
-```
-python3 prepare_model.py 128
-```
-参数128表示BERT模型中的max_seq_len,即预处理后的样本长度。
-生成server端配置文件与模型文件,存放在bert_seq128_model文件夹。
-生成client端配置文件,存放在bert_seq128_client文件夹。
-
-方法2:
-您也可以从bos上直接下载上述模型(max_seq_len=128),解压后server端配置文件与模型文件存放在bert_chinese_L-12_H-768_A-12_model文件夹,client端配置文件存放在bert_chinese_L-12_H-768_A-12_client文件夹:
-```shell
-wget https://paddle-serving.bj.bcebos.com/paddle_hub_models/text/SemanticModel/bert_chinese_L-12_H-768_A-12.tar.gz
-tar -xzf bert_chinese_L-12_H-768_A-12.tar.gz
-mv bert_chinese_L-12_H-768_A-12_model bert_seq128_model
-mv bert_chinese_L-12_H-768_A-12_client bert_seq128_client
-```
-若使用bert_chinese_L-12_H-768_A-12_model模型,将下面命令中的bert_seq128_model字段替换为bert_chinese_L-12_H-768_A-12_model,bert_seq128_client字段替换为bert_chinese_L-12_H-768_A-12_client.
-
-
-
-
-### 获取词典和样例数据
-
-```
-sh get_data.sh
-```
-脚本将下载中文词典vocab.txt和中文样例数据data-c.txt
-
-### 启动预测服务(支持BRPC-Client、GRPC-Client、HTTP-Client三种方式访问)
-启动cpu预测服务,执行
-```
-python3 -m paddle_serving_server.serve --model bert_seq128_model/ --port 9292 #启动cpu预测服务
-
-```
-或者,启动gpu预测服务,执行
-```
-python3 -m paddle_serving_server.serve --model bert_seq128_model/ --port 9292 --gpu_ids 0 #在gpu 0上启动gpu预测服务
-
-```
-
-### 执行预测
-
-执行预测前需要安装paddle_serving_app,模块中提供了BERT模型的数据预处理方法。
-```
-pip3 install paddle_serving_app
-```
-
-#### BRPC-Client
-执行
-```
-head data-c.txt | python3 bert_client.py --model bert_seq128_client/serving_client_conf.prototxt
-
-```
-启动client读取data-c.txt中的数据进行预测,预测结果为文本的向量表示(由于数据较多,脚本中没有将输出进行打印),server端的地址在脚本中修改。
-
-#### GRPC-Client/HTTP-Client
-执行
-```
-head data-c.txt | python3 bert_httpclient.py --model bert_seq128_client/serving_client_conf.prototxt
-
-```
-
-## 性能测试
-``` shell
-bash benchmark.sh bert_seq128_model bert_seq128_client
-```
-性能测试的日志文件为profile_log_bert_seq128_model
-
-如需修改性能测试用例的参数,请修改benchmark.sh中的配置信息。
-
-注意:bert_seq128_model和bert_seq128_client路径后不要加'/'符号,示例需要在GPU机器上运行。
diff --git a/examples/Cpp/PaddleNLP/bert/batching.py b/examples/Cpp/PaddleNLP/bert/batching.py
deleted file mode 100644
index 5ec5f320cf5ec7bd0ab4624d9b39ef936553c774..0000000000000000000000000000000000000000
--- a/examples/Cpp/PaddleNLP/bert/batching.py
+++ /dev/null
@@ -1,126 +0,0 @@
-#coding:utf-8
-# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-"""Mask, padding and batching."""
-
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-
-import numpy as np
-
-
-def prepare_batch_data(insts,
- total_token_num,
- max_seq_len=128,
- pad_id=None,
- cls_id=None,
- sep_id=None,
- mask_id=None,
- return_input_mask=True,
- return_max_len=True,
- return_num_token=False):
-    """
-    1. generate the data Tensor
-    2. generate the position Tensor
-    3. generate the self-attention input mask, shape [batch_size, max_seq_len, 1]
-    """
-
- batch_src_ids = [inst[0] for inst in insts]
- batch_sent_ids = [inst[1] for inst in insts]
- batch_pos_ids = [inst[2] for inst in insts]
- labels_list = []
- # compatible with squad, whose example includes start/end positions,
- # or unique id
-
- for i in range(3, len(insts[0]), 1):
- labels = [inst[i] for inst in insts]
- labels = np.array(labels).astype("int64").reshape([-1, 1])
- labels_list.append(labels)
-
-    out = batch_src_ids
-    # pad to max_seq_len and build the input mask
- src_id, self_input_mask = pad_batch_data(
- out, pad_idx=pad_id, max_seq_len=max_seq_len, return_input_mask=True)
- pos_id = pad_batch_data(
- batch_pos_ids,
- pad_idx=pad_id,
- max_seq_len=max_seq_len,
- return_pos=False,
- return_input_mask=False)
- sent_id = pad_batch_data(
- batch_sent_ids,
- pad_idx=pad_id,
- max_seq_len=max_seq_len,
- return_pos=False,
- return_input_mask=False)
-
- return_list = [src_id, pos_id, sent_id, self_input_mask] + labels_list
-
- return return_list if len(return_list) > 1 else return_list[0]
-
-
-def pad_batch_data(insts,
- pad_idx=0,
- max_seq_len=128,
- return_pos=False,
- return_input_mask=False,
- return_max_len=False,
- return_num_token=False,
- return_seq_lens=False):
- """
- Pad the instances to the max sequence length in batch, and generate the
- corresponding position data and input mask.
- """
- return_list = []
- #max_len = max(len(inst) for inst in insts)
- max_len = max_seq_len
- # Any token included in dict can be used to pad, since the paddings' loss
- # will be masked out by weights and make no effect on parameter gradients.
-
- inst_data = np.array([
- list(inst) + list([pad_idx] * (max_len - len(inst))) for inst in insts
- ])
- return_list += [inst_data.astype("int64").reshape([-1, max_len, 1])]
-
- # position data
- if return_pos:
- inst_pos = np.array([
- list(range(0, len(inst))) + [pad_idx] * (max_len - len(inst))
- for inst in insts
- ])
-
- return_list += [inst_pos.astype("int64").reshape([-1, max_len, 1])]
-
- if return_input_mask:
- # This is used to avoid attention on paddings.
- input_mask_data = np.array(
- [[1] * len(inst) + [0] * (max_len - len(inst)) for inst in insts])
- input_mask_data = np.expand_dims(input_mask_data, axis=-1)
- return_list += [input_mask_data.astype("float32")]
-
- if return_max_len:
- return_list += [max_len]
-
- if return_num_token:
- num_token = 0
- for inst in insts:
- num_token += len(inst)
- return_list += [num_token]
-
- if return_seq_lens:
- seq_lens = np.array([len(inst) for inst in insts])
- return_list += [seq_lens.astype("int64").reshape([-1, 1])]
-
- return return_list if len(return_list) > 1 else return_list[0]
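-
-
-if __name__ == "__main__":
-    # Illustrative self-check (an addition, not part of the original example):
-    # pad a tiny batch of token-id lists to max_seq_len=8 and inspect shapes.
-    ids, mask = pad_batch_data(
-        [[1, 2, 3], [4, 5]], pad_idx=0, max_seq_len=8, return_input_mask=True)
-    print(ids.shape)   # (2, 8, 1)
-    print(mask.shape)  # (2, 8, 1)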
diff --git a/examples/Cpp/PaddleNLP/bert/benchmark.py b/examples/Cpp/PaddleNLP/bert/benchmark.py
deleted file mode 100644
index bdef982830cea34f5a9ea925e6759b48b86ce7a7..0000000000000000000000000000000000000000
--- a/examples/Cpp/PaddleNLP/bert/benchmark.py
+++ /dev/null
@@ -1,144 +0,0 @@
-# -*- coding: utf-8 -*-
-#
-# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# pylint: disable=doc-string-missing
-
-from __future__ import unicode_literals, absolute_import
-import os
-import sys
-import time
-import json
-import requests
-import numpy as np
-from paddle_serving_client import Client
-from paddle_serving_client.utils import MultiThreadRunner
-from paddle_serving_client.utils import benchmark_args, show_latency
-from paddle_serving_app.reader import ChineseBertReader
-
-args = benchmark_args()
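-
-# Typical invocation (mirrors the call in benchmark.sh):
-#   python3 benchmark.py --thread 4 --batch_size 1 \
-#       --model bert_seq128_client/serving_client_conf.prototxt --request rpc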
-
-
-def single_func(idx, resource):
-    turns = resource["turns"]
-    fin = open("data-c.txt")
- dataset = []
- for line in fin:
- dataset.append(line.strip())
-
- profile_flags = False
- latency_flags = False
- if os.getenv("FLAGS_profile_client"):
- profile_flags = True
- if os.getenv("FLAGS_serving_latency"):
- latency_flags = True
- latency_list = []
-
- if args.request == "rpc":
- reader = ChineseBertReader({"max_seq_len": 128})
- fetch = ["pooled_output"]
- client = Client()
- client.load_client_config(args.model)
- client.connect([resource["endpoint"][idx % len(resource["endpoint"])]])
- start = time.time()
- for i in range(turns):
- if args.batch_size >= 1:
- l_start = time.time()
- feed_batch = []
- b_start = time.time()
- for bi in range(args.batch_size):
- feed_dict = reader.process(dataset[bi])
- for key in feed_dict.keys():
- feed_dict[key] = np.array(feed_dict[key]).reshape(
- (1, 128, 1))
- feed_batch.append(feed_dict)
- b_end = time.time()
-
- if profile_flags:
- sys.stderr.write(
- "PROFILE\tpid:{}\tbert_pre_0:{} bert_pre_1:{}\n".format(
- os.getpid(),
- int(round(b_start * 1000000)),
- int(round(b_end * 1000000))))
- result = client.predict(
- feed=feed_batch, fetch=fetch, batch=True)
-
- l_end = time.time()
- if latency_flags:
- latency_list.append(l_end * 1000 - l_start * 1000)
- else:
- print("unsupport batch size {}".format(args.batch_size))
-
- elif args.request == "http":
- reader = ChineseBertReader({"max_seq_len": 128})
- fetch = ["pooled_output"]
- server = "http://" + resource["endpoint"][idx % len(resource[
- "endpoint"])] + "/bert/prediction"
- start = time.time()
- for i in range(turns):
- if args.batch_size >= 1:
- l_start = time.time()
- feed_batch = []
- b_start = time.time()
- for bi in range(args.batch_size):
- feed_batch.append({"words": dataset[bi]})
- req = json.dumps({"feed": feed_batch, "fetch": fetch})
- b_end = time.time()
-
- if profile_flags:
- sys.stderr.write(
- "PROFILE\tpid:{}\tbert_pre_0:{} bert_pre_1:{}\n".format(
- os.getpid(),
- int(round(b_start * 1000000)),
- int(round(b_end * 1000000))))
- result = requests.post(
- server,
- data=req,
- headers={"Content-Type": "application/json"})
- l_end = time.time()
- if latency_flags:
- latency_list.append(l_end * 1000 - l_start * 1000)
- else:
- print("unsupport batch size {}".format(args.batch_size))
-
- else:
- raise ValueError("not implemented {} request".format(args.request))
- end = time.time()
- if latency_flags:
- return [[end - start], latency_list]
- else:
- return [[end - start]]
-
-
-if __name__ == '__main__':
- multi_thread_runner = MultiThreadRunner()
- endpoint_list = ["127.0.0.1:9292", "127.0.0.1:9293"]
- turns = 100
- start = time.time()
- result = multi_thread_runner.run(
- single_func, args.thread, {"endpoint": endpoint_list,
- "turns": turns})
- end = time.time()
- total_cost = end - start
-
- avg_cost = 0
- for i in range(args.thread):
- avg_cost += result[0][i]
- avg_cost = avg_cost / args.thread
-
- print("total cost: {}s".format(total_cost))
- print("each thread cost: {}s. ".format(avg_cost))
- print("qps: {}samples/s".format(args.batch_size * args.thread * turns /
- total_cost))
- if os.getenv("FLAGS_serving_latency"):
- show_latency(result[1])
diff --git a/examples/Cpp/PaddleNLP/bert/benchmark.sh b/examples/Cpp/PaddleNLP/bert/benchmark.sh
deleted file mode 100644
index 7e374db3ee5a5bdccdc75dc2884b9dbbfcb60eca..0000000000000000000000000000000000000000
--- a/examples/Cpp/PaddleNLP/bert/benchmark.sh
+++ /dev/null
@@ -1,55 +0,0 @@
-rm profile_log*
-export CUDA_VISIBLE_DEVICES=0,1
-export FLAGS_profile_server=1
-export FLAGS_profile_client=1
-export FLAGS_serving_latency=1
-
-gpu_id=0
-#save cpu and gpu utilization log
-if [ -d utilization ];then
- rm -rf utilization
-else
- mkdir utilization
-fi
-#start server
-$PYTHONROOT/bin/python3 -m paddle_serving_server.serve --model $1 --port 9292 --thread 4 --gpu_ids 0,1 --mem_optim --ir_optim > elog 2>&1 &
-sleep 5
-
-#warm up
-$PYTHONROOT/bin/python3 benchmark.py --thread 4 --batch_size 1 --model $2/serving_client_conf.prototxt --request rpc > profile 2>&1
-echo -e "import psutil\nimport time\nwhile True:\n\tcpu_res = psutil.cpu_percent()\n\twith open('cpu.txt', 'a+') as f:\n\t\tf.write(f'{cpu_res}\\\n')\n\ttime.sleep(0.1)" > cpu.py
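-# cpu.py (written above) samples total CPU utilization every 100 ms into cpu.txt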
-for thread_num in 1 4 8 16
-do
-for batch_size in 1 4 16 64
-do
- job_bt=`date '+%Y%m%d%H%M%S'`
- nvidia-smi --id=0 --query-compute-apps=used_memory --format=csv -lms 100 > gpu_memory_use.log 2>&1 &
- nvidia-smi --id=0 --query-gpu=utilization.gpu --format=csv -lms 100 > gpu_utilization.log 2>&1 &
- rm -rf cpu.txt
- $PYTHONROOT/bin/python3 cpu.py &
- $PYTHONROOT/bin/python3 benchmark.py --thread $thread_num --batch_size $batch_size --model $2/serving_client_conf.prototxt --request rpc > profile 2>&1
- kill `ps -ef|grep used_memory|awk '{print $2}'` > /dev/null
- kill `ps -ef|grep utilization.gpu|awk '{print $2}'` > /dev/null
- kill `ps -ef|grep cpu.py|awk '{print $2}'` > /dev/null
- echo "model_name:" $1
- echo "thread_num:" $thread_num
- echo "batch_size:" $batch_size
- echo "=================Done===================="
- echo "model_name:$1" >> profile_log_$1
- echo "batch_size:$batch_size" >> profile_log_$1
- job_et=`date '+%Y%m%d%H%M%S'`
- awk 'BEGIN {max = 0} {if(NR>1){if ($1 > max) max=$1}} END {print "CPU_UTILIZATION:", max}' cpu.txt >> profile_log_$1
- awk 'BEGIN {max = 0} {if(NR>1){if ($1 > max) max=$1}} END {print "MAX_GPU_MEMORY:", max}' gpu_memory_use.log >> profile_log_$1
- awk 'BEGIN {max = 0} {if(NR>1){if ($1 > max) max=$1}} END {print "GPU_UTILIZATION:", max}' gpu_utilization.log >> profile_log_$1
-    rm -rf gpu_memory_use.log gpu_utilization.log
- $PYTHONROOT/bin/python3 ../util/show_profile.py profile $thread_num >> profile_log_$1
- tail -n 8 profile >> profile_log_$1
- echo "" >> profile_log_$1
-done
-done
-
-# split the aggregated profile log into per-run files
-awk 'BEGIN{RS="\n\n"}{i++}{print > "bert_log_"i}' profile_log_$1
-mkdir bert_log && mv bert_log_* bert_log
-ps -ef|grep 'serving'|grep -v grep|cut -c 9-15 | xargs kill -9
diff --git a/examples/Cpp/PaddleNLP/bert/benchmark_with_profile.sh b/examples/Cpp/PaddleNLP/bert/benchmark_with_profile.sh
deleted file mode 100644
index f36fbbce917d2956195d08e7638e06d84caf961a..0000000000000000000000000000000000000000
--- a/examples/Cpp/PaddleNLP/bert/benchmark_with_profile.sh
+++ /dev/null
@@ -1,10 +0,0 @@
-export CUDA_VISIBLE_DEVICES=0,1
-python -m paddle_serving_server.serve --model bert_seq20_model/ --port 9295 --thread 4 --gpu_ids 0,1 2> elog > stdlog &
-export FLAGS_profile_client=1
-export FLAGS_profile_server=1
-sleep 5
-thread_num=4
-python benchmark_batch.py --thread ${thread_num} --batch_size 64 --model serving_client_conf/serving_client_conf.prototxt 2> profile
-
-python show_profile.py profile ${thread_num}
-python timeline_trace.py profile trace
diff --git a/examples/Cpp/PaddleNLP/bert/bert_client.py b/examples/Cpp/PaddleNLP/bert/bert_client.py
deleted file mode 100644
index 4111589b3ddfde980e415fbac1a5f38f4abafada..0000000000000000000000000000000000000000
--- a/examples/Cpp/PaddleNLP/bert/bert_client.py
+++ /dev/null
@@ -1,37 +0,0 @@
-# coding:utf-8
-# pylint: disable=doc-string-missing
-# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import sys
-from paddle_serving_client import Client
-from paddle_serving_client.utils import benchmark_args
-from paddle_serving_app.reader import ChineseBertReader
-import numpy as np
-args = benchmark_args()
-
-reader = ChineseBertReader({"max_seq_len": 128})
-fetch = ["pooled_output"]
-endpoint_list = ['127.0.0.1:9292']
-client = Client()
-client.load_client_config(args.model)
-client.connect(endpoint_list)
-
-for line in sys.stdin:
- feed_dict = reader.process(line)
- for key in feed_dict.keys():
- feed_dict[key] = np.array(feed_dict[key]).reshape((128, 1))
-    result = client.predict(feed=feed_dict, fetch=fetch, batch=False)
-    print(result)
diff --git a/examples/Cpp/PaddleNLP/bert/bert_gpu_server.py b/examples/Cpp/PaddleNLP/bert/bert_gpu_server.py
deleted file mode 100644
index 7708a078636fd876c40e88d1441bc711d599f8a6..0000000000000000000000000000000000000000
--- a/examples/Cpp/PaddleNLP/bert/bert_gpu_server.py
+++ /dev/null
@@ -1,42 +0,0 @@
-# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import sys
-from paddle_serving_server import OpMaker
-from paddle_serving_server import OpSeqMaker
-from paddle_serving_server import Server
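-
-# Usage: python3 bert_gpu_server.py <model_config_dir> <port> <gpu_id>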
-
-op_maker = OpMaker()
-read_op = op_maker.create('general_reader')
-general_infer_op = op_maker.create('general_infer')
-general_response_op = op_maker.create('general_response')
-
-op_seq_maker = OpSeqMaker()
-op_seq_maker.add_op(read_op)
-op_seq_maker.add_op(general_infer_op)
-op_seq_maker.add_op(general_response_op)
-
-server = Server()
-server.set_op_sequence(op_seq_maker.get_op_sequence())
-server.set_num_threads(8)
-server.set_memory_optimize(True)
-
-server.load_model_config(sys.argv[1])
-port = int(sys.argv[2])
-gpuid = sys.argv[3]
-server.set_gpuid(gpuid)  # the GPU id comes from the command line
-server.prepare_server(workdir="work_dir1", port=port, device="gpu")
-server.run_server()
diff --git a/examples/Cpp/PaddleNLP/bert/bert_httpclient.py b/examples/Cpp/PaddleNLP/bert/bert_httpclient.py
deleted file mode 100644
index 255c78ec0ca7e33ddd1486f05cf6d9d225a5f406..0000000000000000000000000000000000000000
--- a/examples/Cpp/PaddleNLP/bert/bert_httpclient.py
+++ /dev/null
@@ -1,58 +0,0 @@
-# coding:utf-8
-# pylint: disable=doc-string-missing
-# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import sys
-from paddle_serving_client import HttpClient
-from paddle_serving_client.utils import benchmark_args
-from paddle_serving_app.reader import ChineseBertReader
-import numpy as np
-args = benchmark_args()
-
-reader = ChineseBertReader({"max_seq_len": 128})
-fetch = ["pooled_output"]
-endpoint_list = ['127.0.0.1:9292']
-client = HttpClient()
-client.load_client_config(args.model)
-'''
-To use the GRPC client, call set_use_grpc_client(True),
-or call client.grpc_client_predict(...) directly.
-For the HTTP client, call set_use_grpc_client(False) (the default),
-or call client.http_client_predict(...) directly.
-'''
-#client.set_use_grpc_client(True)
-'''
-To enable the Encrypt Module, uncomment the following line.
-'''
-#client.use_key("./key")
-'''
-To enable compression, uncomment the following lines.
-'''
-#client.set_response_compress(True)
-#client.set_request_compress(True)
-'''
-We recommend the Proto data format in the HTTP body; set True (the default).
-To use the JSON data format in the HTTP body, set False.
-'''
-#client.set_http_proto(True)
-client.connect(endpoint_list)
-
-for line in sys.stdin:
- feed_dict = reader.process(line)
- for key in feed_dict.keys():
- feed_dict[key] = np.array(feed_dict[key]).reshape((128, 1))
-    result = client.predict(feed=feed_dict, fetch=fetch, batch=False)
-    print(result)
diff --git a/examples/Cpp/PaddleNLP/bert/bert_reader.py b/examples/Cpp/PaddleNLP/bert/bert_reader.py
deleted file mode 100644
index 366c19b5dc7de7e14979124272329b8ba00fdb3c..0000000000000000000000000000000000000000
--- a/examples/Cpp/PaddleNLP/bert/bert_reader.py
+++ /dev/null
@@ -1,72 +0,0 @@
-# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# pylint: disable=doc-string-missing
-from batching import pad_batch_data
-import tokenization
-
-
-class BertReader():
- def __init__(self, vocab_file="", max_seq_len=128):
- self.vocab_file = vocab_file
- self.tokenizer = tokenization.FullTokenizer(vocab_file=vocab_file)
- self.max_seq_len = max_seq_len
- self.vocab = self.tokenizer.vocab
- self.pad_id = self.vocab["[PAD]"]
- self.cls_id = self.vocab["[CLS]"]
- self.sep_id = self.vocab["[SEP]"]
- self.mask_id = self.vocab["[MASK]"]
-
- def pad_batch(self, token_ids, text_type_ids, position_ids):
- batch_token_ids = [token_ids]
- batch_text_type_ids = [text_type_ids]
- batch_position_ids = [position_ids]
-
- padded_token_ids, input_mask = pad_batch_data(
- batch_token_ids,
- max_seq_len=self.max_seq_len,
- pad_idx=self.pad_id,
- return_input_mask=True)
- padded_text_type_ids = pad_batch_data(
- batch_text_type_ids,
- max_seq_len=self.max_seq_len,
- pad_idx=self.pad_id)
- padded_position_ids = pad_batch_data(
- batch_position_ids,
- max_seq_len=self.max_seq_len,
- pad_idx=self.pad_id)
- return padded_token_ids, padded_position_ids, padded_text_type_ids, input_mask
-
- def process(self, sent):
- text_a = tokenization.convert_to_unicode(sent)
- tokens_a = self.tokenizer.tokenize(text_a)
- if len(tokens_a) > self.max_seq_len - 2:
- tokens_a = tokens_a[0:(self.max_seq_len - 2)]
- tokens = []
- text_type_ids = []
- tokens.append("[CLS]")
- text_type_ids.append(0)
-        for token in tokens_a:
-            tokens.append(token)
-            text_type_ids.append(0)
-        # append the trailing [SEP] marker; the truncation above reserves room for it
-        tokens.append("[SEP]")
-        text_type_ids.append(0)
- token_ids = self.tokenizer.convert_tokens_to_ids(tokens)
- position_ids = list(range(len(token_ids)))
- p_token_ids, p_pos_ids, p_text_type_ids, input_mask = \
- self.pad_batch(token_ids, text_type_ids, position_ids)
- feed_result = {
- "input_ids": p_token_ids.reshape(-1).tolist(),
- "position_ids": p_pos_ids.reshape(-1).tolist(),
- "segment_ids": p_text_type_ids.reshape(-1).tolist(),
- "input_mask": input_mask.reshape(-1).tolist()
- }
- return feed_result
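-
-
-# Minimal usage sketch (assumes the vocab.txt downloaded by get_data.sh):
-#   reader = BertReader(vocab_file="vocab.txt", max_seq_len=128)
-#   feed = reader.process("我爱北京天安门")
-#   print(len(feed["input_ids"]))  # 128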
diff --git a/examples/Cpp/PaddleNLP/bert/bert_server.py b/examples/Cpp/PaddleNLP/bert/bert_server.py
deleted file mode 100644
index 35d38be0cac50b899b58085c7f103f32537859c4..0000000000000000000000000000000000000000
--- a/examples/Cpp/PaddleNLP/bert/bert_server.py
+++ /dev/null
@@ -1,38 +0,0 @@
-# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import sys
-from paddle_serving_server import OpMaker
-from paddle_serving_server import OpSeqMaker
-from paddle_serving_server import Server
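-
-# Usage: python3 bert_server.py <model_config_dir> <port>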
-
-op_maker = OpMaker()
-read_op = op_maker.create('general_reader')
-general_infer_op = op_maker.create('general_infer')
-general_response_op = op_maker.create('general_response')
-
-op_seq_maker = OpSeqMaker()
-op_seq_maker.add_op(read_op)
-op_seq_maker.add_op(general_infer_op)
-op_seq_maker.add_op(general_response_op)
-
-server = Server()
-server.set_op_sequence(op_seq_maker.get_op_sequence())
-server.set_num_threads(4)
-
-server.load_model_config(sys.argv[1])
-port = int(sys.argv[2])
-server.prepare_server(workdir="work_dir1", port=port, device="cpu")
-server.run_server()
diff --git a/examples/Cpp/PaddleNLP/bert/get_data.sh b/examples/Cpp/PaddleNLP/bert/get_data.sh
deleted file mode 100644
index 5e17d10d144c5c9e202a5cb9c4b90a62caeeed9a..0000000000000000000000000000000000000000
--- a/examples/Cpp/PaddleNLP/bert/get_data.sh
+++ /dev/null
@@ -1,2 +0,0 @@
-wget https://paddle-serving.bj.bcebos.com/bert_example/data-c.txt --no-check-certificate
-wget https://paddle-serving.bj.bcebos.com/bert_example/vocab.txt --no-check-certificate
diff --git a/examples/Cpp/PaddleNLP/bert/prepare_model.py b/examples/Cpp/PaddleNLP/bert/prepare_model.py
deleted file mode 100644
index e883b6b15746946a7f3412fe64c5933c4cfb37ab..0000000000000000000000000000000000000000
--- a/examples/Cpp/PaddleNLP/bert/prepare_model.py
+++ /dev/null
@@ -1,49 +0,0 @@
-# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# pylint: disable=doc-string-missing
-import paddlehub as hub
-import paddle.fluid as fluid
-import sys
-import paddle_serving_client.io as serving_io
-import paddle
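-
-# Usage: python3 prepare_model.py <max_seq_len>   (the README uses 128)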
-
-paddle.enable_static()
-model_name = "bert_chinese_L-12_H-768_A-12"
-module = hub.Module(name=model_name)
-inputs, outputs, program = module.context(
- trainable=True, max_seq_len=int(sys.argv[1]))
-place = fluid.core_avx.CPUPlace()
-exe = fluid.Executor(place)
-input_ids = inputs["input_ids"]
-position_ids = inputs["position_ids"]
-segment_ids = inputs["segment_ids"]
-input_mask = inputs["input_mask"]
-pooled_output = outputs["pooled_output"]
-sequence_output = outputs["sequence_output"]
-
-serving_io.save_model(
- "bert_seq{}_model".format(sys.argv[1]),
- "bert_seq{}_client".format(sys.argv[1]), {
- "input_ids": input_ids,
- "position_ids": position_ids,
- "segment_ids": segment_ids,
- "input_mask": input_mask,
- }, {"pooled_output": pooled_output,
- "sequence_output": sequence_output}, program)
diff --git a/examples/Cpp/PaddleNLP/bert/test_multi_fetch_client.py b/examples/Cpp/PaddleNLP/bert/test_multi_fetch_client.py
deleted file mode 100644
index 1ee540097c32429348fbeb504278fb986bd3a9e7..0000000000000000000000000000000000000000
--- a/examples/Cpp/PaddleNLP/bert/test_multi_fetch_client.py
+++ /dev/null
@@ -1,53 +0,0 @@
-# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from paddle_serving_client import Client
-from paddle_serving_app.reader import ChineseBertReader
-import sys
-import numpy as np
-
-client = Client()
-client.load_client_config("./bert_seq32_client/serving_client_conf.prototxt")
-client.connect(["127.0.0.1:9292"])
-
-reader = ChineseBertReader({"max_seq_len": 32})
-fetch = ["sequence_10", "sequence_12", "pooled_output"]
-expected_shape = {
- "sequence_10": (4, 32, 768),
- "sequence_12": (4, 32, 768),
- "pooled_output": (4, 768)
-}
-batch_size = 4
-feed_batch = {}
-
-batch_len = 0
-for line in sys.stdin:
-    feed = reader.process(line)
-    if batch_len == 0:
-        for key in feed.keys():
-            val_len = len(feed[key])
-            feed_batch[key] = np.array(feed[key]).reshape((1, val_len, 1))
-        batch_len = 1
-        continue
-    for key in feed.keys():
-        val_len = len(feed[key])
-        feed_batch[key] = np.concatenate(
-            [feed_batch[key], np.array(feed[key]).reshape((1, val_len, 1))])
-    batch_len += 1
-    if batch_len == batch_size:
-        fetch_map = client.predict(feed=feed_batch, fetch=fetch, batch=True)
-        feed_batch = {}
-        batch_len = 0
-        for var_name in fetch:
-            if fetch_map[var_name].shape != expected_shape[var_name]:
-                print("fetch var {} shape error.".format(var_name))
-                sys.exit(1)
diff --git a/examples/Cpp/PaddleNLP/bert/tokenization.py b/examples/Cpp/PaddleNLP/bert/tokenization.py
deleted file mode 100644
index 0d84ed38468207e853e5270a59179b4274900cb0..0000000000000000000000000000000000000000
--- a/examples/Cpp/PaddleNLP/bert/tokenization.py
+++ /dev/null
@@ -1,441 +0,0 @@
-# coding=utf-8
-# Copyright 2018 The Google AI Language Team Authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-"""Tokenization classes."""
-
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-
-import collections
-import io
-import unicodedata
-import six
-import sentencepiece as spm
-import pickle
-
-
-def convert_to_unicode(text): # pylint: disable=doc-string-with-all-args
- """Converts `text` to Unicode (if it's not already), assuming utf-8 input."""
- if six.PY3:
- if isinstance(text, str):
- return text
- elif isinstance(text, bytes):
- return text.decode("utf-8", "ignore")
- else:
- raise ValueError("Unsupported string type: %s" % (type(text)))
- elif six.PY2:
- if isinstance(text, str):
- return text.decode("utf-8", "ignore")
- elif isinstance(text, unicode): # noqa
- return text
- else:
- raise ValueError("Unsupported string type: %s" % (type(text)))
- else:
-        raise ValueError("Not running on Python 2 or Python 3?")
-
-
-def printable_text(text): # pylint: disable=doc-string-with-all-args
- """Returns text encoded in a way suitable for print or `tf.logging`."""
-
- # These functions want `str` for both Python2 and Python3, but in one case
- # it's a Unicode string and in the other it's a byte string.
- if six.PY3:
- if isinstance(text, str):
- return text
- elif isinstance(text, bytes):
- return text.decode("utf-8", "ignore")
- else:
- raise ValueError("Unsupported string type: %s" % (type(text)))
- elif six.PY2:
- if isinstance(text, str):
- return text
- elif isinstance(text, unicode): # noqa
- return text.encode("utf-8")
- else:
- raise ValueError("Unsupported string type: %s" % (type(text)))
- else:
-        raise ValueError("Not running on Python 2 or Python 3?")
-
-
-def load_vocab(vocab_file): # pylint: disable=doc-string-with-all-args, doc-string-with-returns
- """Loads a vocabulary file into a dictionary."""
- vocab = collections.OrderedDict()
- fin = io.open(vocab_file, "r", encoding="UTF-8")
- for num, line in enumerate(fin):
- items = convert_to_unicode(line.strip()).split("\t")
- if len(items) > 2:
- break
- token = items[0]
- index = items[1] if len(items) == 2 else num
- token = token.strip()
- vocab[token] = int(index)
- fin.close()
- return vocab
-
-
-def convert_by_vocab(vocab, items):
- """Converts a sequence of [tokens|ids] using the vocab."""
- output = []
- for item in items:
- output.append(vocab[item])
- return output
-
-
-def convert_tokens_to_ids(vocab, tokens):
- return convert_by_vocab(vocab, tokens)
-
-
-def convert_ids_to_tokens(inv_vocab, ids):
- return convert_by_vocab(inv_vocab, ids)
-
-
-def whitespace_tokenize(text):
-    """Runs basic whitespace cleaning and splitting on a piece of text."""
- text = text.strip()
- if not text:
- return []
- tokens = text.split()
- return tokens
-
-
-class FullTokenizer(object):
-    """Runs end-to-end tokenization."""
-
- def __init__(self,
- vocab_file,
- do_lower_case=True,
- use_sentence_piece_vocab=False):
- self.vocab = load_vocab(vocab_file)
- self.inv_vocab = {v: k for k, v in self.vocab.items()}
- self.basic_tokenizer = BasicTokenizer(do_lower_case=do_lower_case)
- self.use_sentence_piece_vocab = use_sentence_piece_vocab
- self.wordpiece_tokenizer = WordpieceTokenizer(
- vocab=self.vocab,
- use_sentence_piece_vocab=self.use_sentence_piece_vocab)
-
- def tokenize(self, text):
- split_tokens = []
- for token in self.basic_tokenizer.tokenize(text):
- for sub_token in self.wordpiece_tokenizer.tokenize(token):
- split_tokens.append(sub_token)
-
- return split_tokens
-
- def convert_tokens_to_ids(self, tokens):
- return convert_by_vocab(self.vocab, tokens)
-
- def convert_ids_to_tokens(self, ids):
- return convert_by_vocab(self.inv_vocab, ids)
-
-
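-# Minimal usage sketch for FullTokenizer (assumes a BERT-style vocab.txt,
-# e.g. the one fetched by get_data.sh):
-#   tokenizer = FullTokenizer(vocab_file="vocab.txt")
-#   tokens = tokenizer.tokenize(u"unaffable")
-#   # -> e.g. ["un", "##aff", "##able"], depending on the vocab
-#   ids = tokenizer.convert_tokens_to_ids(tokens)
-
-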
-class CharTokenizer(object):
-    """Runs end-to-end tokenization."""
-
- def __init__(self, vocab_file, do_lower_case=True):
- self.vocab = load_vocab(vocab_file)
- self.inv_vocab = {v: k for k, v in self.vocab.items()}
- self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab)
-
- def tokenize(self, text):
- split_tokens = []
- for token in text.lower().split(" "):
- for sub_token in self.wordpiece_tokenizer.tokenize(token):
- split_tokens.append(sub_token)
-
- return split_tokens
-
- def convert_tokens_to_ids(self, tokens):
- return convert_by_vocab(self.vocab, tokens)
-
- def convert_ids_to_tokens(self, ids):
- return convert_by_vocab(self.inv_vocab, ids)
-
-
-class WSSPTokenizer(object): # pylint: disable=doc-string-missing
- def __init__(self, vocab_file, sp_model_dir, word_dict, ws=True,
- lower=True):
- self.vocab = load_vocab(vocab_file)
- self.inv_vocab = {v: k for k, v in self.vocab.items()}
- self.ws = ws
- self.lower = lower
- self.dict = pickle.load(open(word_dict, 'rb'))
- self.sp_model = spm.SentencePieceProcessor()
- self.window_size = 5
- self.sp_model.Load(sp_model_dir)
-
- def cut(self, chars): # pylint: disable=doc-string-missing
- words = []
- idx = 0
- while idx < len(chars):
- matched = False
- for i in range(self.window_size, 0, -1):
- cand = chars[idx:idx + i]
- if cand in self.dict:
- words.append(cand)
- matched = True
- break
- if not matched:
- i = 1
- words.append(chars[idx])
- idx += i
- return words
-
- def tokenize(self, text, unk_token="[UNK]"): # pylint: disable=doc-string-missing
- text = convert_to_unicode(text)
- if self.ws:
- text = [s for s in self.cut(text) if s != ' ']
- else:
- text = text.split(' ')
- if self.lower:
- text = [s.lower() for s in text]
- text = ' '.join(text)
- tokens = self.sp_model.EncodeAsPieces(text)
- in_vocab_tokens = []
- for token in tokens:
- if token in self.vocab:
- in_vocab_tokens.append(token)
- else:
- in_vocab_tokens.append(unk_token)
- return in_vocab_tokens
-
- def convert_tokens_to_ids(self, tokens):
- return convert_by_vocab(self.vocab, tokens)
-
- def convert_ids_to_tokens(self, ids):
- return convert_by_vocab(self.inv_vocab, ids)
-
-
-class BasicTokenizer(object):
- """Runs basic tokenization (punctuation splitting, lower casing, etc.)."""
-
- def __init__(self, do_lower_case=True):
- """Constructs a BasicTokenizer.
-
- Args:
- do_lower_case: Whether to lower case the input.
- """
- self.do_lower_case = do_lower_case
-
- def tokenize(self, text): # pylint: disable=doc-string-with-all-args, doc-string-with-returns
- """Tokenizes a piece of text."""
- text = convert_to_unicode(text)
- text = self._clean_text(text)
-
- # This was added on November 1st, 2018 for the multilingual and Chinese
- # models. This is also applied to the English models now, but it doesn't
- # matter since the English models were not trained on any Chinese data
- # and generally don't have any Chinese data in them (there are Chinese
- # characters in the vocabulary because Wikipedia does have some Chinese
- # words in the English Wikipedia.).
- text = self._tokenize_chinese_chars(text)
-
- orig_tokens = whitespace_tokenize(text)
- split_tokens = []
- for token in orig_tokens:
- if self.do_lower_case:
- token = token.lower()
- token = self._run_strip_accents(token)
- split_tokens.extend(self._run_split_on_punc(token))
-
- output_tokens = whitespace_tokenize(" ".join(split_tokens))
- return output_tokens
-
- def _run_strip_accents(self, text):
- """Strips accents from a piece of text."""
- text = unicodedata.normalize("NFD", text)
- output = []
- for char in text:
- cat = unicodedata.category(char)
- if cat == "Mn":
- continue
- output.append(char)
- return "".join(output)
-
- def _run_split_on_punc(self, text):
- """Splits punctuation on a piece of text."""
- chars = list(text)
- i = 0
- start_new_word = True
- output = []
- while i < len(chars):
- char = chars[i]
- if _is_punctuation(char):
- output.append([char])
- start_new_word = True
- else:
- if start_new_word:
- output.append([])
- start_new_word = False
- output[-1].append(char)
- i += 1
-
- return ["".join(x) for x in output]
-
- def _tokenize_chinese_chars(self, text):
- """Adds whitespace around any CJK character."""
- output = []
- for char in text:
- cp = ord(char)
- if self._is_chinese_char(cp):
- output.append(" ")
- output.append(char)
- output.append(" ")
- else:
- output.append(char)
- return "".join(output)
-
- def _is_chinese_char(self, cp):
- """Checks whether CP is the codepoint of a CJK character."""
- # This defines a "chinese character" as anything in the CJK Unicode block:
- # https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
- #
- # Note that the CJK Unicode block is NOT all Japanese and Korean characters,
- # despite its name. The modern Korean Hangul alphabet is a different block,
- # as is Japanese Hiragana and Katakana. Those alphabets are used to write
- # space-separated words, so they are not treated specially and handled
-        # like all of the other languages.
- if ((cp >= 0x4E00 and cp <= 0x9FFF) or #
- (cp >= 0x3400 and cp <= 0x4DBF) or #
- (cp >= 0x20000 and cp <= 0x2A6DF) or #
- (cp >= 0x2A700 and cp <= 0x2B73F) or #
- (cp >= 0x2B740 and cp <= 0x2B81F) or #
- (cp >= 0x2B820 and cp <= 0x2CEAF) or
- (cp >= 0xF900 and cp <= 0xFAFF) or #
- (cp >= 0x2F800 and cp <= 0x2FA1F)): #
- return True
-
- return False
-
- def _clean_text(self, text):
- """Performs invalid character removal and whitespace cleanup on text."""
- output = []
- for char in text:
- cp = ord(char)
- if cp == 0 or cp == 0xfffd or _is_control(char):
- continue
- if _is_whitespace(char):
- output.append(" ")
- else:
- output.append(char)
- return "".join(output)
-
-
-class WordpieceTokenizer(object):
-    """Runs WordPiece tokenization."""
-
- def __init__(self,
- vocab,
- unk_token="[UNK]",
- max_input_chars_per_word=100,
- use_sentence_piece_vocab=False):
- self.vocab = vocab
- self.unk_token = unk_token
- self.max_input_chars_per_word = max_input_chars_per_word
- self.use_sentence_piece_vocab = use_sentence_piece_vocab
-
- def tokenize(self, text): # pylint: disable=doc-string-with-all-args
- """Tokenizes a piece of text into its word pieces.
-
- This uses a greedy longest-match-first algorithm to perform tokenization
- using the given vocabulary.
-
- For example:
- input = "unaffable"
- output = ["un", "##aff", "##able"]
-
- Args:
-            text: A single token or whitespace separated tokens. This should have
-                already been passed through `BasicTokenizer`.
-
- Returns:
- A list of wordpiece tokens.
- """
-
- text = convert_to_unicode(text)
-
- output_tokens = []
- for token in whitespace_tokenize(text):
- chars = list(token)
- if len(chars) > self.max_input_chars_per_word:
- output_tokens.append(self.unk_token)
- continue
-
- is_bad = False
- start = 0
- sub_tokens = []
- while start < len(chars):
- end = len(chars)
- cur_substr = None
- while start < end:
- substr = "".join(chars[start:end])
- if start == 0 and self.use_sentence_piece_vocab:
- substr = u'\u2581' + substr
- if start > 0 and not self.use_sentence_piece_vocab:
- substr = "##" + substr
- if substr in self.vocab:
- cur_substr = substr
- break
- end -= 1
- if cur_substr is None:
- is_bad = True
- break
- sub_tokens.append(cur_substr)
- start = end
-
- if is_bad:
- output_tokens.append(self.unk_token)
- else:
- output_tokens.extend(sub_tokens)
- return output_tokens
-
-
-def _is_whitespace(char):
- """Checks whether `chars` is a whitespace character."""
-    # \t, \n, and \r are technically control characters but we treat them
- # as whitespace since they are generally considered as such.
- if char == " " or char == "\t" or char == "\n" or char == "\r":
- return True
- cat = unicodedata.category(char)
- if cat == "Zs":
- return True
- return False
-
-
-def _is_control(char):
- """Checks whether `chars` is a control character."""
- # These are technically control characters but we count them as whitespace
- # characters.
- if char == "\t" or char == "\n" or char == "\r":
- return False
- cat = unicodedata.category(char)
- if cat.startswith("C"):
- return True
- return False
-
-
-def _is_punctuation(char):
- """Checks whether `chars` is a punctuation character."""
- cp = ord(char)
- # We treat all non-letter/number ASCII as punctuation.
- # Characters such as "^", "$", and "`" are not in the Unicode
- # Punctuation class but we treat them as punctuation anyways, for
- # consistency.
- if ((cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or
- (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126)):
- return True
- cat = unicodedata.category(char)
- if cat.startswith("P"):
- return True
- return False
diff --git a/examples/Cpp/PaddleNLP/lac/README.md b/examples/Cpp/PaddleNLP/lac/README.md
deleted file mode 100755
index 108d5051b50b2b639e28c023364d36ec9a0a0a44..0000000000000000000000000000000000000000
--- a/examples/Cpp/PaddleNLP/lac/README.md
+++ /dev/null
@@ -1,26 +0,0 @@
-## Chinese Word Segmentation
-
-([简体中文](./README_CN.md)|English)
-
-### Get Model
-```
-python3 -m paddle_serving_app.package --get_model lac
-tar -xzvf lac.tar.gz
-```
-
-#### Start the inference service (supports BRPC-Client/GRPC-Client/Http-Client)
-
-```
-python3 -m paddle_serving_server.serve --model lac_model/ --port 9292
-```
-### BRPC Infer
-```
-echo "我爱北京天安门" | python3 lac_client.py lac_client/serving_client_conf.prototxt
-```
-
-This returns the word segmentation result.
-
-### GRPC/Http Infer
-```
-echo "我爱北京天安门" | python3 lac_http_client.py lac_client/serving_client_conf.prototxt
-```
diff --git a/examples/Cpp/PaddleNLP/lac/README_CN.md b/examples/Cpp/PaddleNLP/lac/README_CN.md
deleted file mode 100755
index 5634128c80c23126836677f4cb434df68dde9056..0000000000000000000000000000000000000000
--- a/examples/Cpp/PaddleNLP/lac/README_CN.md
+++ /dev/null
@@ -1,26 +0,0 @@
-## Chinese Word Segmentation Model
-
-(简体中文|[English](./README.md))
-
-### Get the model
-```
-python3 -m paddle_serving_app.package --get_model lac
-tar -xzvf lac.tar.gz
-```
-
-#### Start the prediction service (supports BRPC-Client/GRPC-Client/Http-Client)
-
-```
-python3 -m paddle_serving_server.serve --model lac_model/ --port 9292
-```
-### Run BRPC prediction
-```
-echo "我爱北京天安门" | python3 lac_client.py lac_client/serving_client_conf.prototxt
-```
-
-This returns the word segmentation result.
-
-### Run GRPC/Http prediction
-```
-echo "我爱北京天安门" | python3 lac_http_client.py lac_client/serving_client_conf.prototxt
-```
diff --git a/examples/Cpp/PaddleNLP/lac/benchmark.py b/examples/Cpp/PaddleNLP/lac/benchmark.py
deleted file mode 100644
index 64e935a608477d5841df1b64abf7b6eb35dd1a4b..0000000000000000000000000000000000000000
--- a/examples/Cpp/PaddleNLP/lac/benchmark.py
+++ /dev/null
@@ -1,53 +0,0 @@
-# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# pylint: disable=doc-string-missing
-
-import sys
-import time
-import requests
-from paddle_serving_app.reader import LACReader
-from paddle_serving_client import Client
-from paddle_serving_client.utils import MultiThreadRunner
-from paddle_serving_client.utils import benchmark_args
-
-args = benchmark_args()
-
-
-def single_func(idx, resource):
- reader = LACReader()
- start = time.time()
- if args.request == "rpc":
- client = Client()
- client.load_client_config(args.model)
- client.connect([args.endpoint])
- fin = open("jieba_test.txt")
- for line in fin:
- feed_data = reader.process(line)
- fetch_map = client.predict(
- feed={"words": feed_data}, fetch=["crf_decode"])
-    elif args.request == "http":
-        fin = open("jieba_test.txt")
-        for line in fin:
-            r = requests.post(
-                "http://{}/lac/prediction".format(args.endpoint),
-                data={"words": line.strip(),
-                      "fetch": ["crf_decode"]})
- end = time.time()
- return [[end - start]]
-
-
-multi_thread_runner = MultiThreadRunner()
-result = multi_thread_runner.run(single_func, args.thread, {})
-print(result)
diff --git a/examples/Cpp/PaddleNLP/lac/lac_client.py b/examples/Cpp/PaddleNLP/lac/lac_client.py
deleted file mode 100644
index 568b08d8b3af86fd7aa7b20660aeb4acbf060e04..0000000000000000000000000000000000000000
--- a/examples/Cpp/PaddleNLP/lac/lac_client.py
+++ /dev/null
@@ -1,49 +0,0 @@
-# encoding=utf-8
-# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# pylint: disable=doc-string-missing
-
-from paddle_serving_client import Client
-from paddle_serving_app.reader import LACReader
-import sys
-import os
-import io
-import numpy as np
-
-client = Client()
-client.load_client_config(sys.argv[1])
-client.connect(["127.0.0.1:9292"])
-
-reader = LACReader()
-for line in sys.stdin:
-    if not line.strip():
- continue
- feed_data = reader.process(line)
- if len(feed_data) <= 0:
- continue
- print(feed_data)
- #fetch_map = client.predict(feed={"words": np.array(feed_data).reshape(len(feed_data), 1), "words.lod": [0, len(feed_data)]}, fetch=["crf_decode"], batch=True)
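-    # The request below duplicates feed_data to form a batch of two identical
-    # samples; "words.lod" marks each sample's token offsets within the batch.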
- fetch_map = client.predict(
- feed={
- "words": np.array(feed_data + feed_data).reshape(
- len(feed_data) * 2, 1),
- "words.lod": [0, len(feed_data), 2 * len(feed_data)]
- },
- fetch=["crf_decode"],
- batch=True)
- print(fetch_map)
- begin = fetch_map['crf_decode.lod'][0]
- end = fetch_map['crf_decode.lod'][1]
- segs = reader.parse_result(line, fetch_map["crf_decode"][begin:end])
- print("word_seg: " + "|".join(str(words) for words in segs))
diff --git a/examples/Cpp/PaddleNLP/lac/lac_http_client.py b/examples/Cpp/PaddleNLP/lac/lac_http_client.py
deleted file mode 100755
index 5cdfaf1df46a43d04b7e09f0f6376364a9dcb89f..0000000000000000000000000000000000000000
--- a/examples/Cpp/PaddleNLP/lac/lac_http_client.py
+++ /dev/null
@@ -1,66 +0,0 @@
-# encoding=utf-8
-# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# pylint: disable=doc-string-missing
-
-from paddle_serving_client import HttpClient
-from paddle_serving_app.reader import LACReader
-import sys
-import os
-import io
-import numpy as np
-
-client = HttpClient()
-client.load_client_config(sys.argv[1])
-'''
-To use the GRPC client, call set_use_grpc_client(True),
-or call client.grpc_client_predict(...) directly.
-For the HTTP client, call set_use_grpc_client(False) (the default),
-or call client.http_client_predict(...) directly.
-'''
-#client.set_use_grpc_client(True)
-'''
-To enable the Encrypt Module, uncomment the following line.
-'''
-#client.use_key("./key")
-'''
-To enable compression, uncomment the following lines.
-'''
-#client.set_response_compress(True)
-#client.set_request_compress(True)
-'''
-We recommend the Proto data format in the HTTP body; set True (the default).
-To use the JSON data format in the HTTP body, set False.
-'''
-#client.set_http_proto(True)
-client.connect(["127.0.0.1:9292"])
-
-reader = LACReader()
-for line in sys.stdin:
-    if not line.strip():
- continue
- feed_data = reader.process(line)
- if len(feed_data) <= 0:
- continue
- print(feed_data)
- #fetch_map = client.predict(feed={"words": np.array(feed_data).reshape(len(feed_data), 1), "words.lod": [0, len(feed_data)]}, fetch=["crf_decode"], batch=True)
- fetch_map = client.predict(
- feed={
- "words": np.array(feed_data + feed_data).reshape(
- len(feed_data) * 2, 1),
- "words.lod": [0, len(feed_data), 2 * len(feed_data)]
- },
- fetch=["crf_decode"],
- batch=True)
- print(fetch_map)
diff --git a/examples/Cpp/PaddleNLP/lac/lac_reader.py b/examples/Cpp/PaddleNLP/lac/lac_reader.py
deleted file mode 100644
index 488e7ced1ce27f914f299c45295e82f33c68d6d0..0000000000000000000000000000000000000000
--- a/examples/Cpp/PaddleNLP/lac/lac_reader.py
+++ /dev/null
@@ -1,126 +0,0 @@
-# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from paddle_serving_client import Client
-import sys
-py_version = sys.version_info[0]
-if py_version == 2:
- reload(sys)
- sys.setdefaultencoding('utf-8')
-import os
-import io
-
-
-def load_kv_dict(dict_path,
- reverse=False,
- delimiter="\t",
- key_func=None,
- value_func=None):
- result_dict = {}
- for line in io.open(dict_path, "r", encoding="utf8"):
- terms = line.strip("\n").split(delimiter)
- if len(terms) != 2:
- continue
- if reverse:
- value, key = terms
- else:
- key, value = terms
- if key in result_dict:
- raise KeyError("key duplicated with [%s]" % (key))
- if key_func:
- key = key_func(key)
- if value_func:
- value = value_func(value)
- result_dict[key] = value
- return result_dict
-
-
-class LACReader(object):
- """data reader"""
-
- def __init__(self, dict_folder):
- # read dict
- #basepath = os.path.abspath(__file__)
- #folder = os.path.dirname(basepath)
- word_dict_path = os.path.join(dict_folder, "word.dic")
- label_dict_path = os.path.join(dict_folder, "tag.dic")
- self.word2id_dict = load_kv_dict(
- word_dict_path, reverse=True, value_func=int)
- self.id2word_dict = load_kv_dict(word_dict_path)
- self.label2id_dict = load_kv_dict(
- label_dict_path, reverse=True, value_func=int)
- self.id2label_dict = load_kv_dict(label_dict_path)
-
- @property
- def vocab_size(self):
- """vocabulary size"""
- return max(self.word2id_dict.values()) + 1
-
- @property
- def num_labels(self):
- """num_labels"""
- return max(self.label2id_dict.values()) + 1
-
- def word_to_ids(self, words):
- """convert word to word index"""
- word_ids = []
- idx = 0
-        try:
-            # decode to unicode on Python 2; on Python 3 `unicode` is
-            # undefined, so this raises and is skipped
-            words = unicode(words, 'utf-8')
-        except:
-            pass
- for word in words:
- if word not in self.word2id_dict:
- word = "OOV"
- word_id = self.word2id_dict[word]
- word_ids.append(word_id)
- return word_ids
-
- def label_to_ids(self, labels):
- """convert label to label index"""
- label_ids = []
- for label in labels:
- if label not in self.label2id_dict:
- label = "O"
- label_id = self.label2id_dict[label]
- label_ids.append(label_id)
- return label_ids
-
- def process(self, sent):
- words = sent.strip()
- word_ids = self.word_to_ids(words)
- return word_ids
-
- def parse_result(self, words, crf_decode):
- tags = [self.id2label_dict[str(x[0])] for x in crf_decode]
-
- sent_out = []
- tags_out = []
- partial_word = ""
- for ind, tag in enumerate(tags):
- if partial_word == "":
- partial_word = self.id2word_dict[str(words[ind])]
- tags_out.append(tag.split('-')[0])
- continue
-            if tag.endswith("-B") or (tag == "O" and tags[ind - 1] != "O"):
- sent_out.append(partial_word)
- tags_out.append(tag.split('-')[0])
- partial_word = self.id2word_dict[str(words[ind])]
- continue
- partial_word += self.id2word_dict[str(words[ind])]
-
- if len(sent_out) < len(tags_out):
- sent_out.append(partial_word)
-
- return sent_out
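-
-
-# Minimal usage sketch (assumes a dict folder containing word.dic and tag.dic,
-# e.g. the lac_dict archive fetched by the senta example's get_data.sh):
-#   reader = LACReader("lac_dict")
-#   word_ids = reader.process("我爱北京天安门")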
diff --git a/examples/Cpp/PaddleNLP/lac/utils.py b/examples/Cpp/PaddleNLP/lac/utils.py
deleted file mode 100644
index 64602902f362cc847c705a3e18d3e76255961314..0000000000000000000000000000000000000000
--- a/examples/Cpp/PaddleNLP/lac/utils.py
+++ /dev/null
@@ -1,141 +0,0 @@
-# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-"""
-util tools
-"""
-from __future__ import print_function
-import os
-import sys
-import numpy as np
-import paddle.fluid as fluid
-import io
-
-
-def str2bool(v):
- """
- argparse does not support True or False in python
- """
- return v.lower() in ("true", "t", "1")
-
-
-def parse_result(words, crf_decode, dataset):
- """ parse result """
- offset_list = (crf_decode.lod())[0]
- words = np.array(words)
- crf_decode = np.array(crf_decode)
- batch_size = len(offset_list) - 1
-
- for sent_index in range(batch_size):
- begin, end = offset_list[sent_index], offset_list[sent_index + 1]
- sent = []
- for id in words[begin:end]:
- if dataset.id2word_dict[str(id[0])] == 'OOV':
- sent.append(' ')
- else:
- sent.append(dataset.id2word_dict[str(id[0])])
- tags = [
- dataset.id2label_dict[str(id[0])] for id in crf_decode[begin:end]
- ]
-
-        sent_out = []
-        tags_out = []
-        partial_word = ""
-        for ind, tag in enumerate(tags):
-            # for the first word
-            if partial_word == "":
-                partial_word = sent[ind]
-                tags_out.append(tag.split('-')[0])
-                continue
-
-            # for the beginning of word
-            if tag.endswith("-B") or (tag == "O" and tags[ind - 1] != "O"):
-                sent_out.append(partial_word)
-                tags_out.append(tag.split('-')[0])
-                partial_word = sent[ind]
-                continue
-
-            partial_word += sent[ind]
-
-        # append the last word, except for len(tags)=0
-        if len(sent_out) < len(tags_out):
-            sent_out.append(partial_word)
-    return sent_out, tags_out
-
-
-def parse_padding_result(words, crf_decode, seq_lens, dataset):
- """ parse padding result """
- words = np.squeeze(words)
- batch_size = len(seq_lens)
-
- batch_out = []
-    for sent_index in range(batch_size):
-        # skip the begin/end markers that padding adds around each sentence
-        sent = []
-        for id in words[sent_index][1:seq_lens[sent_index] - 1]:
-            if dataset.id2word_dict[str(id)] == 'OOV':
-                sent.append(' ')
-            else:
-                sent.append(dataset.id2word_dict[str(id)])
- tags = [
- dataset.id2label_dict[str(id)]
- for id in crf_decode[sent_index][1:seq_lens[sent_index] - 1]
- ]
-
-        sent_out = []
-        tags_out = []
-        partial_word = ""
-        for ind, tag in enumerate(tags):
-            # for the first word
-            if partial_word == "":
-                partial_word = sent[ind]
-                tags_out.append(tag.split('-')[0])
-                continue
-
-            # for the beginning of word
-            if tag.endswith("-B") or (tag == "O" and tags[ind - 1] != "O"):
-                sent_out.append(partial_word)
-                tags_out.append(tag.split('-')[0])
-                partial_word = sent[ind]
-                continue
-
-            partial_word += sent[ind]
-
-        # append the last word, except for len(tags)=0
-        if len(sent_out) < len(tags_out):
-            sent_out.append(partial_word)
-
- batch_out.append([sent_out, tags_out])
- return batch_out
-
-
-def init_checkpoint(exe, init_checkpoint_path, main_program):
- """
- Init CheckPoint
- """
-    assert os.path.exists(
-        init_checkpoint_path), "[%s] cannot be found." % init_checkpoint_path
-
-    def existed_persistables(var):
-        """
-        Whether a persistable variable exists in the checkpoint.
-        """
-        if not fluid.io.is_persistable(var):
-            return False
-        return os.path.exists(os.path.join(init_checkpoint_path, var.name))
-
-    fluid.io.load_vars(
-        exe,
-        init_checkpoint_path,
-        main_program=main_program,
-        predicate=existed_persistables)
diff --git a/examples/Cpp/PaddleNLP/senta/README.md b/examples/Cpp/PaddleNLP/senta/README.md
deleted file mode 100644
index 9a159133eeb20832c1870bb949136a59ae461901..0000000000000000000000000000000000000000
--- a/examples/Cpp/PaddleNLP/senta/README.md
+++ /dev/null
@@ -1,23 +0,0 @@
-# Chinese Sentence Sentiment Classification
-([简体中文](./README_CN.md)|English)
-
-## Get Model
-```
-python3 -m paddle_serving_app.package --get_model senta_bilstm
-python3 -m paddle_serving_app.package --get_model lac
-tar -xzvf senta_bilstm.tar.gz
-tar -xzvf lac.tar.gz
-```
-
-## Start HTTP Service
-```
-python3 -m paddle_serving_server.serve --model lac_model --port 9300
-python3 senta_web_service.py
-```
-The Chinese sentiment classification task requires Chinese word segmentation first, which is done by the [LAC task](../lac).
-In this demo, the LAC task runs in the preprocessing step of the sentiment classification HTTP service.
-
-## Client prediction
-```
-curl -H "Content-Type:application/json" -X POST -d '{"feed":[{"words": "天气不错"}], "fetch":["class_probs"]}' http://127.0.0.1:9393/senta/prediction
-```
diff --git a/examples/Cpp/PaddleNLP/senta/README_CN.md b/examples/Cpp/PaddleNLP/senta/README_CN.md
deleted file mode 100644
index a09fd117767cbdd01847d6cdef06992caf4a9715..0000000000000000000000000000000000000000
--- a/examples/Cpp/PaddleNLP/senta/README_CN.md
+++ /dev/null
@@ -1,23 +0,0 @@
-# 中文语句情感分类
-(简体中文|[English](./README.md))
-
-## 获取模型文件
-```
-python3 -m paddle_serving_app.package --get_model senta_bilstm
-python3 -m paddle_serving_app.package --get_model lac
-tar -xzvf lac.tar.gz
-tar -xzvf senta_bilstm.tar.gz
-```
-
-## 启动HTTP服务
-```
-python3 -m paddle_serving_server.serve --model lac_model --port 9300
-python3 senta_web_service.py
-```
-In the Chinese sentiment classification task, Chinese word segmentation must be done first through the [LAC task](../lac).
-In this example, the LAC task is placed in the preprocessing stage of the sentiment classification task's HTTP prediction service.
-
-## Client Prediction
-```
-curl -H "Content-Type:application/json" -X POST -d '{"feed":[{"words": "天气不错"}], "fetch":["class_probs"]}' http://127.0.0.1:9393/senta/prediction
-```
diff --git a/examples/Cpp/PaddleNLP/senta/get_data.sh b/examples/Cpp/PaddleNLP/senta/get_data.sh
deleted file mode 100644
index 7fd5c3e21880c44f27c4f4a037be87dc24790bc4..0000000000000000000000000000000000000000
--- a/examples/Cpp/PaddleNLP/senta/get_data.sh
+++ /dev/null
@@ -1,7 +0,0 @@
-wget https://paddle-serving.bj.bcebos.com/paddle_hub_models/text/SentimentAnalysis/senta_bilstm.tar.gz --no-check-certificate
-tar -xzvf senta_bilstm.tar.gz
-wget https://paddle-serving.bj.bcebos.com/paddle_hub_models/text/LexicalAnalysis/lac.tar.gz --no-check-certificate
-tar -xzvf lac.tar.gz
-wget https://paddle-serving.bj.bcebos.com/reader/lac/lac_dict.tar.gz --no-check-certificate
-tar -xzvf lac_dict.tar.gz
-wget https://paddle-serving.bj.bcebos.com/reader/senta/vocab.txt --no-check-certificate
diff --git a/examples/Cpp/PaddleNLP/senta/senta_web_service.py b/examples/Cpp/PaddleNLP/senta/senta_web_service.py
deleted file mode 100644
index 1e872f0eae0e9ecbfae820367e26db9e94f3cf86..0000000000000000000000000000000000000000
--- a/examples/Cpp/PaddleNLP/senta/senta_web_service.py
+++ /dev/null
@@ -1,73 +0,0 @@
-#encoding=utf-8
-# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import os
-import sys
-import numpy as np
-from paddle_serving_server.web_service import WebService
-from paddle_serving_client import Client
-from paddle_serving_app.reader import LACReader, SentaReader
-
-
-class SentaService(WebService):
-    # Initialize the LAC model prediction client
- def init_lac_client(self, lac_port, lac_client_config):
- self.lac_reader = LACReader()
- self.senta_reader = SentaReader()
- self.lac_client = Client()
- self.lac_client.load_client_config(lac_client_config)
- self.lac_client.connect(["127.0.0.1:{}".format(lac_port)])
-
-    # Preprocess for the senta service. Call order: LAC reader -> LAC prediction -> result postprocess -> senta reader
- def preprocess(self, feed=[], fetch=[]):
- feed_batch = []
- is_batch = True
- words_lod = [0]
- for ins in feed:
- if "words" not in ins:
- raise ("feed data error!")
- feed_data = self.lac_reader.process(ins["words"])
- words_lod.append(words_lod[-1] + len(feed_data))
- feed_batch.append(np.array(feed_data).reshape(len(feed_data), 1))
- words = np.concatenate(feed_batch, axis=0)
-
- lac_result = self.lac_client.predict(
- feed={"words": words,
- "words.lod": words_lod},
- fetch=["crf_decode"],
- batch=True)
- result_lod = lac_result["crf_decode.lod"]
- feed_batch = []
- words_lod = [0]
- for i in range(len(feed)):
- segs = self.lac_reader.parse_result(
- feed[i]["words"],
- lac_result["crf_decode"][result_lod[i]:result_lod[i + 1]])
- feed_data = self.senta_reader.process(segs)
- feed_batch.append(np.array(feed_data).reshape(len(feed_data), 1))
- words_lod.append(words_lod[-1] + len(feed_data))
- return {
- "words": np.concatenate(feed_batch),
- "words.lod": words_lod
- }, fetch, is_batch
-
-
-senta_service = SentaService(name="senta")
-senta_service.load_model_config("senta_bilstm_model")
-senta_service.prepare_server(workdir="workdir")
-senta_service.init_lac_client(
- lac_port=9300, lac_client_config="lac_model/serving_server_conf.prototxt")
-senta_service.run_rpc_service()
-senta_service.run_web_service()
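-
-# Illustrative note (not part of the original example): the preprocess above
-# packs a batch using LoD offsets. For two segmented inputs of 3 and 2 tokens:
-#   words: int ids with shape (5, 1)
-#   words.lod: [0, 3, 5]  # cumulative offsets delimiting each input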
diff --git a/examples/Cpp/PaddleOCR/ocr/README.md b/examples/Cpp/PaddleOCR/ocr/README.md
deleted file mode 100755
index 95cc210a7e68d5582e68460f2eec89419bf7fd7c..0000000000000000000000000000000000000000
--- a/examples/Cpp/PaddleOCR/ocr/README.md
+++ /dev/null
@@ -1,127 +0,0 @@
-# OCR
-
-(English|[简体中文](./README_CN.md))
-
-## Get Model
-```
-python3 -m paddle_serving_app.package --get_model ocr_rec
-tar -xzvf ocr_rec.tar.gz
-python3 -m paddle_serving_app.package --get_model ocr_det
-tar -xzvf ocr_det.tar.gz
-```
-
-## Get Dataset (Optional)
-```
-wget --no-check-certificate https://paddle-serving.bj.bcebos.com/ocr/test_imgs.tar
-tar xf test_imgs.tar
-```
-
-## Web Service
-
-### Start Service
-
-```
-#choose one of the following cpu/gpu commands
-#for cpu user
-python3 -m paddle_serving_server.serve --model ocr_det_model --port 9293
-python3 ocr_web_server.py cpu
-#for gpu user
-python3 -m paddle_serving_server.serve --model ocr_det_model --port 9293 --gpu_ids 0
-python3 ocr_web_server.py gpu
-```
-
-### Client Prediction
-```
-python3 ocr_web_client.py
-```
-If you want a faster web service, please try the Web LocalPredictor Service below.
-
-## Web LocalPredictor Service
-```
-#choose one of the following cpu/gpu commands
-#for cpu user
-python3 ocr_debugger_server.py cpu
-#for gpu user
-python3 ocr_debugger_server.py gpu
-```
-
-## Web LocalPredictor Client Prediction
-```
-python3 ocr_web_client.py
-```
-
-## Benchmark
-
-CPU: Intel(R) Xeon(R) Gold 6148 CPU @ 2.40GHz * 40
-
-GPU: Nvidia Tesla V100 * 1
-
-Dataset: RCTW 500 sample images
-
-| engine | client read image(ms) | client-server trans time(ms) | server read image(ms) | det pre(ms) | det infer(ms) | det post(ms) | rec pre(ms) | rec infer(ms) | rec post(ms) | server-client trans time(ms) | server side time consumption(ms) | server side overhead(ms) | total time(ms) |
-|------------------------------|----------------|----------------------------|------------------|--------------------|------------------|--------------------|--------------------|------------------|--------------------|--------------------------|--------------------|--------------|---------------|
-| Serving web service | 8.69 | 13.41 | 109.97 | 2.82 | 87.76 | 4.29 | 3.98 | 78.51 | 3.66 | 4.12 | 181.02 | 136.49 | 317.51 |
-| Serving LocalPredictor web service | 8.73 | 16.42 | 115.27 | 2.93 | 20.63 | 3.97 | 4.48 | 13.84 | 3.60 | 6.91 | 49.45 | 147.33 | 196.78 |
-
-## Appendix: For Users Who Want to Launch Det or Rec Only
-If you only want to detect text regions without recognizing them, or recognize words directly from cropped images, we also provide standalone Det and Rec servers.
-
-### Det Server
-
-```
-python3 det_web_server.py cpu #for cpu user
-python3 det_web_server.py gpu #for gpu user
-#or
-python3 det_debugger_server.py cpu #for cpu user
-python3 det_debugger_server.py gpu #for gpu user
-```
-
-### Det Client
-
-```
-# also use ocr_web_client.py
-python3 ocr_web_client.py
-```
-
-### Rec Server
-
-```
-python3 rec_web_server.py cpu #for cpu user
-python3 rec_web_server.py gpu #for gpu user
-#or
-python3 rec_debugger_server.py cpu #for cpu user
-python3 rec_debugger_server.py gpu #for gpu user
-```
-
-### Rec Client
-
-```
-python3 rec_web_client.py
-```
-
-## C++ OCR Service
-
-**Notice:** If you need to chain the det model and rec model, and do pre-processing and post-processing in the Paddle Serving C++ framework, you need to use the C++ server compiled with the WITH_OPENCV option; see [COMPILE.md](../../../doc/COMPILE.md)
-
-### Start Service
-Select a startup mode according to your CPU/GPU device.
-
-Pass the folder paths of multiple models after the `--model` parameter to start a prediction service that chains the models together.
-```
-#for cpu user
-python3 -m paddle_serving_server.serve --model ocr_det_model ocr_rec_model --port 9293
-#for gpu user
-python3 -m paddle_serving_server.serve --model ocr_det_model ocr_rec_model --port 9293 --gpu_ids 0
-```
-
-### Client Prediction
-The pre-processing and post-processing are done in the C++ server, so only the image's Base64-encoded string is passed into the C++ server.
-
-Therefore the `feed_var` parameter in the file `ocr_det_client/serving_client_conf.prototxt` should be changed.
-
-For this case, `feed_type` should be 20 (which means the data type is string) and `shape` should be 1.
-
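-A possible edited `feed_var` entry is sketched below (illustrative; the actual file is generated when the model is saved):
-```
-feed_var {
-  name: "image"
-  alias_name: "image"
-  is_lod_tensor: false
-  feed_type: 20
-  shape: 1
-}
-```
-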
-By passing in multiple client config folder paths, the client can be started for multi-model prediction.
-```
-python3 ocr_cpp_client.py ocr_det_client ocr_rec_client
-```
diff --git a/examples/Cpp/PaddleOCR/ocr/README_CN.md b/examples/Cpp/PaddleOCR/ocr/README_CN.md
deleted file mode 100755
index 5c0734c94aa6d61e1fdb9e8f87d5ee187c805ff0..0000000000000000000000000000000000000000
--- a/examples/Cpp/PaddleOCR/ocr/README_CN.md
+++ /dev/null
@@ -1,126 +0,0 @@
-# OCR Service
-
-([English](./README.md)|简体中文)
-
-## Get Model
-```
-python3 -m paddle_serving_app.package --get_model ocr_rec
-tar -xzvf ocr_rec.tar.gz
-python3 -m paddle_serving_app.package --get_model ocr_det
-tar -xzvf ocr_det.tar.gz
-```
-## Get Dataset (Optional)
-```
-wget --no-check-certificate https://paddle-serving.bj.bcebos.com/ocr/test_imgs.tar
-tar xf test_imgs.tar
-```
-
-## Web Service
-
-### Start Service
-
-```
-#choose one of the following cpu/gpu commands
-#for cpu user
-python3 -m paddle_serving_server.serve --model ocr_det_model --port 9293
-python3 ocr_web_server.py cpu
-#for gpu user
-python3 -m paddle_serving_server.serve --model ocr_det_model --port 9293 --gpu_ids 0
-python3 ocr_web_server.py gpu
-```
-
-### Start Client
-```
-python3 ocr_web_client.py
-```
-
-If you need faster execution, try the LocalPredictor version of the web service below.
-## Start the LocalPredictor Web Service
-```
-#choose one of the following cpu/gpu commands
-#for cpu user
-python3 ocr_debugger_server.py cpu
-#for gpu user
-python3 ocr_debugger_server.py gpu
-```
-
-## Start Client
-```
-python3 ocr_web_client.py
-```
-
-## Benchmark
-
-CPU: Intel(R) Xeon(R) Gold 6148 CPU @ 2.40GHz * 40
-
-GPU: Nvidia Tesla V100 * 1
-
-Dataset: RCTW 500 sample images
-
-| engine | client read image(ms) | client-server trans time(ms) | server read image(ms) | det pre(ms) | det infer(ms) | det post(ms) | rec pre(ms) | rec infer(ms) | rec post(ms) | server-client trans time(ms) | server side time consumption(ms) | server side overhead(ms) | total time(ms) |
-|------------------------------|----------------|----------------------------|------------------|--------------------|------------------|--------------------|--------------------|------------------|--------------------|--------------------------|--------------------|--------------|---------------|
-| Serving web service | 8.69 | 13.41 | 109.97 | 2.82 | 87.76 | 4.29 | 3.98 | 78.51 | 3.66 | 4.12 | 181.02 | 136.49 | 317.51 |
-| Serving LocalPredictor web service | 8.73 | 16.42 | 115.27 | 2.93 | 20.63 | 3.97 | 4.48 | 13.84 | 3.60 | 6.91 | 49.45 | 147.33 | 196.78 |
-
-
-## Appendix: Launching the Det or Rec Service Alone
-If you want to launch only the detection or the recognition service, we also provide code to start each service alone.
-
-### Start the Detection Service
-
-```
-python3 det_web_server.py cpu #for cpu user
-python3 det_web_server.py gpu #for gpu user
-#or
-python3 det_debugger_server.py cpu #for cpu user
-python3 det_debugger_server.py gpu #for gpu user
-```
-
-### Detection Service Client
-
-```
-# also use ocr_web_client.py
-python3 ocr_web_client.py
-```
-
-### Start the Recognition Service
-
-```
-python3 rec_web_server.py cpu #for cpu user
-python3 rec_web_server.py gpu #for gpu user
-#or
-python3 rec_debugger_server.py cpu #for cpu user
-python3 rec_debugger_server.py gpu #for gpu user
-```
-
-### Recognition Service Client
-
-```
-python3 rec_web_client.py
-```
-## C++ OCR Service
-
-**Notice:** If you need to chain the det model and rec model with the Paddle Serving C++ framework and do pre-processing and post-processing there, you need to use the C++ server compiled with the WITH_OPENCV option; see [COMPILE.md](../../../doc/COMPILE.md)
-
-### Start Service
-Select a startup mode according to your CPU/GPU device.
-
-Pass the folder paths of multiple models after the `--model` parameter to start a prediction service that chains the models together.
-```
-#for cpu user
-python3 -m paddle_serving_server.serve --model ocr_det_model ocr_rec_model --port 9293
-#for gpu user
-python3 -m paddle_serving_server.serve --model ocr_det_model ocr_rec_model --port 9293 --gpu_ids 0
-```
-
-### Start Client
-Since pre-processing and post-processing are done in the C++ server, only the image's Base64-encoded string is passed in, so the client config of the first model needs to be modified,
-
-namely the `feed_var` field in `ocr_det_client/serving_client_conf.prototxt`.
-
-For this example, `feed_type` should be changed to 20 (string data type) and `shape` to 1.
-
-Start the client for prediction by passing in the client config folder paths of the multiple models.
-```
-python3 ocr_cpp_client.py ocr_det_client ocr_rec_client
-```
diff --git a/examples/Cpp/PaddleOCR/ocr/det_debugger_server.py b/examples/Cpp/PaddleOCR/ocr/det_debugger_server.py
deleted file mode 100644
index 5b40fe9372a56b2b663c1bfeff02619a8ec9730b..0000000000000000000000000000000000000000
--- a/examples/Cpp/PaddleOCR/ocr/det_debugger_server.py
+++ /dev/null
@@ -1,79 +0,0 @@
-# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import base64
-import sys
-
-import cv2
-import numpy as np
-from paddle_serving_app.reader import Sequential, ResizeByFactor
-from paddle_serving_app.reader import Div, Normalize, Transpose
-from paddle_serving_app.reader import DBPostProcess, FilterBoxes
-from paddle_serving_server.web_service import WebService
-
-
-class OCRService(WebService):
- def init_det(self):
- self.det_preprocess = Sequential([
- ResizeByFactor(32, 960), Div(255),
- Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]), Transpose(
- (2, 0, 1))
- ])
- self.filter_func = FilterBoxes(10, 10)
- self.post_func = DBPostProcess({
- "thresh": 0.3,
- "box_thresh": 0.5,
- "max_candidates": 1000,
- "unclip_ratio": 1.5,
- "min_size": 3
- })
-
- def preprocess(self, feed=[], fetch=[]):
- data = base64.b64decode(feed[0]["image"].encode('utf8'))
-        data = np.frombuffer(data, np.uint8)
- im = cv2.imdecode(data, cv2.IMREAD_COLOR)
- self.ori_h, self.ori_w, _ = im.shape
- det_img = self.det_preprocess(im)
- _, self.new_h, self.new_w = det_img.shape
- return {
- "image": det_img[np.newaxis, :].copy()
- }, ["concat_1.tmp_0"], True
-
- def postprocess(self, feed={}, fetch=[], fetch_map=None):
- det_out = fetch_map["concat_1.tmp_0"]
- ratio_list = [
- float(self.new_h) / self.ori_h, float(self.new_w) / self.ori_w
- ]
- dt_boxes_list = self.post_func(det_out, [ratio_list])
- dt_boxes = self.filter_func(dt_boxes_list[0], [self.ori_h, self.ori_w])
- return {"dt_boxes": dt_boxes.tolist()}
-
-
-ocr_service = OCRService(name="ocr")
-ocr_service.load_model_config("ocr_det_model")
-if sys.argv[1] == 'gpu':
- ocr_service.set_gpus("0")
- ocr_service.prepare_server(workdir="workdir", port=9292, device="gpu")
-elif sys.argv[1] == 'cpu':
- ocr_service.prepare_server(workdir="workdir", port=9292)
-ocr_service.init_det()
-ocr_service.run_debugger_service()
-ocr_service.run_web_service()
diff --git a/examples/Cpp/PaddleOCR/ocr/det_web_server.py b/examples/Cpp/PaddleOCR/ocr/det_web_server.py
deleted file mode 100644
index d38686e5a86c4f2df45db7f495a8c08a72270919..0000000000000000000000000000000000000000
--- a/examples/Cpp/PaddleOCR/ocr/det_web_server.py
+++ /dev/null
@@ -1,78 +0,0 @@
-# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import base64
-import sys
-
-import cv2
-import numpy as np
-from paddle_serving_app.reader import Sequential, ResizeByFactor
-from paddle_serving_app.reader import Div, Normalize, Transpose
-from paddle_serving_app.reader import DBPostProcess, FilterBoxes
-from paddle_serving_server.web_service import WebService
-
-
-class OCRService(WebService):
- def init_det(self):
- self.det_preprocess = Sequential([
- ResizeByFactor(32, 960), Div(255),
- Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]), Transpose(
- (2, 0, 1))
- ])
- self.filter_func = FilterBoxes(10, 10)
- self.post_func = DBPostProcess({
- "thresh": 0.3,
- "box_thresh": 0.5,
- "max_candidates": 1000,
- "unclip_ratio": 1.5,
- "min_size": 3
- })
-
- def preprocess(self, feed=[], fetch=[]):
- data = base64.b64decode(feed[0]["image"].encode('utf8'))
-        data = np.frombuffer(data, np.uint8)
- im = cv2.imdecode(data, cv2.IMREAD_COLOR)
- self.ori_h, self.ori_w, _ = im.shape
- det_img = self.det_preprocess(im)
- _, self.new_h, self.new_w = det_img.shape
-        return {"image": det_img}, ["concat_1.tmp_0"], False
-
- def postprocess(self, feed={}, fetch=[], fetch_map=None):
- det_out = fetch_map["concat_1.tmp_0"]
- ratio_list = [
- float(self.new_h) / self.ori_h, float(self.new_w) / self.ori_w
- ]
- dt_boxes_list = self.post_func(det_out, [ratio_list])
- dt_boxes = self.filter_func(dt_boxes_list[0], [self.ori_h, self.ori_w])
- return {"dt_boxes": dt_boxes.tolist()}
-
-
-ocr_service = OCRService(name="ocr")
-ocr_service.load_model_config("ocr_det_model")
-if sys.argv[1] == 'gpu':
- ocr_service.set_gpus("0")
- ocr_service.prepare_server(workdir="workdir", port=9292, device="gpu")
-elif sys.argv[1] == 'cpu':
- ocr_service.prepare_server(workdir="workdir", port=9292, device="cpu")
-ocr_service.init_det()
-ocr_service.run_rpc_service()
-ocr_service.run_web_service()
diff --git a/examples/Cpp/PaddleOCR/ocr/imgs/1.jpg b/examples/Cpp/PaddleOCR/ocr/imgs/1.jpg
deleted file mode 100644
index 08010177fed2ee8c3709912c06c0b161ba546313..0000000000000000000000000000000000000000
Binary files a/examples/Cpp/PaddleOCR/ocr/imgs/1.jpg and /dev/null differ
diff --git a/examples/Cpp/PaddleOCR/ocr/ocr_cpp_client.py b/examples/Cpp/PaddleOCR/ocr/ocr_cpp_client.py
deleted file mode 100644
index fa9209aabc4a7e03fe9c69ac85cd496065b1ffc2..0000000000000000000000000000000000000000
--- a/examples/Cpp/PaddleOCR/ocr/ocr_cpp_client.py
+++ /dev/null
@@ -1,44 +0,0 @@
-# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# pylint: disable=doc-string-missing
-
-from paddle_serving_client import Client
-import sys
-import numpy as np
-import base64
-import os
-import cv2
-from paddle_serving_app.reader import Sequential, URL2Image, ResizeByFactor
-from paddle_serving_app.reader import Div, Normalize, Transpose
-
-client = Client()
-# TODO:load_client need to load more than one client model.
-# this need to figure out some details.
-client.load_client_config(sys.argv[1:])
-client.connect(["127.0.0.1:9293"])
-
-test_img_dir = "imgs/"
-
-def cv2_to_base64(image):
-    # raw file bytes are Base64-encoded as-is for the C++ server
-    return base64.b64encode(image)
-
-for img_file in os.listdir(test_img_dir):
- with open(os.path.join(test_img_dir, img_file), 'rb') as file:
- image_data = file.read()
- image = cv2_to_base64(image_data)
- fetch_map = client.predict(
- feed={"image": image}, fetch = ["ctc_greedy_decoder_0.tmp_0", "softmax_0.tmp_0"], batch=True)
- #print("{} {}".format(fetch_map["price"][0], data[0][1][0]))
- print(fetch_map)
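-
-# Usage (as shown in the README): python3 ocr_cpp_client.py ocr_det_client ocr_rec_client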
diff --git a/examples/Cpp/PaddleOCR/ocr/ocr_debugger_server.py b/examples/Cpp/PaddleOCR/ocr/ocr_debugger_server.py
deleted file mode 100644
index 88dd94a8224fc5c9c6f972b96d81af60ce518763..0000000000000000000000000000000000000000
--- a/examples/Cpp/PaddleOCR/ocr/ocr_debugger_server.py
+++ /dev/null
@@ -1,113 +0,0 @@
-# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import base64
-import sys
-
-import cv2
-import numpy as np
-from paddle_serving_app.local_predict import LocalPredictor
-from paddle_serving_app.reader import OCRReader
-from paddle_serving_app.reader import Sequential, URL2Image, ResizeByFactor
-from paddle_serving_app.reader import Div, Normalize, Transpose
-from paddle_serving_app.reader import DBPostProcess, FilterBoxes, GetRotateCropImage, SortedBoxes
-from paddle_serving_server.web_service import WebService
-
-
-class OCRService(WebService):
- def init_det_debugger(self, det_model_config):
- self.det_preprocess = Sequential([
- ResizeByFactor(32, 960), Div(255),
- Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]), Transpose(
- (2, 0, 1))
- ])
- self.det_client = LocalPredictor()
- if sys.argv[1] == 'gpu':
- self.det_client.load_model_config(
- det_model_config, use_gpu=True, gpu_id=1)
- elif sys.argv[1] == 'cpu':
- self.det_client.load_model_config(det_model_config)
- self.ocr_reader = OCRReader()
-
- def preprocess(self, feed=[], fetch=[]):
- data = base64.b64decode(feed[0]["image"].encode('utf8'))
-        data = np.frombuffer(data, np.uint8)
- im = cv2.imdecode(data, cv2.IMREAD_COLOR)
- ori_h, ori_w, _ = im.shape
- det_img = self.det_preprocess(im)
- _, new_h, new_w = det_img.shape
- det_img = det_img[np.newaxis, :]
- det_img = det_img.copy()
- det_out = self.det_client.predict(
- feed={"image": det_img}, fetch=["concat_1.tmp_0"], batch=True)
- filter_func = FilterBoxes(10, 10)
- post_func = DBPostProcess({
- "thresh": 0.3,
- "box_thresh": 0.5,
- "max_candidates": 1000,
- "unclip_ratio": 1.5,
- "min_size": 3
- })
- sorted_boxes = SortedBoxes()
- ratio_list = [float(new_h) / ori_h, float(new_w) / ori_w]
- dt_boxes_list = post_func(det_out["concat_1.tmp_0"], [ratio_list])
- dt_boxes = filter_func(dt_boxes_list[0], [ori_h, ori_w])
- dt_boxes = sorted_boxes(dt_boxes)
- get_rotate_crop_image = GetRotateCropImage()
- img_list = []
- max_wh_ratio = 0
- for i, dtbox in enumerate(dt_boxes):
- boximg = get_rotate_crop_image(im, dt_boxes[i])
- img_list.append(boximg)
- h, w = boximg.shape[0:2]
- wh_ratio = w * 1.0 / h
- max_wh_ratio = max(max_wh_ratio, wh_ratio)
- if len(img_list) == 0:
-            return {}, [], True  # keep the (feed, fetch, is_batch) contract
- _, w, h = self.ocr_reader.resize_norm_img(img_list[0],
- max_wh_ratio).shape
- imgs = np.zeros((len(img_list), 3, w, h)).astype('float32')
- for id, img in enumerate(img_list):
- norm_img = self.ocr_reader.resize_norm_img(img, max_wh_ratio)
- imgs[id] = norm_img
- feed = {"image": imgs.copy()}
- fetch = ["ctc_greedy_decoder_0.tmp_0", "softmax_0.tmp_0"]
- return feed, fetch, True
-
- def postprocess(self, feed={}, fetch=[], fetch_map=None):
- rec_res = self.ocr_reader.postprocess(fetch_map, with_score=True)
- res_lst = []
- for res in rec_res:
- res_lst.append(res[0])
- res = {"res": res_lst}
- return res
-
-
-ocr_service = OCRService(name="ocr")
-ocr_service.load_model_config("ocr_rec_model")
-ocr_service.prepare_server(workdir="workdir", port=9292)
-ocr_service.init_det_debugger(det_model_config="ocr_det_model")
-if sys.argv[1] == 'gpu':
- ocr_service.set_gpus("0")
- ocr_service.run_debugger_service(gpu=True)
-elif sys.argv[1] == 'cpu':
- ocr_service.run_debugger_service()
-ocr_service.run_web_service()
diff --git a/examples/Cpp/PaddleOCR/ocr/ocr_web_client.py b/examples/Cpp/PaddleOCR/ocr/ocr_web_client.py
deleted file mode 100644
index ce96a8bbcd585f37368d70070d649e25a0129029..0000000000000000000000000000000000000000
--- a/examples/Cpp/PaddleOCR/ocr/ocr_web_client.py
+++ /dev/null
@@ -1,40 +0,0 @@
-# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# -*- coding: utf-8 -*-
-
-import requests
-import json
-import cv2
-import base64
-import os, sys
-import time
-
-
-def cv2_to_base64(image):
-    # `image` already holds the raw file bytes read from disk, so no cv2
-    # re-encoding is needed before Base64-encoding.
-    return base64.b64encode(image).decode('utf8')
-
-
-headers = {"Content-type": "application/json"}
-url = "http://127.0.0.1:9292/ocr/prediction"
-test_img_dir = "imgs/"
-for img_file in os.listdir(test_img_dir):
- with open(os.path.join(test_img_dir, img_file), 'rb') as file:
- image_data1 = file.read()
- image = cv2_to_base64(image_data1)
- data = {"feed": [{"image": image}], "fetch": ["res"]}
- r = requests.post(url=url, headers=headers, data=json.dumps(data))
- print(r)
- print(r.json())
diff --git a/examples/Cpp/PaddleOCR/ocr/ocr_web_server.py b/examples/Cpp/PaddleOCR/ocr/ocr_web_server.py
deleted file mode 100644
index 58fc850c94a5e8d2f37ae5d03f14b60d343a2203..0000000000000000000000000000000000000000
--- a/examples/Cpp/PaddleOCR/ocr/ocr_web_server.py
+++ /dev/null
@@ -1,105 +0,0 @@
-# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import base64
-import sys
-
-import cv2
-import numpy as np
-from paddle_serving_client import Client
-from paddle_serving_app.reader import OCRReader
-from paddle_serving_app.reader import Sequential, URL2Image, ResizeByFactor
-from paddle_serving_app.reader import Div, Normalize, Transpose
-from paddle_serving_app.reader import DBPostProcess, FilterBoxes, GetRotateCropImage, SortedBoxes
-from paddle_serving_server.web_service import WebService
-
-
-class OCRService(WebService):
- def init_det_client(self, det_port, det_client_config):
- self.det_preprocess = Sequential([
- ResizeByFactor(32, 960), Div(255),
- Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]), Transpose(
- (2, 0, 1))
- ])
- self.det_client = Client()
- self.det_client.load_client_config(det_client_config)
- self.det_client.connect(["127.0.0.1:{}".format(det_port)])
- self.ocr_reader = OCRReader()
-
- def preprocess(self, feed=[], fetch=[]):
- data = base64.b64decode(feed[0]["image"].encode('utf8'))
-        data = np.frombuffer(data, np.uint8)
- im = cv2.imdecode(data, cv2.IMREAD_COLOR)
- ori_h, ori_w, _ = im.shape
- det_img = self.det_preprocess(im)
- det_out = self.det_client.predict(
- feed={"image": det_img}, fetch=["concat_1.tmp_0"], batch=False)
- _, new_h, new_w = det_img.shape
- filter_func = FilterBoxes(10, 10)
- post_func = DBPostProcess({
- "thresh": 0.3,
- "box_thresh": 0.5,
- "max_candidates": 1000,
- "unclip_ratio": 1.5,
- "min_size": 3
- })
- sorted_boxes = SortedBoxes()
- ratio_list = [float(new_h) / ori_h, float(new_w) / ori_w]
- dt_boxes_list = post_func(det_out["concat_1.tmp_0"], [ratio_list])
- dt_boxes = filter_func(dt_boxes_list[0], [ori_h, ori_w])
- dt_boxes = sorted_boxes(dt_boxes)
- get_rotate_crop_image = GetRotateCropImage()
- feed_list = []
- img_list = []
- max_wh_ratio = 0
- for i, dtbox in enumerate(dt_boxes):
- boximg = get_rotate_crop_image(im, dt_boxes[i])
- img_list.append(boximg)
- h, w = boximg.shape[0:2]
- wh_ratio = w * 1.0 / h
- max_wh_ratio = max(max_wh_ratio, wh_ratio)
- for img in img_list:
- norm_img = self.ocr_reader.resize_norm_img(img, max_wh_ratio)
- feed_list.append(norm_img[np.newaxis, :])
- feed_batch = {"image": np.concatenate(feed_list, axis=0)}
- fetch = ["ctc_greedy_decoder_0.tmp_0", "softmax_0.tmp_0"]
- return feed_batch, fetch, True
-
- def postprocess(self, feed={}, fetch=[], fetch_map=None):
- rec_res = self.ocr_reader.postprocess(fetch_map, with_score=True)
- res_lst = []
- for res in rec_res:
- res_lst.append(res[0])
- res = {"res": res_lst}
- return res
-
-
-ocr_service = OCRService(name="ocr")
-ocr_service.load_model_config("ocr_rec_model")
-if sys.argv[1] == 'gpu':
- ocr_service.set_gpus("0")
- ocr_service.prepare_server(workdir="workdir", port=9292, device="gpu")
-elif sys.argv[1] == 'cpu':
- ocr_service.prepare_server(workdir="workdir", port=9292)
-ocr_service.init_det_client(
- det_port=9293,
- det_client_config="ocr_det_client/serving_client_conf.prototxt")
-ocr_service.run_rpc_service()
-ocr_service.run_web_service()
diff --git a/examples/Cpp/PaddleOCR/ocr/rec_debugger_server.py b/examples/Cpp/PaddleOCR/ocr/rec_debugger_server.py
deleted file mode 100644
index f84463238af859a00983f515e405686c00fdf9fa..0000000000000000000000000000000000000000
--- a/examples/Cpp/PaddleOCR/ocr/rec_debugger_server.py
+++ /dev/null
@@ -1,79 +0,0 @@
-# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import base64
-import sys
-
-import cv2
-import numpy as np
-from paddle_serving_app.reader import OCRReader
-from paddle_serving_server.web_service import WebService
-
-
-class OCRService(WebService):
- def init_rec(self):
- self.ocr_reader = OCRReader()
-
- def preprocess(self, feed=[], fetch=[]):
- img_list = []
- for feed_data in feed:
- data = base64.b64decode(feed_data["image"].encode('utf8'))
-            data = np.frombuffer(data, np.uint8)
- im = cv2.imdecode(data, cv2.IMREAD_COLOR)
- img_list.append(im)
- max_wh_ratio = 0
- for i, boximg in enumerate(img_list):
- h, w = boximg.shape[0:2]
- wh_ratio = w * 1.0 / h
- max_wh_ratio = max(max_wh_ratio, wh_ratio)
- _, w, h = self.ocr_reader.resize_norm_img(img_list[0],
- max_wh_ratio).shape
- imgs = np.zeros((len(img_list), 3, w, h)).astype('float32')
- for i, img in enumerate(img_list):
- norm_img = self.ocr_reader.resize_norm_img(img, max_wh_ratio)
- imgs[i] = norm_img
- feed = {"image": imgs.copy()}
- fetch = ["ctc_greedy_decoder_0.tmp_0", "softmax_0.tmp_0"]
- return feed, fetch, True
-
- def postprocess(self, feed={}, fetch=[], fetch_map=None):
- rec_res = self.ocr_reader.postprocess(fetch_map, with_score=True)
- res_lst = []
- for res in rec_res:
- res_lst.append(res[0])
- res = {"res": res_lst}
- return res
-
-
-ocr_service = OCRService(name="ocr")
-ocr_service.load_model_config("ocr_rec_model")
-if sys.argv[1] == 'gpu':
- ocr_service.set_gpus("0")
- ocr_service.init_rec()
- ocr_service.prepare_server(workdir="workdir", port=9292, device="gpu")
-elif sys.argv[1] == 'cpu':
- ocr_service.init_rec()
- ocr_service.prepare_server(workdir="workdir", port=9292, device="cpu")
-ocr_service.run_debugger_service()
-ocr_service.run_web_service()
diff --git a/examples/Cpp/PaddleOCR/ocr/rec_img/ch_doc3.jpg b/examples/Cpp/PaddleOCR/ocr/rec_img/ch_doc3.jpg
deleted file mode 100644
index c0c2053643c6211b9c2017e305c5fa05bba0cc66..0000000000000000000000000000000000000000
Binary files a/examples/Cpp/PaddleOCR/ocr/rec_img/ch_doc3.jpg and /dev/null differ
diff --git a/examples/Cpp/PaddleOCR/ocr/rec_web_client.py b/examples/Cpp/PaddleOCR/ocr/rec_web_client.py
deleted file mode 100644
index 312a2148886d6f084a1c077d84e907cb28c0652a..0000000000000000000000000000000000000000
--- a/examples/Cpp/PaddleOCR/ocr/rec_web_client.py
+++ /dev/null
@@ -1,41 +0,0 @@
-# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# -*- coding: utf-8 -*-
-
-import requests
-import json
-import cv2
-import base64
-import os, sys
-import time
-
-
-def cv2_to_base64(image):
-    # `image` already holds the raw file bytes read from disk, so no cv2
-    # re-encoding is needed before Base64-encoding.
-    return base64.b64encode(image).decode('utf8')
-
-
-headers = {"Content-type": "application/json"}
-url = "http://127.0.0.1:9292/ocr/prediction"
-test_img_dir = "rec_img/"
-
-for img_file in os.listdir(test_img_dir):
- with open(os.path.join(test_img_dir, img_file), 'rb') as file:
- image_data1 = file.read()
- image = cv2_to_base64(image_data1)
- #data = {"feed": [{"image": image}], "fetch": ["res"]}
- data = {"feed": [{"image": image}] * 3, "fetch": ["res"]}
- r = requests.post(url=url, headers=headers, data=json.dumps(data))
- print(r.json())
diff --git a/examples/Cpp/PaddleOCR/ocr/rec_web_server.py b/examples/Cpp/PaddleOCR/ocr/rec_web_server.py
deleted file mode 100644
index 2db6e398d3a025e739761fabd50c5bb8a6609f07..0000000000000000000000000000000000000000
--- a/examples/Cpp/PaddleOCR/ocr/rec_web_server.py
+++ /dev/null
@@ -1,80 +0,0 @@
-# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import base64
-import sys
-
-import cv2
-import numpy as np
-from paddle_serving_app.reader import OCRReader
-from paddle_serving_server.web_service import WebService
-
-
-class OCRService(WebService):
- def init_rec(self):
- self.ocr_reader = OCRReader()
-
- def preprocess(self, feed=[], fetch=[]):
- # TODO: to handle batch rec images
- img_list = []
- for feed_data in feed:
- data = base64.b64decode(feed_data["image"].encode('utf8'))
-            data = np.frombuffer(data, np.uint8)
- im = cv2.imdecode(data, cv2.IMREAD_COLOR)
- img_list.append(im)
- max_wh_ratio = 0
- for i, boximg in enumerate(img_list):
- h, w = boximg.shape[0:2]
- wh_ratio = w * 1.0 / h
- max_wh_ratio = max(max_wh_ratio, wh_ratio)
- _, w, h = self.ocr_reader.resize_norm_img(img_list[0],
- max_wh_ratio).shape
- imgs = np.zeros((len(img_list), 3, w, h)).astype('float32')
- for i, img in enumerate(img_list):
- norm_img = self.ocr_reader.resize_norm_img(img, max_wh_ratio)
- imgs[i] = norm_img
-
- feed = {"image": imgs.copy()}
- fetch = ["ctc_greedy_decoder_0.tmp_0", "softmax_0.tmp_0"]
- return feed, fetch, True
-
- def postprocess(self, feed={}, fetch=[], fetch_map=None):
- rec_res = self.ocr_reader.postprocess(fetch_map, with_score=True)
- res_lst = []
- for res in rec_res:
- res_lst.append(res[0])
- res = {"res": res_lst}
- return res
-
-
-ocr_service = OCRService(name="ocr")
-ocr_service.load_model_config("ocr_rec_model")
-ocr_service.init_rec()
-if sys.argv[1] == 'gpu':
- ocr_service.set_gpus("0")
- ocr_service.prepare_server(workdir="workdir", port=9292, device="gpu")
-elif sys.argv[1] == 'cpu':
- ocr_service.prepare_server(workdir="workdir", port=9292, device="cpu")
-ocr_service.run_rpc_service()
-ocr_service.run_web_service()
diff --git a/examples/Cpp/PaddleRec/criteo_ctr/README.md b/examples/Cpp/PaddleRec/criteo_ctr/README.md
deleted file mode 100644
index 6c1d79e7362a0240a49a9f0243f3de3340119ce3..0000000000000000000000000000000000000000
--- a/examples/Cpp/PaddleRec/criteo_ctr/README.md
+++ /dev/null
@@ -1,31 +0,0 @@
-## CTR Prediction Service
-
-([简体中文](./README_CN.md)|English)
-
-### Download the Criteo Dataset
-```
-sh get_data.sh
-```
-
-### Download the Inference Model
-```
-wget https://paddle-serving.bj.bcebos.com/criteo_ctr_example/criteo_ctr_demo_model.tar.gz
-tar xf criteo_ctr_demo_model.tar.gz
-mv models/ctr_client_conf .
-mv models/ctr_serving_model .
-```
-The directories `ctr_serving_model` and `ctr_client_conf` will appear in the current directory.
-
-### Start RPC Inference Service
-
-```
-python3 -m paddle_serving_server.serve --model ctr_serving_model/ --port 9292 #CPU RPC Service
-python3 -m paddle_serving_server.serve --model ctr_serving_model/ --port 9292 --gpu_ids 0 #RPC Service on GPU 0
-```
-
-### RPC Infer
-
-```
-python3 test_client.py ctr_client_conf/serving_client_conf.prototxt raw_data/part-0
-```
-The latency will be displayed at the end.
diff --git a/examples/Cpp/PaddleRec/criteo_ctr/README_CN.md b/examples/Cpp/PaddleRec/criteo_ctr/README_CN.md
deleted file mode 100644
index c5b1da76055e64bd08bcf2a00dffe537bc931ee9..0000000000000000000000000000000000000000
--- a/examples/Cpp/PaddleRec/criteo_ctr/README_CN.md
+++ /dev/null
@@ -1,31 +0,0 @@
-## CTR Prediction Service
-
-(简体中文|[English](./README.md))
-
-### Get Sample Data
-```
-sh get_data.sh
-```
-
-### Download Model
-```
-wget https://paddle-serving.bj.bcebos.com/criteo_ctr_example/criteo_ctr_demo_model.tar.gz
-tar xf criteo_ctr_demo_model.tar.gz
-mv models/ctr_client_conf .
-mv models/ctr_serving_model .
-```
-The directories `ctr_serving_model` and `ctr_client_conf` will appear in the current directory.
-
-### Start RPC Inference Service
-
-```
-python3 -m paddle_serving_server.serve --model ctr_serving_model/ --port 9292 #CPU RPC service
-python3 -m paddle_serving_server.serve --model ctr_serving_model/ --port 9292 --gpu_ids 0 #RPC service on GPU 0
-```
-
-### Run Inference
-
-```
-python3 test_client.py ctr_client_conf/serving_client_conf.prototxt raw_data/part-0
-```
-The time consumed by the prediction will be printed at the end.
diff --git a/examples/Cpp/PaddleRec/criteo_ctr/args.py b/examples/Cpp/PaddleRec/criteo_ctr/args.py
deleted file mode 100644
index 30124d4ebd9cd27cdb4580e654a8a47c55b178bf..0000000000000000000000000000000000000000
--- a/examples/Cpp/PaddleRec/criteo_ctr/args.py
+++ /dev/null
@@ -1,105 +0,0 @@
-# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# pylint: disable=doc-string-missing
-import argparse
-
-
-def parse_args():
- parser = argparse.ArgumentParser(description="PaddlePaddle CTR example")
- parser.add_argument(
- '--train_data_path',
- type=str,
- default='./data/raw/train.txt',
- help="The path of training dataset")
- parser.add_argument(
-        '--sparse_only',
-        action='store_true',
-        default=False,
-        help="Whether to use sparse features only")
- parser.add_argument(
- '--test_data_path',
- type=str,
- default='./data/raw/valid.txt',
- help="The path of testing dataset")
- parser.add_argument(
- '--batch_size',
- type=int,
- default=1000,
- help="The size of mini-batch (default:1000)")
- parser.add_argument(
- '--embedding_size',
- type=int,
- default=10,
- help="The size for embedding layer (default:10)")
- parser.add_argument(
- '--num_passes',
- type=int,
- default=10,
- help="The number of passes to train (default: 10)")
- parser.add_argument(
- '--model_output_dir',
- type=str,
- default='models',
- help='The path for model to store (default: models)')
- parser.add_argument(
- '--sparse_feature_dim',
- type=int,
- default=1000001,
- help='sparse feature hashing space for index processing')
- parser.add_argument(
- '--is_local',
- type=int,
- default=1,
- help='Local train or distributed train (default: 1)')
- parser.add_argument(
- '--cloud_train',
- type=int,
- default=0,
- help='Local train or distributed train on paddlecloud (default: 0)')
- parser.add_argument(
- '--async_mode',
- action='store_true',
- default=False,
- help='Whether start pserver in async mode to support ASGD')
- parser.add_argument(
- '--no_split_var',
- action='store_true',
- default=False,
- help='Whether split variables into blocks when update_method is pserver')
- parser.add_argument(
- '--role',
- type=str,
-        default='pserver',  # trainer or pserver
-        help='The role of this node: trainer or pserver (default: pserver)')
- parser.add_argument(
- '--endpoints',
- type=str,
- default='127.0.0.1:6000',
- help='The pserver endpoints, like: 127.0.0.1:6000,127.0.0.1:6001')
- parser.add_argument(
- '--current_endpoint',
- type=str,
- default='127.0.0.1:6000',
-        help='The endpoint of the current pserver (default: 127.0.0.1:6000)')
- parser.add_argument(
- '--trainer_id',
- type=int,
- default=0,
-        help='The id of the current trainer (default: 0)')
- parser.add_argument(
- '--trainers',
- type=int,
- default=1,
-        help='The number of trainers (default: 1)')
- return parser.parse_args()
diff --git a/examples/Cpp/PaddleRec/criteo_ctr/benchmark.py b/examples/Cpp/PaddleRec/criteo_ctr/benchmark.py
deleted file mode 100644
index 8be7387d6ef32d656f676d55c21e25052e403f16..0000000000000000000000000000000000000000
--- a/examples/Cpp/PaddleRec/criteo_ctr/benchmark.py
+++ /dev/null
@@ -1,76 +0,0 @@
-# -*- coding: utf-8 -*-
-#
-# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# pylint: disable=doc-string-missing
-
-from __future__ import unicode_literals, absolute_import
-import os
-import sys
-import time
-from paddle_serving_client import Client
-from paddle_serving_client.utils import MultiThreadRunner
-from paddle_serving_client.utils import benchmark_args
-import requests
-import json
-import criteo_reader as criteo
-
-args = benchmark_args()
-
-
-def single_func(idx, resource):
- batch = 1
- buf_size = 100
- dataset = criteo.CriteoDataset()
- dataset.setup(1000001)
- test_filelists = [
- "./raw_data/part-%d" % x for x in range(len(os.listdir("./raw_data")))
- ]
- reader = dataset.infer_reader(test_filelists[len(test_filelists) - 40:],
- batch, buf_size)
- if args.request == "rpc":
- fetch = ["prob"]
- client = Client()
- client.load_client_config(args.model)
- client.connect([resource["endpoint"][idx % len(resource["endpoint"])]])
-
-        data_iter = reader()
-        start = time.time()
-        for i in range(1000):
-            if args.batch_size == 1:
-                data = next(data_iter)
-                feed_dict = {}
-                for j in range(1, 27):
-                    feed_dict["sparse_{}".format(j - 1)] = data[0][j]
-                result = client.predict(feed=feed_dict, fetch=fetch)
-            else:
-                print("unsupported batch size {}".format(args.batch_size))
-
- elif args.request == "http":
- raise ("Not support http service.")
- end = time.time()
- return [[end - start]]
-
-
-if __name__ == '__main__':
- multi_thread_runner = MultiThreadRunner()
- endpoint_list = ["127.0.0.1:9292"]
- #endpoint_list = endpoint_list + endpoint_list + endpoint_list
- result = multi_thread_runner.run(single_func, args.thread,
- {"endpoint": endpoint_list})
- #result = single_func(0, {"endpoint": endpoint_list})
- avg_cost = 0
- for i in range(args.thread):
- avg_cost += result[0][i]
- avg_cost = avg_cost / args.thread
- print("average total cost {} s.".format(avg_cost))
diff --git a/examples/Cpp/PaddleRec/criteo_ctr/benchmark.sh b/examples/Cpp/PaddleRec/criteo_ctr/benchmark.sh
deleted file mode 100644
index cf7bc6b33fede8a8277dbe5533ed646a1ddee5ba..0000000000000000000000000000000000000000
--- a/examples/Cpp/PaddleRec/criteo_ctr/benchmark.sh
+++ /dev/null
@@ -1,9 +0,0 @@
-rm profile_log
-for thread_num in 1 2 4 8 16
-do
- $PYTHONROOT/bin/python benchmark.py --thread $thread_num --model ctr_client_conf/serving_client_conf.prototxt --request rpc > profile 2>&1
- echo "========================================"
- echo "batch size : $batch_size" >> profile_log
- $PYTHONROOT/bin/python ../util/show_profile.py profile $thread_num >> profile_log
- tail -n 1 profile >> profile_log
-done
diff --git a/examples/Cpp/PaddleRec/criteo_ctr/benchmark_batch.py b/examples/Cpp/PaddleRec/criteo_ctr/benchmark_batch.py
deleted file mode 100644
index 1e4348c99dc0d960b1818ea6f0eb1ae2f5bd2ccb..0000000000000000000000000000000000000000
--- a/examples/Cpp/PaddleRec/criteo_ctr/benchmark_batch.py
+++ /dev/null
@@ -1,79 +0,0 @@
-# -*- coding: utf-8 -*-
-#
-# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# pylint: disable=doc-string-missing
-
-from __future__ import unicode_literals, absolute_import
-import os
-import sys
-import time
-from paddle_serving_client import Client
-from paddle_serving_client.utils import MultiThreadRunner
-from paddle_serving_client.utils import benchmark_args
-import requests
-import json
-import criteo_reader as criteo
-
-args = benchmark_args()
-
-
-def single_func(idx, resource):
- batch = 1
- buf_size = 100
- dataset = criteo.CriteoDataset()
- dataset.setup(1000001)
- test_filelists = [
- "./raw_data/part-%d" % x for x in range(len(os.listdir("./raw_data")))
- ]
- reader = dataset.infer_reader(test_filelists[len(test_filelists) - 40:],
- batch, buf_size)
- if args.request == "rpc":
- fetch = ["prob"]
- client = Client()
- client.load_client_config(args.model)
- client.connect([resource["endpoint"][idx % len(resource["endpoint"])]])
-
-        data_iter = reader()
-        start = time.time()
-        for i in range(1000):
-            if args.batch_size >= 1:
-                feed_batch = []
-                for bi in range(args.batch_size):
-                    feed_dict = {}
-                    data = next(data_iter)
-                    for j in range(1, 27):
-                        feed_dict["sparse_{}".format(j - 1)] = data[0][j]
-                    feed_batch.append(feed_dict)
-                result = client.predict(feed=feed_batch, fetch=fetch)
-            else:
-                print("unsupported batch size {}".format(args.batch_size))
-
- elif args.request == "http":
- raise ("no batch predict for http")
- end = time.time()
- return [[end - start]]
-
-
-if __name__ == '__main__':
- multi_thread_runner = MultiThreadRunner()
- endpoint_list = ["127.0.0.1:9292"]
- #endpoint_list = endpoint_list + endpoint_list + endpoint_list
- result = multi_thread_runner.run(single_func, args.thread,
- {"endpoint": endpoint_list})
- #result = single_func(0, {"endpoint": endpoint_list})
- avg_cost = 0
- for i in range(args.thread):
- avg_cost += result[0][i]
- avg_cost = avg_cost / args.thread
- print("average total cost {} s.".format(avg_cost))
diff --git a/examples/Cpp/PaddleRec/criteo_ctr/benchmark_batch.sh b/examples/Cpp/PaddleRec/criteo_ctr/benchmark_batch.sh
deleted file mode 100644
index 46ba451d0ade36c24151e260d5c9b3cc3666a548..0000000000000000000000000000000000000000
--- a/examples/Cpp/PaddleRec/criteo_ctr/benchmark_batch.sh
+++ /dev/null
@@ -1,12 +0,0 @@
-rm profile_log
-for thread_num in 1 2 4 8 16
-do
-for batch_size in 1 2 4 8 16 32 64 128 256 512
-do
- $PYTHONROOT/bin/python benchmark_batch.py --thread $thread_num --batch_size $batch_size --model serving_client_conf/serving_client_conf.prototxt --request rpc > profile 2>&1
- echo "========================================"
- echo "batch size : $batch_size" >> profile_log
- $PYTHONROOT/bin/python ../util/show_profile.py profile $thread_num >> profile_log
- tail -n 1 profile >> profile_log
-done
-done
diff --git a/examples/Cpp/PaddleRec/criteo_ctr/clean.sh b/examples/Cpp/PaddleRec/criteo_ctr/clean.sh
deleted file mode 100644
index 78703636bf2b5b037e75d07055ca377abb3123c4..0000000000000000000000000000000000000000
--- a/examples/Cpp/PaddleRec/criteo_ctr/clean.sh
+++ /dev/null
@@ -1 +0,0 @@
-rm -rf *pyc kvdb raw_data ctr_client_conf ctr_serving_model ctr_data.tar.gz *~
diff --git a/examples/Cpp/PaddleRec/criteo_ctr/get_data.sh b/examples/Cpp/PaddleRec/criteo_ctr/get_data.sh
deleted file mode 100644
index 1f244b3a4aa81488bb493825576ba30c4b3bba22..0000000000000000000000000000000000000000
--- a/examples/Cpp/PaddleRec/criteo_ctr/get_data.sh
+++ /dev/null
@@ -1,2 +0,0 @@
-wget --no-check-certificate https://paddle-serving.bj.bcebos.com/data/ctr_prediction/ctr_data.tar.gz
-tar -zxvf ctr_data.tar.gz
diff --git a/examples/Cpp/PaddleRec/criteo_ctr/local_train.py b/examples/Cpp/PaddleRec/criteo_ctr/local_train.py
deleted file mode 100644
index bbc940750c5f608b47300a9a33f9e48bfb4344b1..0000000000000000000000000000000000000000
--- a/examples/Cpp/PaddleRec/criteo_ctr/local_train.py
+++ /dev/null
@@ -1,88 +0,0 @@
-# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# pylint: disable=doc-string-missing
-
-from __future__ import print_function
-
-from args import parse_args
-import os
-import paddle.fluid as fluid
-import sys
-from network_conf import dnn_model
-
-dense_feature_dim = 13
-
-
-def train():
- args = parse_args()
- sparse_only = args.sparse_only
- if not os.path.isdir(args.model_output_dir):
- os.mkdir(args.model_output_dir)
- dense_input = fluid.layers.data(
- name="dense_input", shape=[dense_feature_dim], dtype='float32')
- sparse_input_ids = [
- fluid.layers.data(
- name="C" + str(i), shape=[1], lod_level=1, dtype="int64")
- for i in range(1, 27)
- ]
- label = fluid.layers.data(name='label', shape=[1], dtype='int64')
-
- #nn_input = None if sparse_only else dense_input
- nn_input = dense_input
- predict_y, loss, auc_var, batch_auc_var = dnn_model(
- nn_input, sparse_input_ids, label, args.embedding_size,
- args.sparse_feature_dim)
-
- optimizer = fluid.optimizer.SGD(learning_rate=1e-4)
- optimizer.minimize(loss)
-
- exe = fluid.Executor(fluid.CPUPlace())
- exe.run(fluid.default_startup_program())
- dataset = fluid.DatasetFactory().create_dataset("InMemoryDataset")
- dataset.set_use_var([dense_input] + sparse_input_ids + [label])
-
- python_executable = "python"
- pipe_command = "{} criteo_reader.py {}".format(python_executable,
- args.sparse_feature_dim)
-
- dataset.set_pipe_command(pipe_command)
- dataset.set_batch_size(128)
- thread_num = 10
- dataset.set_thread(thread_num)
-
- whole_filelist = [
- "raw_data/part-%d" % x for x in range(len(os.listdir("raw_data")))
- ]
-
- dataset.set_filelist(whole_filelist[:thread_num])
- dataset.load_into_memory()
-
- epochs = 1
- for i in range(epochs):
- exe.train_from_dataset(
- program=fluid.default_main_program(), dataset=dataset, debug=True)
- print("epoch {} finished".format(i))
-
- import paddle_serving_client.io as server_io
- feed_var_dict = {}
- for i, sparse in enumerate(sparse_input_ids):
- feed_var_dict["sparse_{}".format(i)] = sparse
- fetch_var_dict = {"prob": predict_y}
-
- server_io.save_model("ctr_serving_model", "ctr_client_conf", feed_var_dict,
- fetch_var_dict, fluid.default_main_program())
-
-
-if __name__ == '__main__':
- train()
diff --git a/examples/Cpp/PaddleRec/criteo_ctr/network_conf.py b/examples/Cpp/PaddleRec/criteo_ctr/network_conf.py
deleted file mode 100644
index ec5eb7d55ab59965e474842015301b3a9088d91e..0000000000000000000000000000000000000000
--- a/examples/Cpp/PaddleRec/criteo_ctr/network_conf.py
+++ /dev/null
@@ -1,74 +0,0 @@
-# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# pylint: disable=doc-string-missing
-
-import paddle.fluid as fluid
-import math
-
-
-def dnn_model(dense_input, sparse_inputs, label, embedding_size,
- sparse_feature_dim):
- def embedding_layer(input):
- emb = fluid.layers.embedding(
- input=input,
- is_sparse=True,
- is_distributed=False,
- size=[sparse_feature_dim, embedding_size],
- param_attr=fluid.ParamAttr(
- name="SparseFeatFactors",
- initializer=fluid.initializer.Uniform()))
- return fluid.layers.sequence_pool(input=emb, pool_type='sum')
-
-    def mlp_input_tensor(emb_sums, dense_tensor):
-        # When a dense tensor is passed in, it is deliberately ignored so the
-        # exported serving model only needs the 26 sparse slots as feed vars
-        # (matching feed_var_dict in local_train.py).
-        if isinstance(dense_tensor, fluid.Variable):
-            return fluid.layers.concat(emb_sums, axis=1)
-        else:
-            return fluid.layers.concat(emb_sums + [dense_tensor], axis=1)
-
- def mlp(mlp_input):
- fc1 = fluid.layers.fc(input=mlp_input,
- size=400,
- act='relu',
- param_attr=fluid.ParamAttr(
- initializer=fluid.initializer.Normal(
- scale=1 / math.sqrt(mlp_input.shape[1]))))
- fc2 = fluid.layers.fc(input=fc1,
- size=400,
- act='relu',
- param_attr=fluid.ParamAttr(
- initializer=fluid.initializer.Normal(
- scale=1 / math.sqrt(fc1.shape[1]))))
- fc3 = fluid.layers.fc(input=fc2,
- size=400,
- act='relu',
- param_attr=fluid.ParamAttr(
- initializer=fluid.initializer.Normal(
- scale=1 / math.sqrt(fc2.shape[1]))))
- pre = fluid.layers.fc(input=fc3,
- size=2,
- act='softmax',
- param_attr=fluid.ParamAttr(
- initializer=fluid.initializer.Normal(
- scale=1 / math.sqrt(fc3.shape[1]))))
- return pre
-
- emb_sums = list(map(embedding_layer, sparse_inputs))
- mlp_in = mlp_input_tensor(emb_sums, dense_input)
- predict = mlp(mlp_in)
- cost = fluid.layers.cross_entropy(input=predict, label=label)
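-    # note: the loss is the summed (not averaged) cross-entropy over the batch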
- avg_cost = fluid.layers.reduce_sum(cost)
- accuracy = fluid.layers.accuracy(input=predict, label=label)
- auc_var, batch_auc_var, auc_states = \
- fluid.layers.auc(input=predict, label=label, num_thresholds=2 ** 12, slide_steps=20)
- return predict, avg_cost, auc_var, batch_auc_var
diff --git a/examples/Cpp/PaddleRec/criteo_ctr/test_client.py b/examples/Cpp/PaddleRec/criteo_ctr/test_client.py
deleted file mode 100644
index fd6c6e03178915bbfc2dd0608e27a7f597945dca..0000000000000000000000000000000000000000
--- a/examples/Cpp/PaddleRec/criteo_ctr/test_client.py
+++ /dev/null
@@ -1,76 +0,0 @@
-# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# pylint: disable=doc-string-missing
-
-from paddle_serving_client import Client
-import sys
-import time
-import numpy as np
-
-class CriteoReader(object):
- def __init__(self, sparse_feature_dim):
- self.cont_min_ = [0, -3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
- self.cont_max_ = [
- 20, 600, 100, 50, 64000, 500, 100, 50, 500, 10, 10, 10, 50
- ]
- self.cont_diff_ = [
- 20, 603, 100, 50, 64000, 500, 100, 50, 500, 10, 10, 10, 50
- ]
- self.hash_dim_ = sparse_feature_dim
- # here, training data are lines with line_index < train_idx_
- self.train_idx_ = 41256555
- self.continuous_range_ = range(1, 14)
- self.categorical_range_ = range(14, 40)
-
- def process_line(self, line):
- features = line.rstrip('\n').split('\t')
- dense_feature = []
- sparse_feature = []
- for idx in self.continuous_range_:
- if features[idx] == '':
- dense_feature.append(0.0)
- else:
- dense_feature.append((float(features[idx]) - self.cont_min_[idx - 1]) / \
- self.cont_diff_[idx - 1])
- for idx in self.categorical_range_:
- sparse_feature.append(
- [hash(str(idx) + features[idx]) % self.hash_dim_])
-
- return sparse_feature
-
-py_version = sys.version_info[0]
-
-client = Client()
-client.load_client_config(sys.argv[1])
-client.connect(["127.0.0.1:9292"])
-reader = CriteoReader(1000001)
-batch = 1
-buf_size = 100
-label_list = []
-prob_list = []
-start = time.time()
-f = open(sys.argv[2], 'r')
-for ei in range(10):
- data = reader.process_line(f.readline())
- feed_dict = {}
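-    # each sparse slot is fed as a flat int64 id array plus a ".lod" key whose
-    # [0, len] offsets mark this sample's variable-length sequence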
- for i in range(1, 27):
- feed_dict["sparse_{}".format(i - 1)] = np.array(data[i-1]).reshape(-1)
- feed_dict["sparse_{}.lod".format(i - 1)] = [0, len(data[i-1])]
- fetch_map = client.predict(feed=feed_dict, fetch=["prob"])
- print(fetch_map)
-end = time.time()
-f.close()
diff --git a/examples/Cpp/PaddleRec/criteo_ctr/test_server.py b/examples/Cpp/PaddleRec/criteo_ctr/test_server.py
deleted file mode 100644
index 34f859daab4c808aa9d50d2109a81a69eed96df6..0000000000000000000000000000000000000000
--- a/examples/Cpp/PaddleRec/criteo_ctr/test_server.py
+++ /dev/null
@@ -1,36 +0,0 @@
-# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# pylint: disable=doc-string-missing
-
-import os
-import sys
-from paddle_serving_server import OpMaker
-from paddle_serving_server import OpSeqMaker
-from paddle_serving_server import Server
-
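-# assemble the serving pipeline: read request -> general infer -> build response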
-op_maker = OpMaker()
-read_op = op_maker.create('general_reader')
-general_infer_op = op_maker.create('general_infer')
-response_op = op_maker.create('general_response')
-
-op_seq_maker = OpSeqMaker()
-op_seq_maker.add_op(read_op)
-op_seq_maker.add_op(general_infer_op)
-op_seq_maker.add_op(response_op)
-
-server = Server()
-server.set_op_sequence(op_seq_maker.get_op_sequence())
-server.load_model_config(sys.argv[1])
-server.prepare_server(workdir="work_dir1", port=9292, device="cpu")
-server.run_server()
diff --git a/examples/Cpp/PaddleRec/criteo_ctr_with_cube/README.md b/examples/Cpp/PaddleRec/criteo_ctr_with_cube/README.md
deleted file mode 100755
index 4492b398add170104a7cd17adff6dc5b83368dbe..0000000000000000000000000000000000000000
--- a/examples/Cpp/PaddleRec/criteo_ctr_with_cube/README.md
+++ /dev/null
@@ -1,72 +0,0 @@
-## Criteo CTR with Sparse Parameter Indexing Service
-
-([简体中文](./README_CN.md)|English)
-
-### Get Sample Dataset
-
-Go to the directory `python/examples/criteo_ctr_with_cube`
-```
-sh get_data.sh
-```
-
-### Download Model and Sparse Parameter Sequence Files
-```
-wget https://paddle-serving.bj.bcebos.com/unittest/ctr_cube_unittest.tar.gz
-tar xf ctr_cube_unittest.tar.gz
-mv models/ctr_client_conf ./
-mv models/ctr_serving_model_kv ./
-mv models/data ./cube/
-```
-After these commands, the model is in ./ctr_serving_model_kv and the client config in ./ctr_client_conf.
-
-### Start Sparse Parameter Indexing Service
-```
-wget https://paddle-serving.bj.bcebos.com/others/cube_app.tar.gz
-tar xf cube_app.tar.gz
-mv cube_app/cube* ./cube/
-sh cube_prepare.sh &
-```
-
-Here, the sparse parameters of the model are served by Cube, the sparse parameter indexing service.
-
-### Start RPC Predictor (4 serving threads, configurable in test_server.py)
-
-```
-python3 test_server.py ctr_serving_model_kv
-```
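-
-The thread count comes from test_server.py; the relevant line there is:
-```
-server.set_num_threads(4)
-```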
-
-### Run Prediction
-
-```
-python3 test_client.py ctr_client_conf/serving_client_conf.prototxt ./raw_data
-```
-
-### Benchmark
-
-CPU: Intel(R) Xeon(R) CPU 6148 @ 2.40GHz
-
-Model :[Criteo CTR](https://github.com/PaddlePaddle/Serving/blob/develop/python/examples/criteo_ctr_with_cube/network_conf.py)
-
-Server core/thread num: 4/8
-
-Run
-```
-bash benchmark.sh
-```
-Each client thread sends 1,000 batches.
-
-| client thread num | prepro | client infer | op0 | op1 | op2 | postpro | avg_latency | qps |
-| ------------------ | ------ | ------------ | ------ | ----- | ------ | ------- | ----- | ----- |
-| 1 | 0.035 | 1.596 | 0.021 | 0.518 | 0.0024 | 0.0025 | 6.774 | 147.7 |
-| 2 | 0.034 | 1.780 | 0.027 | 0.463 | 0.0020 | 0.0023 | 6.931 | 288.3 |
-| 4 | 0.038 | 2.954 | 0.025 | 0.455 | 0.0019 | 0.0027 | 8.378 | 477.5 |
-| 8 | 0.044 | 8.230 | 0.028 | 0.464 | 0.0023 | 0.0034 | 14.191 | 563.8 |
-| 16 | 0.048 | 21.037 | 0.028 | 0.455 | 0.0025 | 0.0041 | 27.236 | 587.5 |
-
-Average latency per thread:
-
-![avg cost](../../../doc/images/criteo-cube-benchmark-avgcost.png)
-
-QPS:
-
-![qps](../../../doc/images/criteo-cube-benchmark-qps.png)
diff --git a/examples/Cpp/PaddleRec/criteo_ctr_with_cube/README_CN.md b/examples/Cpp/PaddleRec/criteo_ctr_with_cube/README_CN.md
deleted file mode 100644
index 8c8d51d918410f5fa8a681dacab2000cdc0192bd..0000000000000000000000000000000000000000
--- a/examples/Cpp/PaddleRec/criteo_ctr_with_cube/README_CN.md
+++ /dev/null
@@ -1,70 +0,0 @@
-## CTR Prediction Service with Sparse Parameter Indexing
-(简体中文|[English](./README.md))
-
-### Get Sample Dataset
-Go to the directory `python/examples/criteo_ctr_with_cube`
-```
-sh get_data.sh
-```
-
-### Download Model and Sparse Parameter Sequence Files
-```
-wget https://paddle-serving.bj.bcebos.com/unittest/ctr_cube_unittest.tar.gz
-tar xf ctr_cube_unittest.tar.gz
-mv models/ctr_client_conf ./
-mv models/ctr_serving_model_kv ./
-mv models/data ./cube/
-```
-After these commands, the ctr_serving_model_kv and ctr_client_conf folders appear in the current directory.
-
-### Start Sparse Parameter Indexing Service
-```
-wget https://paddle-serving.bj.bcebos.com/others/cube_app.tar.gz
-tar xf cube_app.tar.gz
-mv cube_app/cube* ./cube/
-sh cube_prepare.sh &
-```
-
-Here, the sparse parameters of the model are stored in Cube, the sparse parameter indexing service.
-
-### Start RPC Prediction Service (4 server threads, configurable in test_server.py)
-
-```
-python3 test_server.py ctr_serving_model_kv
-```
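-
-The thread count comes from test_server.py; the relevant line there is:
-```
-server.set_num_threads(4)
-```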
-
-### Run Prediction
-
-```
-python3 test_client.py ctr_client_conf/serving_client_conf.prototxt ./raw_data
-```
-
-### Benchmark
-
-CPU: Intel(R) Xeon(R) CPU 6148 @ 2.40GHz
-
-Model: [Criteo CTR](https://github.com/PaddlePaddle/Serving/blob/develop/python/examples/criteo_ctr_with_cube/network_conf.py)
-
-Server core/thread num: 4/8
-
-Run
-```
-bash benchmark.sh
-```
-Each client thread sends 1,000 batches.
-
-| client thread num | prepro | client infer | op0 | op1 | op2 | postpro | avg_latency | qps |
-| ------------------ | ------ | ------------ | ------ | ----- | ------ | ------- | ----- | ----- |
-| 1 | 0.035 | 1.596 | 0.021 | 0.518 | 0.0024 | 0.0025 | 6.774 | 147.7 |
-| 2 | 0.034 | 1.780 | 0.027 | 0.463 | 0.0020 | 0.0023 | 6.931 | 288.3 |
-| 4 | 0.038 | 2.954 | 0.025 | 0.455 | 0.0019 | 0.0027 | 8.378 | 477.5 |
-| 8 | 0.044 | 8.230 | 0.028 | 0.464 | 0.0023 | 0.0034 | 14.191 | 563.8 |
-| 16 | 0.048 | 21.037 | 0.028 | 0.455 | 0.0025 | 0.0041 | 27.236 | 587.5 |
-
-Average latency per thread:
-
-![avg cost](../../../doc/images/criteo-cube-benchmark-avgcost.png)
-
-QPS per thread:
-
-![qps](../../../doc/images/criteo-cube-benchmark-qps.png)
diff --git a/examples/Cpp/PaddleRec/criteo_ctr_with_cube/criteo_reader.py b/examples/Cpp/PaddleRec/criteo_ctr_with_cube/criteo_reader.py
deleted file mode 100755
index 2a80af78a9c2033bf246f703ca70a817ab786af3..0000000000000000000000000000000000000000
--- a/examples/Cpp/PaddleRec/criteo_ctr_with_cube/criteo_reader.py
+++ /dev/null
@@ -1,83 +0,0 @@
-# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# pylint: disable=doc-string-missing
-
-import sys
-import paddle.fluid.incubate.data_generator as dg
-
-
-class CriteoDataset(dg.MultiSlotDataGenerator):
- def setup(self, sparse_feature_dim):
- self.cont_min_ = [0, -3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
- self.cont_max_ = [
- 20, 600, 100, 50, 64000, 500, 100, 50, 500, 10, 10, 10, 50
- ]
- self.cont_diff_ = [
- 20, 603, 100, 50, 64000, 500, 100, 50, 500, 10, 10, 10, 50
- ]
- self.hash_dim_ = sparse_feature_dim
- # here, training data are lines with line_index < train_idx_
- self.train_idx_ = 41256555
- self.continuous_range_ = range(1, 14)
- self.categorical_range_ = range(14, 40)
-
- def _process_line(self, line):
- features = line.rstrip('\n').split('\t')
- dense_feature = []
- sparse_feature = []
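-        # dense slots: scale each raw value using the precomputed per-column min and range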
- for idx in self.continuous_range_:
- if features[idx] == '':
- dense_feature.append(0.0)
- else:
- dense_feature.append((float(features[idx]) - self.cont_min_[idx - 1]) / \
- self.cont_diff_[idx - 1])
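-        # categorical slots: hashing trick -- bucket id = hash(slot index + raw value) % hash_dim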
- for idx in self.categorical_range_:
- sparse_feature.append(
- [hash(str(idx) + features[idx]) % self.hash_dim_])
-
- return dense_feature, sparse_feature, [int(features[0])]
-
- def infer_reader(self, filelist, batch, buf_size):
- def local_iter():
- for fname in filelist:
- with open(fname.strip(), "r") as fin:
- for line in fin:
- dense_feature, sparse_feature, label = self._process_line(
- line)
- #yield dense_feature, sparse_feature, label
- yield [dense_feature] + sparse_feature + [label]
-
- import paddle
- batch_iter = paddle.batch(
- paddle.reader.shuffle(
- local_iter, buf_size=buf_size),
- batch_size=batch)
- return batch_iter
-
- def generate_sample(self, line):
- def data_iter():
- dense_feature, sparse_feature, label = self._process_line(line)
- feature_name = ["dense_input"]
- for idx in self.categorical_range_:
- feature_name.append("C" + str(idx - 13))
- feature_name.append("label")
- yield zip(feature_name, [dense_feature] + sparse_feature + [label])
-
- return data_iter
-
-
-if __name__ == "__main__":
- criteo_dataset = CriteoDataset()
- criteo_dataset.setup(int(sys.argv[1]))
- criteo_dataset.run_from_stdin()
diff --git a/examples/Cpp/PaddleRec/criteo_ctr_with_cube/cube/conf/cube.conf b/examples/Cpp/PaddleRec/criteo_ctr_with_cube/cube/conf/cube.conf
deleted file mode 100755
index b70f6e34247e410f9b80054010338d3c8f452ec6..0000000000000000000000000000000000000000
--- a/examples/Cpp/PaddleRec/criteo_ctr_with_cube/cube/conf/cube.conf
+++ /dev/null
@@ -1,13 +0,0 @@
-[{
- "dict_name": "test_dict",
- "shard": 1,
- "dup": 1,
- "timeout": 200,
- "retry": 3,
- "backup_request": 100,
- "type": "ipport_list",
- "load_balancer": "rr",
- "nodes": [{
- "ipport_list": "list://127.0.0.1:8027"
- }]
-}]
diff --git a/examples/Cpp/PaddleRec/criteo_ctr_with_cube/cube/conf/gflags.conf b/examples/Cpp/PaddleRec/criteo_ctr_with_cube/cube/conf/gflags.conf
deleted file mode 100755
index 21c7bddebd8f22b91d0ba26a6121007f96a4380b..0000000000000000000000000000000000000000
--- a/examples/Cpp/PaddleRec/criteo_ctr_with_cube/cube/conf/gflags.conf
+++ /dev/null
@@ -1,4 +0,0 @@
---port=8027
---dict_split=1
---in_mem=true
---log_dir=./log/
diff --git a/examples/Cpp/PaddleRec/criteo_ctr_with_cube/cube/keys b/examples/Cpp/PaddleRec/criteo_ctr_with_cube/cube/keys
deleted file mode 100755
index f00c965d8307308469e537302baa73048488f162..0000000000000000000000000000000000000000
--- a/examples/Cpp/PaddleRec/criteo_ctr_with_cube/cube/keys
+++ /dev/null
@@ -1,10 +0,0 @@
-1
-2
-3
-4
-5
-6
-7
-8
-9
-10
diff --git a/examples/Cpp/PaddleRec/criteo_ctr_with_cube/cube_prepare.sh b/examples/Cpp/PaddleRec/criteo_ctr_with_cube/cube_prepare.sh
deleted file mode 100755
index 773baba4d91b02b244e766cd8ebf899cc740dbbc..0000000000000000000000000000000000000000
--- a/examples/Cpp/PaddleRec/criteo_ctr_with_cube/cube_prepare.sh
+++ /dev/null
@@ -1,20 +0,0 @@
-# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# pylint: disable=doc-string-missing
-#! /bin/bash
-
-mkdir -p cube_model
-mkdir -p cube/data
-./cube/cube-builder -dict_name=test_dict -job_mode=base -last_version=0 -cur_version=0 -depend_version=0 -input_path=./cube_model -output_path=${PWD}/cube/data -shard_num=1 -only_build=false
-cd cube && ./cube
diff --git a/examples/Cpp/PaddleRec/criteo_ctr_with_cube/get_data.sh b/examples/Cpp/PaddleRec/criteo_ctr_with_cube/get_data.sh
deleted file mode 100755
index 1f244b3a4aa81488bb493825576ba30c4b3bba22..0000000000000000000000000000000000000000
--- a/examples/Cpp/PaddleRec/criteo_ctr_with_cube/get_data.sh
+++ /dev/null
@@ -1,2 +0,0 @@
-wget --no-check-certificate https://paddle-serving.bj.bcebos.com/data/ctr_prediction/ctr_data.tar.gz
-tar -zxvf ctr_data.tar.gz
diff --git a/examples/Cpp/PaddleRec/criteo_ctr_with_cube/local_train.py b/examples/Cpp/PaddleRec/criteo_ctr_with_cube/local_train.py
deleted file mode 100755
index 555e2e929c170c24a3175a88144ff74356d82514..0000000000000000000000000000000000000000
--- a/examples/Cpp/PaddleRec/criteo_ctr_with_cube/local_train.py
+++ /dev/null
@@ -1,101 +0,0 @@
-# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# pylint: disable=doc-string-missing
-
-from __future__ import print_function
-
-from args import parse_args
-import os
-import paddle.fluid as fluid
-import paddle
-import sys
-from network_conf import dnn_model
-
-dense_feature_dim = 13
-
-paddle.enable_static()
-def train():
- args = parse_args()
- sparse_only = args.sparse_only
- if not os.path.isdir(args.model_output_dir):
- os.mkdir(args.model_output_dir)
- dense_input = fluid.layers.data(
- name="dense_input", shape=[dense_feature_dim], dtype='float32')
- sparse_input_ids = [
- fluid.layers.data(
- name="C" + str(i), shape=[1], lod_level=1, dtype="int64")
- for i in range(1, 27)
- ]
- label = fluid.layers.data(name='label', shape=[1], dtype='int64')
-
- #nn_input = None if sparse_only else dense_input
- nn_input = dense_input
- predict_y, loss, auc_var, batch_auc_var, infer_vars = dnn_model(
- nn_input, sparse_input_ids, label, args.embedding_size,
- args.sparse_feature_dim)
-
- optimizer = fluid.optimizer.SGD(learning_rate=1e-4)
- optimizer.minimize(loss)
-
- exe = fluid.Executor(fluid.CPUPlace())
- exe.run(fluid.default_startup_program())
- dataset = fluid.DatasetFactory().create_dataset("InMemoryDataset")
- dataset.set_use_var([dense_input] + sparse_input_ids + [label])
-
- python_executable = "python3.6"
- pipe_command = "{} criteo_reader.py {}".format(python_executable,
- args.sparse_feature_dim)
-
- dataset.set_pipe_command(pipe_command)
- dataset.set_batch_size(128)
- thread_num = 10
- dataset.set_thread(thread_num)
-
- whole_filelist = [
- "raw_data/part-%d" % x for x in range(len(os.listdir("raw_data")))
- ]
-
- print(whole_filelist)
- dataset.set_filelist(whole_filelist[:100])
- dataset.load_into_memory()
- fluid.layers.Print(auc_var)
- epochs = 1
- for i in range(epochs):
- exe.train_from_dataset(
- program=fluid.default_main_program(), dataset=dataset, debug=True)
- print("epoch {} finished".format(i))
-
- import paddle_serving_client.io as server_io
- feed_var_dict = {}
- feed_var_dict['dense_input'] = dense_input
- for i, sparse in enumerate(sparse_input_ids):
- feed_var_dict["embedding_{}.tmp_0".format(i)] = sparse
- fetch_var_dict = {"prob": predict_y}
-
- feed_kv_dict = {}
- feed_kv_dict['dense_input'] = dense_input
- for i, emb in enumerate(infer_vars):
- feed_kv_dict["embedding_{}.tmp_0".format(i)] = emb
- fetch_var_dict = {"prob": predict_y}
-
- server_io.save_model("ctr_serving_model", "ctr_client_conf", feed_var_dict,
- fetch_var_dict, fluid.default_main_program())
-
- server_io.save_model("ctr_serving_model_kv", "ctr_client_conf_kv",
- feed_kv_dict, fetch_var_dict,
- fluid.default_main_program())
-
-
-if __name__ == '__main__':
- train()
diff --git a/examples/Cpp/PaddleRec/criteo_ctr_with_cube/network_conf.py b/examples/Cpp/PaddleRec/criteo_ctr_with_cube/network_conf.py
deleted file mode 100755
index 2975533a72ad21d6dd5896446fd06c1f9bdfe8b4..0000000000000000000000000000000000000000
--- a/examples/Cpp/PaddleRec/criteo_ctr_with_cube/network_conf.py
+++ /dev/null
@@ -1,77 +0,0 @@
-# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# pylint: disable=doc-string-missing
-
-import paddle.fluid as fluid
-import math
-
-
-def dnn_model(dense_input, sparse_inputs, label, embedding_size,
- sparse_feature_dim):
- def embedding_layer(input):
- emb = fluid.layers.embedding(
- input=input,
- is_sparse=True,
- is_distributed=False,
- size=[sparse_feature_dim, embedding_size],
- param_attr=fluid.ParamAttr(
- name="SparseFeatFactors",
- initializer=fluid.initializer.Uniform()))
- x = fluid.layers.sequence_pool(input=emb, pool_type='sum')
- return emb, x
-
- def mlp_input_tensor(emb_sums, dense_tensor):
- #if isinstance(dense_tensor, fluid.Variable):
- # return fluid.layers.concat(emb_sums, axis=1)
- #else:
- return fluid.layers.concat(emb_sums + [dense_tensor], axis=1)
-
- def mlp(mlp_input):
- fc1 = fluid.layers.fc(input=mlp_input,
- size=400,
- act='relu',
- param_attr=fluid.ParamAttr(
- initializer=fluid.initializer.Normal(
- scale=1 / math.sqrt(mlp_input.shape[1]))))
- fc2 = fluid.layers.fc(input=fc1,
- size=400,
- act='relu',
- param_attr=fluid.ParamAttr(
- initializer=fluid.initializer.Normal(
- scale=1 / math.sqrt(fc1.shape[1]))))
- fc3 = fluid.layers.fc(input=fc2,
- size=400,
- act='relu',
- param_attr=fluid.ParamAttr(
- initializer=fluid.initializer.Normal(
- scale=1 / math.sqrt(fc2.shape[1]))))
- pre = fluid.layers.fc(input=fc3,
- size=2,
- act='softmax',
- param_attr=fluid.ParamAttr(
- initializer=fluid.initializer.Normal(
- scale=1 / math.sqrt(fc3.shape[1]))))
- return pre
-
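-    # keep both the raw embedding (exported as a feed var for the KV/cube model) and its pooled sum (input to the MLP)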
- emb_pair_sums = list(map(embedding_layer, sparse_inputs))
- emb_sums = [x[1] for x in emb_pair_sums]
- infer_vars = [x[0] for x in emb_pair_sums]
- mlp_in = mlp_input_tensor(emb_sums, dense_input)
- predict = mlp(mlp_in)
- cost = fluid.layers.cross_entropy(input=predict, label=label)
- avg_cost = fluid.layers.reduce_sum(cost)
- accuracy = fluid.layers.accuracy(input=predict, label=label)
- auc_var, batch_auc_var, auc_states = \
- fluid.layers.auc(input=predict, label=label, num_thresholds=2 ** 12, slide_steps=20)
- return predict, avg_cost, auc_var, batch_auc_var, infer_vars
diff --git a/examples/Cpp/PaddleRec/criteo_ctr_with_cube/test_client.py b/examples/Cpp/PaddleRec/criteo_ctr_with_cube/test_client.py
deleted file mode 100755
index f12d727a3d2c4f6fce013d1f815f8b589a327dd5..0000000000000000000000000000000000000000
--- a/examples/Cpp/PaddleRec/criteo_ctr_with_cube/test_client.py
+++ /dev/null
@@ -1,57 +0,0 @@
-# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# pylint: disable=doc-string-missing
-
-from paddle_serving_client import Client
-import sys
-import os
-import criteo_reader as criteo
-import time
-from paddle_serving_client.metric import auc
-import numpy as np
-py_version = sys.version_info[0]
-
-client = Client()
-client.load_client_config(sys.argv[1])
-client.connect(["127.0.0.1:9292"])
-
-batch = 1
-buf_size = 100
-dataset = criteo.CriteoDataset()
-dataset.setup(1000001)
-test_filelists = ["{}/part-0".format(sys.argv[2])]
-reader = dataset.infer_reader(test_filelists, batch, buf_size)
-label_list = []
-prob_list = []
-start = time.time()
-reader_iter = reader()
-for ei in range(100):
-    # next() works on both Python 2 and 3 iterators
-    data = next(reader_iter)
- feed_dict = {}
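-    # dense features go in as a (1, 13) float batch; each sparse slot feeds the
-    # raw embedding ids plus a ".lod" key with this sample's sequence offsets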
- feed_dict['dense_input'] = np.array(data[0][0]).reshape(1, len(data[0][0]))
-
- for i in range(1, 27):
- feed_dict["embedding_{}.tmp_0".format(i - 1)] = np.array(data[0][i]).reshape(len(data[0][i]))
- feed_dict["embedding_{}.tmp_0.lod".format(i - 1)] = [0, len(data[0][i])]
-    fetch_map = client.predict(feed=feed_dict, fetch=["prob"], batch=True)
- print(fetch_map)
- prob_list.append(fetch_map['prob'][0][1])
- label_list.append(data[0][-1][0])
-
-
-end = time.time()
-print(end - start)
-
diff --git a/examples/Cpp/PaddleRec/criteo_ctr_with_cube/test_server.py b/examples/Cpp/PaddleRec/criteo_ctr_with_cube/test_server.py
deleted file mode 100755
index 479c602910b5afa52b35a66d00316f54905c0741..0000000000000000000000000000000000000000
--- a/examples/Cpp/PaddleRec/criteo_ctr_with_cube/test_server.py
+++ /dev/null
@@ -1,41 +0,0 @@
-# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# pylint: disable=doc-string-missing
-
-import os
-import sys
-from paddle_serving_server import OpMaker
-from paddle_serving_server import OpSeqMaker
-from paddle_serving_server import Server
-
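-# pipeline: read request -> dist-kv infer (sparse params fetched from Cube) -> build response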
-op_maker = OpMaker()
-read_op = op_maker.create('general_reader')
-general_dist_kv_infer_op = op_maker.create('general_dist_kv_infer')
-response_op = op_maker.create('general_response')
-
-op_seq_maker = OpSeqMaker()
-op_seq_maker.add_op(read_op)
-op_seq_maker.add_op(general_dist_kv_infer_op)
-op_seq_maker.add_op(response_op)
-
-server = Server()
-server.set_op_sequence(op_seq_maker.get_op_sequence())
-server.set_num_threads(4)
-server.load_model_config(sys.argv[1])
-server.prepare_server(
- workdir="work_dir1",
- port=9292,
- device="cpu",
- cube_conf="./cube/conf/cube.conf")
-server.run_server()
diff --git a/examples/Cpp/PaddleSeg/deeplabv3/N0060.jpg b/examples/Cpp/PaddleSeg/deeplabv3/N0060.jpg
deleted file mode 100644
index feac2837eaa5ae5db414d9769a0c5a830dde268d..0000000000000000000000000000000000000000
Binary files a/examples/Cpp/PaddleSeg/deeplabv3/N0060.jpg and /dev/null differ
diff --git a/examples/Cpp/PaddleSeg/deeplabv3/README.md b/examples/Cpp/PaddleSeg/deeplabv3/README.md
deleted file mode 100644
index 08022618fcec5220667ca19bfb803cba36519c7b..0000000000000000000000000000000000000000
--- a/examples/Cpp/PaddleSeg/deeplabv3/README.md
+++ /dev/null
@@ -1,22 +0,0 @@
-# Image Segmentation
-
-## Get Model
-
-```
-python3 -m paddle_serving_app.package --get_model deeplabv3
-tar -xzvf deeplabv3.tar.gz
-```
-
-## RPC Service
-
-### Start Service
-
-```
-python3 -m paddle_serving_server.serve --model deeplabv3_server --gpu_ids 0 --port 9494
-```
-
-### Client Prediction
-
-```
-python3 deeplabv3_client.py
-```
diff --git a/examples/Cpp/PaddleSeg/deeplabv3/README_CN.md b/examples/Cpp/PaddleSeg/deeplabv3/README_CN.md
deleted file mode 100644
index 16f11daba354349f1b73f8bba00cac8ff5c88864..0000000000000000000000000000000000000000
--- a/examples/Cpp/PaddleSeg/deeplabv3/README_CN.md
+++ /dev/null
@@ -1,21 +0,0 @@
-# Image Segmentation
-
-## Get Model
-
-```
-python3 -m paddle_serving_app.package --get_model deeplabv3
-tar -xzvf deeplabv3.tar.gz
-```
-
-## RPC Service
-
-### Start Service
-
-```
-python3 -m paddle_serving_server.serve --model deeplabv3_server --gpu_ids 0 --port 9494
-```
-
-### Client Prediction
-
-```
-python3 deeplabv3_client.py
-```
diff --git a/examples/Cpp/PaddleSeg/deeplabv3/deeplabv3_client.py b/examples/Cpp/PaddleSeg/deeplabv3/deeplabv3_client.py
deleted file mode 100644
index 77e25d5f5a24d0aa1dad8939c1e7845eaf5e4122..0000000000000000000000000000000000000000
--- a/examples/Cpp/PaddleSeg/deeplabv3/deeplabv3_client.py
+++ /dev/null
@@ -1,34 +0,0 @@
-# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from paddle_serving_client import Client
-from paddle_serving_app.reader import Sequential, File2Image, Resize, Transpose, BGR2RGB, SegPostprocess
-import sys
-import cv2
-
-client = Client()
-client.load_client_config("deeplabv3_client/serving_client_conf.prototxt")
-client.connect(["127.0.0.1:9494"])
-
-preprocess = Sequential(
- [File2Image(), Resize(
- (512, 512), interpolation=cv2.INTER_LINEAR)])
-
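-# SegPostprocess(2): 2 is the number of segmentation classes (assumed here: background + foreground)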
-postprocess = SegPostprocess(2)
-
-filename = "N0060.jpg"
-im = preprocess(filename)
-fetch_map = client.predict(feed={"image": im}, fetch=["output"])
-fetch_map["filename"] = filename
-postprocess(fetch_map)
diff --git a/examples/Cpp/PaddleSeg/unet_for_image_seg/N0060.jpg b/examples/Cpp/PaddleSeg/unet_for_image_seg/N0060.jpg
deleted file mode 100644
index feac2837eaa5ae5db414d9769a0c5a830dde268d..0000000000000000000000000000000000000000
Binary files a/examples/Cpp/PaddleSeg/unet_for_image_seg/N0060.jpg and /dev/null differ
diff --git a/examples/Cpp/PaddleSeg/unet_for_image_seg/README.md b/examples/Cpp/PaddleSeg/unet_for_image_seg/README.md
deleted file mode 100644
index 59004712bd76f5388d6e57947f70ce22562f8dbe..0000000000000000000000000000000000000000
--- a/examples/Cpp/PaddleSeg/unet_for_image_seg/README.md
+++ /dev/null
@@ -1,22 +0,0 @@
-# Image Segmentation
-
-## Get Model
-
-```
-python3 -m paddle_serving_app.package --get_model unet
-tar -xzvf unet.tar.gz
-```
-
-## RPC Service
-
-### Start Service
-
-```
-python3 -m paddle_serving_server.serve --model unet_model --gpu_ids 0 --port 9494
-```
-
-### Client Prediction
-
-```
-python3 seg_client.py
-```
diff --git a/examples/Cpp/PaddleSeg/unet_for_image_seg/README_CN.md b/examples/Cpp/PaddleSeg/unet_for_image_seg/README_CN.md
deleted file mode 100644
index 53c2f1893a879d5585cea0b77103fc1461086784..0000000000000000000000000000000000000000
--- a/examples/Cpp/PaddleSeg/unet_for_image_seg/README_CN.md
+++ /dev/null
@@ -1,22 +0,0 @@
-# Image Segmentation
-
-## Get Model
-
-```
-python3 -m paddle_serving_app.package --get_model unet
-tar -xzvf unet.tar.gz
-```
-
-## RPC Service
-
-### Start Service
-
-```
-python3 -m paddle_serving_server.serve --model unet_model --gpu_ids 0 --port 9494
-```
-
-### Client Prediction
-
-```
-python3 seg_client.py
-```
diff --git a/examples/Cpp/PaddleSeg/unet_for_image_seg/seg_client.py b/examples/Cpp/PaddleSeg/unet_for_image_seg/seg_client.py
deleted file mode 100644
index 44f634b6090159ee1bd37c176eebb7d2b7f37065..0000000000000000000000000000000000000000
--- a/examples/Cpp/PaddleSeg/unet_for_image_seg/seg_client.py
+++ /dev/null
@@ -1,34 +0,0 @@
-# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from paddle_serving_client import Client
-from paddle_serving_app.reader import Sequential, File2Image, Resize, Transpose, BGR2RGB, SegPostprocess
-import sys
-import cv2
-
-client = Client()
-client.load_client_config("unet_client/serving_client_conf.prototxt")
-client.connect(["127.0.0.1:9494"])
-
-preprocess = Sequential(
- [File2Image(), Resize(
- (512, 512), interpolation=cv2.INTER_LINEAR)])
-
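-# SegPostprocess(2): 2 is the number of segmentation classes (assumed here: background + foreground)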
-postprocess = SegPostprocess(2)
-
-filename = "N0060.jpg"
-im = preprocess(filename)
-fetch_map = client.predict(feed={"image": im}, fetch=["output"])
-fetch_map["filename"] = filename
-postprocess(fetch_map)
diff --git a/examples/Cpp/PaddleSeg/unet_for_image_seg/unet_benchmark/README.md b/examples/Cpp/PaddleSeg/unet_for_image_seg/unet_benchmark/README.md
deleted file mode 100644
index edb2af5864db746dc3368423dd7414575ed7b675..0000000000000000000000000000000000000000
--- a/examples/Cpp/PaddleSeg/unet_for_image_seg/unet_benchmark/README.md
+++ /dev/null
@@ -1,8 +0,0 @@
-# UNET_BENCHMARK Usage Notes
-## Features
-* Benchmark testing
-## Notes
-* Place the sample images (one or more) in the img_data directory; jpg and jpeg are supported
-* The number of images should be greater than or equal to the number of concurrent threads
-## TODO
-* HTTP benchmark
diff --git a/examples/Cpp/PaddleSeg/unet_for_image_seg/unet_benchmark/img_data/N0060.jpg b/examples/Cpp/PaddleSeg/unet_for_image_seg/unet_benchmark/img_data/N0060.jpg
deleted file mode 100644
index feac2837eaa5ae5db414d9769a0c5a830dde268d..0000000000000000000000000000000000000000
Binary files a/examples/Cpp/PaddleSeg/unet_for_image_seg/unet_benchmark/img_data/N0060.jpg and /dev/null differ
diff --git a/examples/Cpp/PaddleSeg/unet_for_image_seg/unet_benchmark/launch_benckmark.sh b/examples/Cpp/PaddleSeg/unet_for_image_seg/unet_benchmark/launch_benckmark.sh
deleted file mode 100644
index 59c2293e34b11dd2efd088c97a3c8de0dc62cf6f..0000000000000000000000000000000000000000
--- a/examples/Cpp/PaddleSeg/unet_for_image_seg/unet_benchmark/launch_benckmark.sh
+++ /dev/null
@@ -1,3 +0,0 @@
-#!/bin/bash
-python unet_benchmark.py --thread 1 --batch_size 1 --model ../unet_client/serving_client_conf.prototxt
-# thread/batch can be modified as you wish
diff --git a/examples/Cpp/PaddleSeg/unet_for_image_seg/unet_benchmark/unet_benchmark.py b/examples/Cpp/PaddleSeg/unet_for_image_seg/unet_benchmark/unet_benchmark.py
deleted file mode 100644
index 172643e364c5462aeed59ebe5e7b45bee7abf8ef..0000000000000000000000000000000000000000
--- a/examples/Cpp/PaddleSeg/unet_for_image_seg/unet_benchmark/unet_benchmark.py
+++ /dev/null
@@ -1,159 +0,0 @@
-# -*- coding: utf-8 -*-
-#
-# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-"""
- unet bench mark script
- 20201130 first edition by cg82616424
-"""
-from __future__ import unicode_literals, absolute_import
-import os
-import sys
-import time
-import json
-import requests
-from paddle_serving_client import Client
-from paddle_serving_client.utils import MultiThreadRunner
-from paddle_serving_client.utils import benchmark_args, show_latency
-from paddle_serving_app.reader import Sequential, File2Image, Resize, Transpose, BGR2RGB, SegPostprocess
-args = benchmark_args()
-
-
-def get_img_names(path):
- """
- Brief:
- get img files(jpg) under this path
- if any exception happened return None
- Args:
- path (string): image file path
- Returns:
- list: images names under this folder
- """
- if not os.path.exists(path):
- return None
- if not os.path.isdir(path):
- return None
- list_name = []
- for f_handler in os.listdir(path):
- file_path = os.path.join(path, f_handler)
- if os.path.isdir(file_path):
- continue
- else:
- if not file_path.endswith(".jpeg") and not file_path.endswith(
- ".jpg"):
- continue
- list_name.append(file_path)
- return list_name
-
-
-def preprocess_img(img_list):
- """
- Brief:
- prepare img data for benchmark
- Args:
- img_list(list): list for img file path
- Returns:
- image content binary list after preprocess
- """
- preprocess = Sequential([File2Image(), Resize((512, 512))])
- result_list = []
- for img in img_list:
- img_tmp = preprocess(img)
- result_list.append(img_tmp)
- return result_list
-
-
-def benckmark_worker(idx, resource):
- """
- Brief:
- benchmark single worker for unet
- Args:
- idx(int): worker idx ,use idx to select backend unet service
- resource(dict): unet serving endpoint dict
- Returns:
- latency
- TODO:
- http benckmarks
- """
- profile_flags = False
- latency_flags = False
- postprocess = SegPostprocess(2)
- if os.getenv("FLAGS_profile_client"):
- profile_flags = True
- if os.getenv("FLAGS_serving_latency"):
- latency_flags = True
- latency_list = []
- client_handler = Client()
- client_handler.load_client_config(args.model)
- client_handler.connect(
- [resource["endpoint"][idx % len(resource["endpoint"])]])
- start = time.time()
- turns = resource["turns"]
- img_list = resource["img_list"]
- for i in range(turns):
- if args.batch_size >= 1:
- l_start = time.time()
- feed_batch = []
- b_start = time.time()
- for bi in range(args.batch_size):
- feed_batch.append({"image": img_list[bi]})
- b_end = time.time()
- if profile_flags:
- sys.stderr.write(
- "PROFILE\tpid:{}\tunt_pre_0:{} unet_pre_1:{}\n".format(
- os.getpid(),
- int(round(b_start * 1000000)),
- int(round(b_end * 1000000))))
-            # send the whole assembled feed batch in one request
-            result = client_handler.predict(
-                feed=feed_batch, fetch=["output"])
-            #result["filename"] = "./img_data/N0060.jpg"
-            #postprocess(result)  # uncomment to include postprocess time in the measurement
- l_end = time.time()
- if latency_flags:
- latency_list.append(l_end * 1000 - l_start * 1000)
- else:
- print("unsupport batch size {}".format(args.batch_size))
- end = time.time()
- if latency_flags:
- return [[end - start], latency_list]
- else:
- return [[end - start]]
-
-
-if __name__ == '__main__':
- """
- usage:
- """
- img_file_list = get_img_names("./img_data")
- img_content_list = preprocess_img(img_file_list)
- multi_thread_runner = MultiThreadRunner()
- endpoint_list = ["127.0.0.1:9494"]
- turns = 1
- start = time.time()
- result = multi_thread_runner.run(benckmark_worker, args.thread, {
- "endpoint": endpoint_list,
- "turns": turns,
- "img_list": img_content_list
- })
- end = time.time()
- total_cost = end - start
- avg_cost = 0
- for i in range(args.thread):
- avg_cost += result[0][i]
- avg_cost = avg_cost / args.thread
- print("total cost: {}s".format(total_cost))
- print("each thread cost: {}s. ".format(avg_cost))
- print("qps: {}samples/s".format(args.batch_size * args.thread * turns /
- total_cost))
- if os.getenv("FLAGS_serving_latency"):
- show_latency(result[1])
diff --git a/examples/Cpp/encryption/README.md b/examples/Cpp/encryption/README.md
deleted file mode 100644
index 3120422ebfaa2a88851eda18c42e7740fe29e884..0000000000000000000000000000000000000000
--- a/examples/Cpp/encryption/README.md
+++ /dev/null
@@ -1,48 +0,0 @@
-# Encryption Model Prediction
-
-([简体中文](README_CN.md)|English)
-
-## Get Origin Model
-
-The example uses the model files of the fit_a_line example as the origin model.
-
-```
-sh get_data.sh
-```
-
-## Encrypt Model
-
-The `paddlepaddle` package is used in this example; you may need to install it first (`pip3 install paddlepaddle`).
-
-[python3 encrypt.py](./encrypt.py)
-
-[//file]:#encrypt.py
-``` python
-def serving_encryption():
- inference_model_to_serving(
- dirname="./uci_housing_model",
- params_filename=None,
- serving_server="encrypt_server",
- serving_client="encrypt_client",
- encryption=True)
-```
-`dirname` is the folder path where the model is located. If the parameters are stored as separate files, there is no need to specify `params_filename`; if they are merged into a single `__params__` file, set `params_filename="__params__"`.
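-
-For example, for a model whose parameters are merged into a single `__params__` file, the call would look like this (a sketch; `./my_model` is an illustrative path):
-
-``` python
-inference_model_to_serving(
-    dirname="./my_model",
-    params_filename="__params__",
-    serving_server="encrypt_server",
-    serving_client="encrypt_client",
-    encryption=True)
-```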
-
-The key is stored in the `key` file; the encrypted model files and the server-side configuration files are stored in the `encrypt_server` directory, and the client-side configuration files in the `encrypt_client` directory.
-
-**Notice:** When encrypted prediction is used, the model configuration and parameter folders loaded by the server and client should be encrypt_server/ and encrypt_client/.
-
-## Start Encryption Service
-CPU Service
-```
-python3 -m paddle_serving_server.serve --model encrypt_server/ --port 9393 --use_encryption_model
-```
-GPU Service
-```
-python3 -m paddle_serving_server.serve --model encrypt_server/ --port 9393 --use_encryption_model --gpu_ids 0
-```
-
-## Prediction
-```
-python3 test_client.py encrypt_client/serving_client_conf.prototxt
-```
diff --git a/examples/Cpp/encryption/README_CN.md b/examples/Cpp/encryption/README_CN.md
deleted file mode 100644
index ad82d49b61cb70093a9423ad83dbc30663b6d4f1..0000000000000000000000000000000000000000
--- a/examples/Cpp/encryption/README_CN.md
+++ /dev/null
@@ -1,49 +0,0 @@
-# Encryption Model Prediction
-
-(简体中文|[English](README.md))
-
-## Get Plaintext Model
-
-The example uses the model files of the fit_a_line example as the plaintext model.
-
-```
-sh get_data.sh
-```
-
-## Encrypt Model
-This example uses modules from the `paddlepaddle` package; install it first (`pip3 install paddlepaddle`).
-
-Run [python3 encrypt.py](./encrypt.py) to encrypt the model.
-
-[//file]:#encrypt.py
-``` python
-def serving_encryption():
- inference_model_to_serving(
- dirname="./uci_housing_model",
- params_filename=None,
- serving_server="encrypt_server",
- serving_client="encrypt_client",
- encryption=True)
-```
-`dirname` is the folder path where the model is located.
-
-If the parameters are stored as separate files, there is no need to specify `params_filename`; if they are merged into a single `__params__` file, set `params_filename="__params__"`.
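-
-For example, for a model whose parameters are merged into a single `__params__` file, the call would look like this (a sketch; `./my_model` is an illustrative path):
-
-``` python
-inference_model_to_serving(
-    dirname="./my_model",
-    params_filename="__params__",
-    serving_server="encrypt_server",
-    serving_client="encrypt_client",
-    encryption=True)
-```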
-
-The key is stored in the `key` file; the encrypted model files and the server-side configuration files are stored in the `encrypt_server` directory, and the client-side configuration files in the `encrypt_client` directory.
-
-**Note:** When encrypted prediction is used, the model configuration and parameter folders loaded by the server and client are encrypt_server/ and encrypt_client/.
-
-## Start Encryption Service
-CPU service
-```
-python3 -m paddle_serving_server.serve --model encrypt_server/ --port 9393 --use_encryption_model
-```
-GPU service
-```
-python3 -m paddle_serving_server.serve --model encrypt_server/ --port 9393 --use_encryption_model --gpu_ids 0
-```
-
-## Prediction
-```
-python3 test_client.py encrypt_client/serving_client_conf.prototxt
-```
diff --git a/examples/Cpp/encryption/encrypt.py b/examples/Cpp/encryption/encrypt.py
deleted file mode 100644
index e233784390f0899cd81ec7862ceef0d506bbcd1f..0000000000000000000000000000000000000000
--- a/examples/Cpp/encryption/encrypt.py
+++ /dev/null
@@ -1,28 +0,0 @@
-# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from paddle_serving_client.io import inference_model_to_serving
-
-
-def serving_encryption():
- inference_model_to_serving(
- dirname="./uci_housing_model",
- params_filename=None,
- serving_server="encrypt_server",
- serving_client="encrypt_client",
- encryption=True)
-
-
-if __name__ == "__main__":
- serving_encryption()
diff --git a/examples/Cpp/encryption/get_data.sh b/examples/Cpp/encryption/get_data.sh
deleted file mode 100644
index c3cd5c236f5643d53c3a30bf0ffd367853ffaf13..0000000000000000000000000000000000000000
--- a/examples/Cpp/encryption/get_data.sh
+++ /dev/null
@@ -1,4 +0,0 @@
-wget --no-check-certificate https://paddle-serving.bj.bcebos.com/uci_housing_example/encrypt.tar.gz
-tar -xzf encrypt.tar.gz
-wget --no-check-certificate https://paddle-serving.bj.bcebos.com/uci_housing.tar.gz
-tar -xzf uci_housing.tar.gz
diff --git a/examples/Cpp/encryption/test_client.py b/examples/Cpp/encryption/test_client.py
deleted file mode 100644
index 33816e741c9a6ffeda0685d5fd3bda6774a5f186..0000000000000000000000000000000000000000
--- a/examples/Cpp/encryption/test_client.py
+++ /dev/null
@@ -1,33 +0,0 @@
-# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# pylint: disable=doc-string-missing
-
-from paddle_serving_client import Client
-import sys
-
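-# the client loads the key produced by encrypt.py; encryption=True enables the encrypted-model handshake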
-client = Client()
-client.load_client_config(sys.argv[1])
-client.use_key("./key")
-client.connect(["0.0.0.0:9393"], encryption=True)
-fetch_list = client.get_fetch_names()
-
-import paddle
-test_reader = paddle.batch(
- paddle.reader.shuffle(
- paddle.dataset.uci_housing.test(), buf_size=500),
- batch_size=1)
-
-for data in test_reader():
- fetch_map = client.predict(feed={"x": data[0][0]}, fetch=fetch_list)
- print(fetch_map)
diff --git a/examples/Cpp/fit_a_line/README.md b/examples/Cpp/fit_a_line/README.md
deleted file mode 100644
index 9586cd670240eb43e4a706ff89ea435b7a8c6d1c..0000000000000000000000000000000000000000
--- a/examples/Cpp/fit_a_line/README.md
+++ /dev/null
@@ -1,41 +0,0 @@
-# Fit a line prediction example
-
-([简体中文](./README_CN.md)|English)
-
-## Get data
-
-```shell
-sh get_data.sh
-```
-
-
-
-## RPC service
-
-### Start server
-
-```shell
-python3 -m paddle_serving_server.serve --model uci_housing_model --thread 10 --port 9393
-```
-
-## Client prediction
-
-### RPC Client
-The `paddlepaddle` package is used in `test_client.py`; you may need to install it first (`pip3 install paddlepaddle`).
-
-``` shell
-python3 test_client.py uci_housing_client/serving_client_conf.prototxt
-```
-
-### Http Client
-
-``` shell
-python3 test_httpclient.py uci_housing_client/serving_client_conf.prototxt
-```
-
-
-## Benchmark
-``` shell
-bash benchmark.sh uci_housing_model uci_housing_client
-```
-The benchmark log file is named `profile_log_uci_housing_model`.
diff --git a/examples/Cpp/fit_a_line/README_CN.md b/examples/Cpp/fit_a_line/README_CN.md
deleted file mode 100755
index d1cace5e2c5b5cee2195deaa1667af68e5f1f067..0000000000000000000000000000000000000000
--- a/examples/Cpp/fit_a_line/README_CN.md
+++ /dev/null
@@ -1,43 +0,0 @@
-# Linear Regression Prediction Service Example
-
-(简体中文|[English](./README.md))
-
-## Get Data
-
-```shell
-sh get_data.sh
-```
-
-
-## Start Server (supports BRPC-Client/GRPC-Client/Http-Client)
-
-```shell
-python3 -m paddle_serving_server.serve --model uci_housing_model --thread 10 --port 9393
-```
-
-## Client Prediction
-
-### BRPC-Client
-
-The `paddlepaddle` package is used in `test_client.py`; you may need to install it first (`pip3 install paddlepaddle`).
-
-``` shell
-python3 test_client.py uci_housing_client/serving_client_conf.prototxt
-```
-
-### GRPC-Client/Http-Client
-
-``` shell
-python3 test_httpclient.py uci_housing_client/serving_client_conf.prototxt
-```
-
-
-## Benchmark
-``` shell
-bash benchmark.sh uci_housing_model uci_housing_client
-```
-The benchmark log file is `profile_log_uci_housing_model`.
-
-To change the benchmark parameters, edit the configuration in benchmark.sh.
-
-Note: do not append '/' to the uci_housing_model and uci_housing_client paths; this example needs to run on a GPU machine.
diff --git a/examples/Cpp/fit_a_line/benchmark.py b/examples/Cpp/fit_a_line/benchmark.py
deleted file mode 100644
index 7c4e4b4c582361f2f0f5d48fb374b2e7899c65b2..0000000000000000000000000000000000000000
--- a/examples/Cpp/fit_a_line/benchmark.py
+++ /dev/null
@@ -1,77 +0,0 @@
-# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# pylint: disable=doc-string-missing
-
-from paddle_serving_client import Client
-from paddle_serving_client.utils import MultiThreadRunner
-from paddle_serving_client.utils import benchmark_args, show_latency
-import time
-import paddle
-import sys
-import requests
-
-args = benchmark_args()
-
-
-def single_func(idx, resource):
- train_reader = paddle.batch(
- paddle.reader.shuffle(
- paddle.dataset.uci_housing.train(), buf_size=500),
- batch_size=1)
- total_number = sum(1 for _ in train_reader())
- latency_list = []
-
- if args.request == "rpc":
- client = Client()
- client.load_client_config(args.model)
- client.connect([args.endpoint])
- start = time.time()
- for data in train_reader():
- l_start = time.time()
- fetch_map = client.predict(feed={"x": data[0][0]}, fetch=["price"])
- l_end = time.time()
- latency_list.append(l_end * 1000 - l_start * 1000)
- end = time.time()
- return [[end - start], latency_list, [total_number]]
- elif args.request == "http":
- train_reader = paddle.batch(
- paddle.reader.shuffle(
- paddle.dataset.uci_housing.train(), buf_size=500),
- batch_size=1)
- start = time.time()
- for data in train_reader():
- l_start = time.time()
- r = requests.post(
- 'http://{}/uci/prediction'.format(args.endpoint),
- data={"x": data[0]})
- l_end = time.time()
- latency_list.append(l_end * 1000 - l_start * 1000)
- end = time.time()
- return [[end - start], latency_list, [total_number]]
-
-
-start = time.time()
-multi_thread_runner = MultiThreadRunner()
-result = multi_thread_runner.run(single_func, args.thread, {})
-end = time.time()
-total_cost = end - start
-avg_cost = 0
-for i in range(args.thread):
- avg_cost += result[0][i]
-avg_cost = avg_cost / args.thread
-
-print("total cost: {}s".format(total_cost))
-print("each thread cost: {}s. ".format(avg_cost))
-print("qps: {}samples/s".format(args.batch_size * args.thread / total_cost))
-show_latency(result[1])
diff --git a/examples/Cpp/fit_a_line/benchmark.sh b/examples/Cpp/fit_a_line/benchmark.sh
deleted file mode 100644
index 7e374db3ee5a5bdccdc75dc2884b9dbbfcb60eca..0000000000000000000000000000000000000000
--- a/examples/Cpp/fit_a_line/benchmark.sh
+++ /dev/null
@@ -1,55 +0,0 @@
-rm profile_log*
-export CUDA_VISIBLE_DEVICES=0,1
-export FLAGS_profile_server=1
-export FLAGS_profile_client=1
-export FLAGS_serving_latency=1
-
-gpu_id=0
-#save cpu and gpu utilization log
-if [ -d utilization ];then
- rm -rf utilization
-else
- mkdir utilization
-fi
-#start server
-$PYTHONROOT/bin/python3 -m paddle_serving_server.serve --model $1 --port 9292 --thread 4 --gpu_ids 0,1 --mem_optim --ir_optim > elog 2>&1 &
-sleep 5
-
-#warm up
-$PYTHONROOT/bin/python3 benchmark.py --thread 4 --batch_size 1 --model $2/serving_client_conf.prototxt --request rpc > profile 2>&1
-echo -e "import psutil\nimport time\nwhile True:\n\tcpu_res = psutil.cpu_percent()\n\twith open('cpu.txt', 'a+') as f:\n\t\tf.write(f'{cpu_res}\\\n')\n\ttime.sleep(0.1)" > cpu.py
-for thread_num in 1 4 8 16
-do
-for batch_size in 1 4 16 64
-do
- job_bt=`date '+%Y%m%d%H%M%S'`
- nvidia-smi --id=0 --query-compute-apps=used_memory --format=csv -lms 100 > gpu_memory_use.log 2>&1 &
- nvidia-smi --id=0 --query-gpu=utilization.gpu --format=csv -lms 100 > gpu_utilization.log 2>&1 &
- rm -rf cpu.txt
- $PYTHONROOT/bin/python3 cpu.py &
- gpu_memory_pid=$!
- $PYTHONROOT/bin/python3 benchmark.py --thread $thread_num --batch_size $batch_size --model $2/serving_client_conf.prototxt --request rpc > profile 2>&1
- kill `ps -ef|grep used_memory|awk '{print $2}'` > /dev/null
- kill `ps -ef|grep utilization.gpu|awk '{print $2}'` > /dev/null
- kill `ps -ef|grep cpu.py|awk '{print $2}'` > /dev/null
- echo "model_name:" $1
- echo "thread_num:" $thread_num
- echo "batch_size:" $batch_size
- echo "=================Done===================="
- echo "model_name:$1" >> profile_log_$1
- echo "batch_size:$batch_size" >> profile_log_$1
- job_et=`date '+%Y%m%d%H%M%S'`
- awk 'BEGIN {max = 0} {if(NR>1){if ($1 > max) max=$1}} END {print "CPU_UTILIZATION:", max}' cpu.txt >> profile_log_$1
- awk 'BEGIN {max = 0} {if(NR>1){if ($1 > max) max=$1}} END {print "MAX_GPU_MEMORY:", max}' gpu_memory_use.log >> profile_log_$1
- awk 'BEGIN {max = 0} {if(NR>1){if ($1 > max) max=$1}} END {print "GPU_UTILIZATION:", max}' gpu_utilization.log >> profile_log_$1
-    rm -rf gpu_memory_use.log gpu_utilization.log
- $PYTHONROOT/bin/python3 ../util/show_profile.py profile $thread_num >> profile_log_$1
- tail -n 8 profile >> profile_log_$1
- echo "" >> profile_log_$1
-done
-done
-
-#Divided log
-awk 'BEGIN{RS="\n\n"}{i++}{print > "bert_log_"i}' profile_log_$1
-mkdir bert_log && mv bert_log_* bert_log
-ps -ef|grep 'serving'|grep -v grep|cut -c 9-15 | xargs kill -9
diff --git a/examples/Cpp/fit_a_line/get_data.sh b/examples/Cpp/fit_a_line/get_data.sh
deleted file mode 100644
index 84a3966a0ef323cef4b146d8e9489c70a7a8ae35..0000000000000000000000000000000000000000
--- a/examples/Cpp/fit_a_line/get_data.sh
+++ /dev/null
@@ -1,2 +0,0 @@
-wget --no-check-certificate https://paddle-serving.bj.bcebos.com/uci_housing.tar.gz
-tar -xzf uci_housing.tar.gz
diff --git a/examples/Cpp/fit_a_line/local_train.py b/examples/Cpp/fit_a_line/local_train.py
deleted file mode 100644
index 3e0f8880a4d006b346712f2592d6c44986882193..0000000000000000000000000000000000000000
--- a/examples/Cpp/fit_a_line/local_train.py
+++ /dev/null
@@ -1,53 +0,0 @@
-# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# pylint: disable=doc-string-missing
-
-import sys
-import paddle
-import paddle.fluid as fluid
-paddle.enable_static()
-train_reader = paddle.batch(
- paddle.reader.shuffle(
- paddle.dataset.uci_housing.train(), buf_size=500),
- batch_size=16)
-
-test_reader = paddle.batch(
- paddle.reader.shuffle(
- paddle.dataset.uci_housing.test(), buf_size=500),
- batch_size=16)
-
-x = fluid.data(name='x', shape=[None, 13], dtype='float32')
-y = fluid.data(name='y', shape=[None, 1], dtype='float32')
-
-y_predict = fluid.layers.fc(input=x, size=1, act=None)
-cost = fluid.layers.square_error_cost(input=y_predict, label=y)
-avg_loss = fluid.layers.mean(cost)
-sgd_optimizer = fluid.optimizer.SGD(learning_rate=0.01)
-sgd_optimizer.minimize(avg_loss)
-
-place = fluid.CPUPlace()
-feeder = fluid.DataFeeder(place=place, feed_list=[x, y])
-exe = fluid.Executor(place)
-exe.run(fluid.default_startup_program())
-
-import paddle_serving_client.io as serving_io
-
-for pass_id in range(30):
- for data_train in train_reader():
- avg_loss_value, = exe.run(fluid.default_main_program(),
- feed=feeder.feed(data_train),
- fetch_list=[avg_loss])
-
-serving_io.save_model("uci_housing_model", "uci_housing_client", {"x": x},
- {"price": y_predict}, fluid.default_main_program())
diff --git a/examples/Cpp/fit_a_line/test_client.py b/examples/Cpp/fit_a_line/test_client.py
deleted file mode 100755
index d18ece66686520e25a0b9ebbd2d8b29354f4da16..0000000000000000000000000000000000000000
--- a/examples/Cpp/fit_a_line/test_client.py
+++ /dev/null
@@ -1,35 +0,0 @@
-# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# pylint: disable=doc-string-missing
-
-from paddle_serving_client import Client
-import sys
-import numpy as np
-
-client = Client()
-client.load_client_config(sys.argv[1])
-client.connect(["127.0.0.1:9393"])
-fetch_list = client.get_fetch_names()
-import paddle
-test_reader = paddle.batch(
- paddle.reader.shuffle(
- paddle.dataset.uci_housing.test(), buf_size=500),
- batch_size=1)
-
-for data in test_reader():
- new_data = np.zeros((1, 13)).astype("float32")
- new_data[0] = data[0][0]
- fetch_map = client.predict(
- feed={"x": new_data}, fetch=fetch_list, batch=True)
- print(fetch_map)
diff --git a/examples/Cpp/fit_a_line/test_httpclient.py b/examples/Cpp/fit_a_line/test_httpclient.py
deleted file mode 100755
index c9f785dc99e2699027862fd2a28bd429e8b1a0a5..0000000000000000000000000000000000000000
--- a/examples/Cpp/fit_a_line/test_httpclient.py
+++ /dev/null
@@ -1,58 +0,0 @@
-# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# pylint: disable=doc-string-missing
-
-from paddle_serving_client.httpclient import HttpClient
-import sys
-import numpy as np
-import time
-
-client = HttpClient()
-client.load_client_config(sys.argv[1])
-'''
-To use the GRPC client, call set_use_grpc_client(True),
-or call client.grpc_client_predict(...) directly.
-For the HTTP client, call set_use_grpc_client(False) (the default),
-or call client.http_client_predict(...) directly.
-'''
-#client.set_use_grpc_client(True)
-'''
-To enable the encryption module, uncomment the following line.
-'''
-#client.use_key("./key")
-'''
-To enable compression, uncomment the following lines.
-'''
-#client.set_response_compress(True)
-#client.set_request_compress(True)
-'''
-We recommend the Proto data format in the HTTP body: set True (the default).
-To use the JSON data format in the HTTP body instead, set False.
-'''
-#client.set_http_proto(True)
-client.connect(["127.0.0.1:9393"])
-fetch_list = client.get_fetch_names()
-
-import paddle
-test_reader = paddle.batch(
- paddle.reader.shuffle(
- paddle.dataset.uci_housing.test(), buf_size=500),
- batch_size=1)
-for data in test_reader():
- new_data = np.zeros((1, 13)).astype("float32")
- new_data[0] = data[0][0]
- fetch_map = client.predict(
- feed={"x": new_data}, fetch=fetch_list, batch=True)
- print(fetch_map)
- break
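Pulling the commented-out toggles above into one place, a hedged configuration sketch (same `HttpClient` API as this file; whether you want compression, encryption, or GRPC transport depends on your deployment):

```python
from paddle_serving_client.httpclient import HttpClient

client = HttpClient()
client.load_client_config("uci_housing_client/serving_client_conf.prototxt")
client.set_use_grpc_client(False)   # HTTP transport (the default)
client.set_http_proto(True)         # Proto-encoded HTTP body (the default)
client.set_request_compress(True)   # compress request payloads
client.set_response_compress(True)  # ask the server to compress responses
client.connect(["127.0.0.1:9393"])
```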
diff --git a/examples/Cpp/fit_a_line/test_multi_process_client.py b/examples/Cpp/fit_a_line/test_multi_process_client.py
deleted file mode 100644
index e6120266097f8fdd446998741582a9e396cd2efd..0000000000000000000000000000000000000000
--- a/examples/Cpp/fit_a_line/test_multi_process_client.py
+++ /dev/null
@@ -1,42 +0,0 @@
-# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from paddle_serving_client import Client
-from paddle_serving_client.utils import MultiThreadRunner
-import paddle
-import numpy as np
-
-
-def single_func(idx, resource):
- client = Client()
- client.load_client_config(
- "./uci_housing_client/serving_client_conf.prototxt")
- client.connect(["127.0.0.1:9293", "127.0.0.1:9292"])
- x = [
- 0.0137, -0.1136, 0.2553, -0.0692, 0.0582, -0.0727, -0.1583, -0.0584,
- 0.6283, 0.4919, 0.1856, 0.0795, -0.0332
- ]
- x = np.array(x)
- for i in range(1000):
- fetch_map = client.predict(feed={"x": x}, fetch=["price"])
- if fetch_map is None:
- return [[None]]
- return [[0]]
-
-
-multi_thread_runner = MultiThreadRunner()
-thread_num = 4
-result = multi_thread_runner.run(single_func, thread_num, {})
-if None in result[0]:
- exit(1)
diff --git a/examples/Cpp/fit_a_line/test_server.py b/examples/Cpp/fit_a_line/test_server.py
deleted file mode 100644
index d055b309d7530ccbe928d50e2bcaba23fb1ddaff..0000000000000000000000000000000000000000
--- a/examples/Cpp/fit_a_line/test_server.py
+++ /dev/null
@@ -1,36 +0,0 @@
-# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# pylint: disable=doc-string-missing
-
-from paddle_serving_server.web_service import WebService
-import numpy as np
-
-
-class UciService(WebService):
- def preprocess(self, feed=[], fetch=[]):
- feed_batch = []
- is_batch = True
- new_data = np.zeros((len(feed), 1, 13)).astype("float32")
- for i, ins in enumerate(feed):
- nums = np.array(ins["x"]).reshape(1, 1, 13)
- new_data[i] = nums
- feed = {"x": new_data}
- return feed, fetch, is_batch
-
-
-uci_service = UciService(name="uci")
-uci_service.load_model_config("uci_housing_model")
-uci_service.prepare_server(workdir="workdir", port=9393)
-uci_service.run_rpc_service()
-uci_service.run_web_service()
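A hedged companion sketch: with `test_server.py` running, the web route can be exercised over plain HTTP. The `/uci/prediction` path is inferred from the `name="uci"` argument and the `{"feed": ..., "fetch": ...}` JSON shape used by the HTTP examples elsewhere in this diff; the feature vector reuses the sample from `test_multi_process_client.py`.

```python
import requests

payload = {
    "feed": [{"x": [0.0137, -0.1136, 0.2553, -0.0692, 0.0582, -0.0727,
                    -0.1583, -0.0584, 0.6283, 0.4919, 0.1856, 0.0795,
                    -0.0332]}],
    "fetch": ["price"],
}
r = requests.post("http://127.0.0.1:9393/uci/prediction", json=payload)
print(r.json())
```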
diff --git a/examples/Cpp/imdb/README.md b/examples/Cpp/imdb/README.md
deleted file mode 100755
index 573ac47db37d23406e66fb1605ac60ea58189ffa..0000000000000000000000000000000000000000
--- a/examples/Cpp/imdb/README.md
+++ /dev/null
@@ -1,28 +0,0 @@
-## IMDB comment sentiment inference service
-
-([简体中文](./README_CN.md)|English)
-
-### Get model files and sample data
-
-```
-sh get_data.sh
-```
-The downloaded package contains the cnn, lstm and bow model configs, along with their test_data and train_data.
-
-### Start the inference service (supports BRPC-Client/GRPC-Client/Http-Client)
-
-```
-python3 -m paddle_serving_server.serve --model imdb_cnn_model/ --port 9292
-```
-### BRPC-Client Infer
-```
-head test_data/part-0 | python3 test_client.py imdb_cnn_client_conf/serving_client_conf.prototxt imdb.vocab
-```
-
-This returns the prediction results for the first 10 test cases.
-
-
-### GRPC-Client/Http-Client Infer
-```
-head test_data/part-0 | python3 test_http_client.py imdb_cnn_client_conf/serving_client_conf.prototxt imdb.vocab
-```
diff --git a/examples/Cpp/imdb/README_CN.md b/examples/Cpp/imdb/README_CN.md
deleted file mode 100755
index a1fecc8af35dcd2f5a38f47480b9b80b3cf96054..0000000000000000000000000000000000000000
--- a/examples/Cpp/imdb/README_CN.md
+++ /dev/null
@@ -1,26 +0,0 @@
-## IMDB Comment Sentiment Inference Service
-
-(Simplified Chinese|[English](./README.md))
-
-### Get model files and sample data
-
-```
-sh get_data.sh
-```
-The script downloads and unpacks the config files for the cnn, lstm and bow models, together with test_data and train_data.
-
-### Start the inference service (supports BRPC-Client/GRPC-Client/Http-Client)
-
-```
-python3 -m paddle_serving_server.serve --model imdb_cnn_model/ --port 9292
-```
-### BRPC-Client inference
-```
-head test_data/part-0 | python3 test_client.py imdb_cnn_client_conf/serving_client_conf.prototxt imdb.vocab
-```
-Runs inference on the first ten samples of test_data/part-0.
-
-### GRPC-Client/Http-Client inference
-```
-head test_data/part-0 | python3 test_http_client.py imdb_cnn_client_conf/serving_client_conf.prototxt imdb.vocab
-```
diff --git a/examples/Cpp/imdb/abtest_client.py b/examples/Cpp/imdb/abtest_client.py
deleted file mode 100644
index f5f721b67966f1da72e19b66e014e5b72d802323..0000000000000000000000000000000000000000
--- a/examples/Cpp/imdb/abtest_client.py
+++ /dev/null
@@ -1,43 +0,0 @@
-
-# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from paddle_serving_client import Client
-import numpy as np
-
-client = Client()
-client.load_client_config('imdb_bow_client_conf/serving_client_conf.prototxt')
-client.add_variant("bow", ["127.0.0.1:8000"], 10)
-client.add_variant("lstm", ["127.0.0.1:9000"], 90)
-client.connect()
-
-print('please wait for about 10s')
-with open('processed.data') as f:
- cnt = {"bow": {'acc': 0, 'total': 0}, "lstm": {'acc': 0, 'total': 0}}
- for line in f:
- word_ids, label = line.split(';')
- word_ids = [int(x) for x in word_ids.split(',')]
- word_len = len(word_ids)
- feed = {
- "words": np.array(word_ids).reshape(word_len, 1),
- "words.lod": [0, word_len]
- }
- fetch = ["acc", "cost", "prediction"]
- [fetch_map, tag] = client.predict(feed=feed, fetch=fetch, need_variant_tag=True, batch=True)
- if (float(fetch_map["prediction"][0][1]) - 0.5) * (float(label[0]) - 0.5) > 0:
- cnt[tag]['acc'] += 1
- cnt[tag]['total'] += 1
-
- for tag, data in cnt.items():
- print('[{}](total: {}) acc: {}'.format(tag, data['total'], float(data['acc']) / float(data['total'])))
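A standalone illustration (a simulation, not the serving router) of what the 10/90 weights above imply: roughly 10% of requests should come back tagged "bow" and roughly 90% tagged "lstm":

```python
import random

# Simulate 10,000 weighted routing decisions and report the "bow" share.
tags = [random.choices(["bow", "lstm"], weights=[10, 90])[0]
        for _ in range(10000)]
print("bow share: {:.3f}".format(tags.count("bow") / len(tags)))  # ~0.100
```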
diff --git a/examples/Cpp/imdb/abtest_get_data.py b/examples/Cpp/imdb/abtest_get_data.py
deleted file mode 100644
index c6bd7ea57b86d0df0dd2ae842bee8bd98daa910e..0000000000000000000000000000000000000000
--- a/examples/Cpp/imdb/abtest_get_data.py
+++ /dev/null
@@ -1,23 +0,0 @@
-# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from paddle_serving_app.reader.imdb_reader import IMDBDataset
-imdb_dataset = IMDBDataset()
-imdb_dataset.load_resource('imdb.vocab')
-
-with open('test_data/part-0') as fin:
- with open('processed.data', 'w') as fout:
- for line in fin:
- word_ids, label = imdb_dataset.get_words_and_label(line)
- fout.write("{};{}\n".format(','.join([str(x) for x in word_ids]), label[0]))
diff --git a/examples/Cpp/imdb/benchmark.py b/examples/Cpp/imdb/benchmark.py
deleted file mode 100644
index 18584f88ea51373ffe2ca2e75946342c94464d76..0000000000000000000000000000000000000000
--- a/examples/Cpp/imdb/benchmark.py
+++ /dev/null
@@ -1,107 +0,0 @@
-# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# pylint: disable=doc-string-missing
-
-import os
-import sys
-import time
-import requests
-import numpy as np
-from paddle_serving_app.reader.imdb_reader import IMDBDataset
-from paddle_serving_client import Client
-from paddle_serving_client.utils import MultiThreadRunner, benchmark_args, show_latency
-
-args = benchmark_args()
-
-
-def single_func(idx, resource):
- imdb_dataset = IMDBDataset()
- imdb_dataset.load_resource("./imdb.vocab")
- dataset = []
- with open("./test_data/part-0") as fin:
- for line in fin:
- dataset.append(line.strip())
- profile_flags = False
- latency_flags = False
- if os.getenv("FLAGS_profile_client"):
- profile_flags = True
- if os.getenv("FLAGS_serving_latency"):
- latency_flags = True
- latency_list = []
- start = time.time()
- turns = resource["turns"]
- if args.request == "rpc":
- client = Client()
- client.load_client_config(args.model)
- client.connect([resource["endpoint"][idx % len(resource["endpoint"])]])
- for i in range(turns):
- if args.batch_size >= 1:
- feed_batch = []
- feed = {"words": [], "words.lod": [0]}
- for bi in range(args.batch_size):
- word_ids, label = imdb_dataset.get_words_and_label(dataset[
- bi])
- feed["words.lod"].append(feed["words.lod"][-1] + len(
- word_ids))
- feed["words"].extend(word_ids)
- feed["words"] = np.array(feed["words"]).reshape(
- len(feed["words"]), 1)
- result = client.predict(
- feed=feed, fetch=["prediction"], batch=True)
- if result is None:
- raise RuntimeError("predict failed.")
- else:
- print("unsupported batch size {}".format(args.batch_size))
-
- elif args.request == "http":
- if args.batch_size >= 1:
- feed_batch = []
- for bi in range(args.batch_size):
- feed_batch.append({"words": dataset[bi]})
- r = requests.post(
- "http://{}/imdb/prediction".format(args.endpoint),
- json={"feed": feed_batch,
- "fetch": ["prediction"]})
- if r.status_code != 200:
- print('HTTP status code != 200')
- raise RuntimeError("predict failed.")
- else:
- print("unsupported batch size {}".format(args.batch_size))
- end = time.time()
- return [[end - start]]
-
-
-if __name__ == '__main__':
- multi_thread_runner = MultiThreadRunner()
- endpoint_list = [
- "127.0.0.1:9292", "127.0.0.1:9293", "127.0.0.1:9294", "127.0.0.1:9295"
- ]
- turns = 100
- start = time.time()
- result = multi_thread_runner.run(
- single_func, args.thread, {"endpoint": endpoint_list,
- "turns": turns})
- end = time.time()
- total_cost = end - start
- avg_cost = 0
- for i in range(args.thread):
- avg_cost += result[0][i]
- avg_cost = avg_cost / args.thread
-
- print("total cost: {}".format(total_cost))
- print("each thread cost: {}".format(avg_cost))
- print("qps: {} samples/s".format(args.batch_size * args.thread * turns /
- total_cost))
- if os.getenv("FLAGS_serving_latency"):
- show_latency(result[0])
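For clarity, a worked instance of the qps formula printed above, using illustrative numbers rather than measured results:

```python
# qps = batch_size * thread * turns / total_cost
batch_size, thread, turns, total_cost = 4, 8, 100, 20.0
print(batch_size * thread * turns / total_cost, "samples/s")  # 160.0
```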
diff --git a/examples/Cpp/imdb/benchmark.sh b/examples/Cpp/imdb/benchmark.sh
deleted file mode 100644
index 7db9a1086314047930bee32fe8c695c2b71753bf..0000000000000000000000000000000000000000
--- a/examples/Cpp/imdb/benchmark.sh
+++ /dev/null
@@ -1,43 +0,0 @@
-rm profile_log*
-export FLAGS_profile_server=1
-export FLAGS_profile_client=1
-export FLAGS_serving_latency=1
-$PYTHONROOT/bin/python3 -m paddle_serving_server.serve --model $1 --port 9292 --thread 4 --mem_optim --ir_optim 2> elog > stdlog &
-hostname=`echo $(hostname)|awk -F '.baidu.com' '{print $1}'`
-#save cpu and gpu utilization log
-if [ -d utilization ];then
- rm -rf utilization
-else
- mkdir utilization
-fi
-sleep 5
-
-
-#warm up
-$PYTHONROOT/bin/python3 benchmark.py --thread 4 --batch_size 1 --model $2/serving_client_conf.prototxt --request rpc > profile 2>&1
-echo -e "import psutil\ncpu_utilization=psutil.cpu_percent(1,False)\nprint('CPU_UTILIZATION:', cpu_utilization)\n" > cpu_utilization.py
-
-for thread_num in 1 4 8 16
-do
-for batch_size in 1 4 16 64
-do
- job_bt=`date '+%Y%m%d%H%M%S'`
- $PYTHONROOT/bin/python3 benchmark.py --thread $thread_num --batch_size $batch_size --model $2/serving_client_conf.prototxt --request rpc > profile 2>&1
- echo "model_name:" $1
- echo "thread_num:" $thread_num
- echo "batch_size:" $batch_size
- echo "=================Done===================="
- echo "model_name:$1" >> profile_log_$1
- echo "batch_size:$batch_size" >> profile_log_$1
- job_et=`date '+%Y%m%d%H%M%S'`
- $PYTHONROOT/bin/python3 ../util/show_profile.py profile $thread_num >> profile_log_$1
- $PYTHONROOT/bin/python3 cpu_utilization.py >> profile_log_$1
- tail -n 8 profile >> profile_log_$1
- echo "" >> profile_log_$1
-done
-done
-
-# Split profile_log into per-run files
-awk 'BEGIN{RS="\n\n"}{i++}{print > "imdb_log_"i}' profile_log_$1
-mkdir $1_log && mv imdb_log_* $1_log
-ps -ef|grep 'serving'|grep -v grep|cut -c 9-15 | xargs kill -9
diff --git a/examples/Cpp/imdb/clean_data.sh b/examples/Cpp/imdb/clean_data.sh
deleted file mode 100644
index 6d2c8d7a7ff195f91796483ce8caf9cc5fa0317f..0000000000000000000000000000000000000000
--- a/examples/Cpp/imdb/clean_data.sh
+++ /dev/null
@@ -1 +0,0 @@
-rm -rf imdb.vocab kvdb log *.pyc serving_client_conf serving_server_model test_data text_classification_data.tar.gz train_data work_dir1
diff --git a/examples/Cpp/imdb/get_data.sh b/examples/Cpp/imdb/get_data.sh
deleted file mode 100644
index 81d8d5d3b018f133c41e211d1501cf3cd9a3d8a4..0000000000000000000000000000000000000000
--- a/examples/Cpp/imdb/get_data.sh
+++ /dev/null
@@ -1,4 +0,0 @@
-wget --no-check-certificate https://fleet.bj.bcebos.com/text_classification_data.tar.gz
-wget --no-check-certificate https://paddle-serving.bj.bcebos.com/imdb-demo/imdb_model.tar.gz
-tar -zxvf text_classification_data.tar.gz
-tar -zxvf imdb_model.tar.gz
diff --git a/examples/Cpp/imdb/imdb_reader.py b/examples/Cpp/imdb/imdb_reader.py
deleted file mode 100644
index a4ef3e163a50b0dc244ac2653df1e38d7f91699b..0000000000000000000000000000000000000000
--- a/examples/Cpp/imdb/imdb_reader.py
+++ /dev/null
@@ -1,92 +0,0 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# pylint: disable=doc-string-missing
-
-import sys
-import os
-import paddle
-import re
-import paddle.fluid.incubate.data_generator as dg
-
-py_version = sys.version_info[0]
-
-
-class IMDBDataset(dg.MultiSlotDataGenerator):
- def load_resource(self, dictfile):
- self._vocab = {}
- wid = 0
- if py_version == 2:
- with open(dictfile) as f:
- for line in f:
- self._vocab[line.strip()] = wid
- wid += 1
- else:
- with open(dictfile, encoding="utf-8") as f:
- for line in f:
- self._vocab[line.strip()] = wid
- wid += 1
- self._unk_id = len(self._vocab)
- self._pattern = re.compile(r'(;|,|\.|\?|!|\s|\(|\))')
- self.return_value = ("words", [1, 2, 3, 4, 5, 6]), ("label", [0])
-
- def get_words_only(self, line):
- sent = line.lower().replace("<br />", " ").strip()
- words = [x for x in self._pattern.split(sent) if x and x != " "]
- feas = [
- self._vocab[x] if x in self._vocab else self._unk_id for x in words
- ]
- return feas
-
- def get_words_and_label(self, line):
- send = '|'.join(line.split('|')[:-1]).lower().replace("<br />", " ").strip()
- label = [int(line.split('|')[-1])]
-
- words = [x for x in self._pattern.split(send) if x and x != " "]
- feas = [
- self._vocab[x] if x in self._vocab else self._unk_id for x in words
- ]
- return feas, label
-
- def infer_reader(self, infer_filelist, batch, buf_size):
- def local_iter():
- for fname in infer_filelist:
- with open(fname, "r") as fin:
- for line in fin:
- feas, label = self.get_words_and_label(line)
- yield feas, label
-
- import paddle
- batch_iter = paddle.batch(
- paddle.reader.shuffle(
- local_iter, buf_size=buf_size),
- batch_size=batch)
- return batch_iter
-
- def generate_sample(self, line):
- def memory_iter():
- for i in range(1000):
- yield self.return_value
-
- def data_iter():
- feas, label = self.get_words_and_label(line)
- yield ("words", feas), ("label", label)
-
- return data_iter
-
-
-if __name__ == "__main__":
- imdb = IMDBDataset()
- imdb.load_resource("imdb.vocab")
- imdb.run_from_stdin()
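A quick hedged demo of the reader above (requires `imdb.vocab` from `get_data.sh`; the sample sentence and its `| 1` label suffix are illustrative):

```python
imdb = IMDBDataset()
imdb.load_resource("imdb.vocab")
ids, label = imdb.get_words_and_label("a genuinely moving film | 1")
print(len(ids), label)  # token-id count and the parsed label [1]
```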
diff --git a/examples/Cpp/imdb/local_train.py b/examples/Cpp/imdb/local_train.py
deleted file mode 100644
index 98333e4e3440ff464b796619736dee46002ae2a4..0000000000000000000000000000000000000000
--- a/examples/Cpp/imdb/local_train.py
+++ /dev/null
@@ -1,73 +0,0 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# pylint: disable=doc-string-missing
-import os
-import sys
-import paddle
-import logging
-import paddle.fluid as fluid
-
-logging.basicConfig(format='%(asctime)s - %(levelname)s - %(message)s')
-logger = logging.getLogger("fluid")
-logger.setLevel(logging.INFO)
-paddle.enable_static()
-
-def load_vocab(filename):
- vocab = {}
- with open(filename) as f:
- wid = 0
- for line in f:
- vocab[line.strip()] = wid
- wid += 1
- vocab["<unk>