diff --git a/CMakeLists.txt b/CMakeLists.txt index af065158699199af61aca02f563dda1b1cddf2b1..7c497e3e048c4dd8d5c1291286de2ab9d218b914 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -85,6 +85,17 @@ include(generic) include(flags) endif() +if (APP) +include(external/zlib) +include(external/boost) +include(external/protobuf) +include(external/gflags) +include(external/glog) +include(external/pybind11) +include(external/python) +include(generic) +endif() + if (SERVER) include(external/cudnn) include(paddlepaddle) diff --git a/core/CMakeLists.txt b/core/CMakeLists.txt index ce2e5e3814ae1e585976c5d9c8848b506293ee67..56296b53319fb185c772ffa10e8b31c8203862fb 100644 --- a/core/CMakeLists.txt +++ b/core/CMakeLists.txt @@ -23,6 +23,11 @@ add_subdirectory(pdcodegen) add_subdirectory(sdk-cpp) endif() +if (APP) +add_subdirectory(configure) +endif() + + if(CLIENT) add_subdirectory(general-client) endif() diff --git a/core/configure/CMakeLists.txt b/core/configure/CMakeLists.txt index b6384fc99ea3df6d71a61865e3aabf5b39b510dd..d3e5b75da96ad7a0789866a4a2c474fad988c21b 100644 --- a/core/configure/CMakeLists.txt +++ b/core/configure/CMakeLists.txt @@ -1,3 +1,4 @@ +if (SERVER OR CLIENT) LIST(APPEND protofiles ${CMAKE_CURRENT_LIST_DIR}/proto/server_configure.proto ${CMAKE_CURRENT_LIST_DIR}/proto/sdk_configure.proto @@ -28,6 +29,7 @@ FILE(GLOB inc ${CMAKE_CURRENT_BINARY_DIR}/*.pb.h) install(FILES ${inc} DESTINATION ${PADDLE_SERVING_INSTALL_DIR}/include/configure) +endif() py_proto_compile(general_model_config_py_proto SRCS proto/general_model_config.proto) add_custom_target(general_model_config_py_proto_init ALL COMMAND ${CMAKE_COMMAND} -E touch __init__.py) @@ -51,6 +53,14 @@ add_custom_command(TARGET general_model_config_py_proto POST_BUILD endif() +if (APP) +add_custom_command(TARGET general_model_config_py_proto POST_BUILD + COMMAND ${CMAKE_COMMAND} -E make_directory ${PADDLE_SERVING_BINARY_DIR}/python/paddle_serving_app/proto + COMMAND cp *.py 
${PADDLE_SERVING_BINARY_DIR}/python/paddle_serving_app/proto + COMMENT "Copy generated general_model_config proto file into directory paddle_serving_app/proto." + WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}) +endif() + if (SERVER) py_proto_compile(server_config_py_proto SRCS proto/server_configure.proto) add_custom_target(server_config_py_proto_init ALL COMMAND ${CMAKE_COMMAND} -E touch __init__.py) diff --git a/python/examples/faster_rcnn_model/new_test_client.py b/python/examples/faster_rcnn_model/new_test_client.py index 197530a6fa0d87bfe6ca7bf57fa903479fdaa86f..0c6c615f8f3dff10626256de59101c401457509f 100755 --- a/python/examples/faster_rcnn_model/new_test_client.py +++ b/python/examples/faster_rcnn_model/new_test_client.py @@ -22,13 +22,19 @@ preprocess = Sequential([ ]) postprocess = RCNNPostprocess("label_list.txt", "output") - client = Client() -client.load_client_config("faster_rcnn_client_conf/serving_client_conf.prototxt") + +client.load_client_config( + "faster_rcnn_client_conf/serving_client_conf.prototxt") client.connect(['127.0.0.1:9393']) im = preprocess(sys.argv[2]) -fetch_map = client.predict(feed={"image": im, "im_info": np.array(list(im.shape[1:]) + [1.0]), - "im_shape": np.array(list(im.shape[1:]) + [1.0])}, fetch=["multiclass_nms"]) +fetch_map = client.predict( + feed={ + "image": im, + "im_info": np.array(list(im.shape[1:]) + [1.0]), + "im_shape": np.array(list(im.shape[1:]) + [1.0]) + }, + fetch=["multiclass_nms"]) fetch_map["image"] = sys.argv[1] postprocess(fetch_map) diff --git a/python/examples/resnet_v2_50/resnet50_debug.py b/python/examples/resnet_v2_50/resnet50_debug.py new file mode 100644 index 0000000000000000000000000000000000000000..62cb1812c5718ae1f9e10e9e9a57d7c1ae6736b7 --- /dev/null +++ b/python/examples/resnet_v2_50/resnet50_debug.py @@ -0,0 +1,31 @@ +# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from paddle_serving_app.reader import Sequential, File2Image, Resize, CenterCrop +from paddle_serving_app.reader import RGB2BGR, Transpose, Div, Normalize +from paddle_serving_app import Debugger +import sys + +debugger = Debugger() +debugger.load_model_config(sys.argv[1], gpu=True) + +seq = Sequential([ + File2Image(), Resize(256), CenterCrop(224), RGB2BGR(), Transpose((2, 0, 1)), + Div(255), Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225], True) +]) + +image_file = "daisy.jpg" +img = seq(image_file) +fetch_map = debugger.predict(feed={"image": img}, fetch=["feature_map"]) +print(fetch_map["feature_map"].reshape(-1)) diff --git a/python/paddle_serving_app/__init__.py b/python/paddle_serving_app/__init__.py index 860876030695baee15d3ace68c6af386290cfbb0..fd9260284b4103f00ca8b9cda8b99173591d23eb 100644 --- a/python/paddle_serving_app/__init__.py +++ b/python/paddle_serving_app/__init__.py @@ -16,3 +16,4 @@ from .reader.image_reader import ImageReader, File2Image, URL2Image, Sequential, from .reader.lac_reader import LACReader from .reader.senta_reader import SentaReader from .models import ServingModels +from .local_predict import Debugger diff --git a/python/paddle_serving_app/local_predict.py b/python/paddle_serving_app/local_predict.py new file mode 100644 index 0000000000000000000000000000000000000000..133aa4ccf32d29538d5b7032874f2c770e55e184 --- /dev/null +++ b/python/paddle_serving_app/local_predict.py @@ 
-0,0 +1,124 @@ +# -*- coding: utf-8 -*- +""" +# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" + +import os +import google.protobuf.text_format +import numpy as np +import argparse +import paddle.fluid as fluid +from .proto import general_model_config_pb2 as m_config +from paddle.fluid.core import PaddleTensor +from paddle.fluid.core import AnalysisConfig +from paddle.fluid.core import create_paddle_predictor +import logging + +logging.basicConfig(format="%(asctime)s - %(levelname)s - %(message)s") +logger = logging.getLogger("fluid") +logger.setLevel(logging.INFO) + + +class Debugger(object): + def __init__(self): + self.feed_names_ = [] + self.fetch_names_ = [] + self.feed_types_ = {} + self.fetch_types_ = {} + self.feed_shapes_ = {} + self.feed_names_to_idx_ = {} + self.fetch_names_to_idx_ = {} + self.fetch_names_to_type_ = {} + + def load_model_config(self, model_path, gpu=False, profile=True, cpu_num=1): + client_config = "{}/serving_server_conf.prototxt".format(model_path) + model_conf = m_config.GeneralModelConfig() + f = open(client_config, 'r') + model_conf = google.protobuf.text_format.Merge( + str(f.read()), model_conf) + config = AnalysisConfig(model_path) + + self.feed_names_ = [var.alias_name for var in model_conf.feed_var] + self.fetch_names_ = [var.alias_name for var in model_conf.fetch_var] + self.feed_names_to_idx_ = {} + self.fetch_names_to_idx_ = {} + + for i, var in 
enumerate(model_conf.feed_var): + self.feed_names_to_idx_[var.alias_name] = i + self.feed_types_[var.alias_name] = var.feed_type + self.feed_shapes_[var.alias_name] = var.shape + + for i, var in enumerate(model_conf.fetch_var): + self.fetch_names_to_idx_[var.alias_name] = i + self.fetch_names_to_type_[var.alias_name] = var.fetch_type + + if not gpu: + config.disable_gpu() + else: + config.enable_use_gpu(100, 0) + if profile: + config.enable_profile() + config.set_cpu_math_library_num_threads(cpu_num) + + self.predictor = create_paddle_predictor(config) + + def predict(self, feed=None, fetch=None): + if feed is None or fetch is None: + raise ValueError("You should specify feed and fetch for prediction") + fetch_list = [] + if isinstance(fetch, str): + fetch_list = [fetch] + elif isinstance(fetch, list): + fetch_list = fetch + else: + raise ValueError("Fetch only accepts string and list of string") + + feed_batch = [] + if isinstance(feed, dict): + feed_batch.append(feed) + elif isinstance(feed, list): + feed_batch = feed + else: + raise ValueError("Feed only accepts dict and list of dict") + + int_slot_batch = [] + float_slot_batch = [] + int_feed_names = [] + float_feed_names = [] + int_shape = [] + float_shape = [] + fetch_names = [] + counter = 0 + batch_size = len(feed_batch) + + for key in fetch_list: + if key in self.fetch_names_: + fetch_names.append(key) + + if len(fetch_names) == 0: + raise ValueError( + "Fetch names should not be empty or out of saved fetch list.") + # unreachable dead code removed (was: return {}) + + inputs = [] + for name in self.feed_names_: + inputs.append(PaddleTensor(feed[name][np.newaxis, :])) + + outputs = self.predictor.run(inputs) + fetch_map = {} + for name in fetch: + fetch_map[name] = outputs[self.fetch_names_to_idx_[ + name]].as_ndarray() + return fetch_map diff --git a/python/setup.py.app.in b/python/setup.py.app.in index d981caa7c34ff1f84d4cdee0e64a2b03e47b7b66..77099e667e880f3f62ab4cde9d5ae3b6295d1b90 100644 --- a/python/setup.py.app.in +++ 
b/python/setup.py.app.in @@ -42,10 +42,11 @@ if '${PACK}' == 'ON': REQUIRED_PACKAGES = [ - 'six >= 1.10.0', 'sentencepiece' + 'six >= 1.10.0', 'sentencepiece', 'opencv-python', 'pillow' ] packages=['paddle_serving_app', + 'paddle_serving_app.proto', 'paddle_serving_app.reader', 'paddle_serving_app.utils', 'paddle_serving_app.models', @@ -54,6 +55,8 @@ packages=['paddle_serving_app', package_data={} package_dir={'paddle_serving_app': '${PADDLE_SERVING_BINARY_DIR}/python/paddle_serving_app', + 'paddle_serving_app.proto': + '${PADDLE_SERVING_BINARY_DIR}/python/paddle_serving_app/proto', 'paddle_serving_app.reader': '${PADDLE_SERVING_BINARY_DIR}/python/paddle_serving_app/reader', 'paddle_serving_app.utils':