diff --git a/cmake/paddlepaddle.cmake b/cmake/paddlepaddle.cmake
index 3119c62f85a9f3616136fd0448811d76163f1195..0774db92b5a9132ef22bea8271bbfa0eb8ab57b5 100644
--- a/cmake/paddlepaddle.cmake
+++ b/cmake/paddlepaddle.cmake
@@ -30,7 +30,7 @@ message( "WITH_GPU = ${WITH_GPU}")
 # Paddle Version should be one of:
 # latest: latest develop build
 # version number like 1.5.2
-SET(PADDLE_VERSION "2.2.0")
+SET(PADDLE_VERSION "2.2.2")
 if (WITH_GPU)
   message("CUDA: ${CUDA_VERSION}, CUDNN_MAJOR_VERSION: ${CUDNN_MAJOR_VERSION}")
   # cuda 11.0 is not supported, 11.2 would be added.
@@ -58,9 +58,9 @@ elseif (WITH_LITE)
   message("cpu arch: ${CMAKE_SYSTEM_PROCESSOR}")
   if (WITH_XPU)
     if (CMAKE_SYSTEM_PROCESSOR STREQUAL "x86_64")
-      SET(PADDLE_LIB_VERSION "x86-64_gcc8.2_avx_mkl")
+      SET(PADDLE_LIB_VERSION "${PADDLE_VERSION}/cxx_c/Linux/XPU/x86-64_gcc8.2_py36_avx_mkl")
     elseif(CMAKE_SYSTEM_PROCESSOR STREQUAL "aarch64")
-      SET(PADDLE_LIB_VERSION "arm64_gcc7.3_openblas")
+      SET(PADDLE_LIB_VERSION "${PADDLE_VERSION}/cxx_c/Linux/XPU/arm64_gcc7.3_py36_openblas")
     endif()
   elseif (WITH_ASCEND_CL)
     if (CMAKE_SYSTEM_PROCESSOR STREQUAL "x86_64")
@@ -97,7 +97,7 @@ endif()
 
 if(WITH_LITE)
   if (WITH_XPU)
-    SET(PADDLE_LIB_PATH "https://paddle-inference-lib.bj.bcebos.com/2.2.0-rc0/cxx_c/Linux/XPU/${PADDLE_LIB_VERSION}/paddle_inference_install_dir.tar.gz ")
+    SET(PADDLE_LIB_PATH "https://paddle-inference-lib.bj.bcebos.com/${PADDLE_LIB_VERSION}/paddle_inference_install_dir.tar.gz ")
   elseif (WITH_ASCEND_CL)
     SET(PADDLE_LIB_PATH "http://paddle-serving.bj.bcebos.com/inferlib/${PADDLE_LIB_VERSION}/paddle_inference_install_dir.tgz ")
   endif()
diff --git a/python/paddle_serving_app/local_predict.py b/python/paddle_serving_app/local_predict.py
index 9081b8b3974e5590732f07eb7d867cd3c48c4191..5f922a28f849866fcd08a29b63c70a986d064c68 100644
--- a/python/paddle_serving_app/local_predict.py
+++ b/python/paddle_serving_app/local_predict.py
@@ -160,12 +160,12 @@ class LocalPredictor(object):
             "use_trt:{}, use_lite:{}, use_xpu:{}, precision:{}, use_calib:{}, "
             "use_mkldnn:{}, mkldnn_cache_capacity:{}, mkldnn_op_list:{}, "
             "mkldnn_bf16_op_list:{}, use_feed_fetch_ops:{}, "
-            "use_ascend_cl:{}, min_subgraph_size:{}, dynamic_shape_info:{}".format(
-                model_path, use_gpu, gpu_id, use_profile, thread_num, mem_optim,
-                ir_optim, use_trt, use_lite, use_xpu, precision, use_calib,
-                use_mkldnn, mkldnn_cache_capacity, mkldnn_op_list,
-                mkldnn_bf16_op_list, use_feed_fetch_ops, use_ascend_cl,
-                min_subgraph_size, dynamic_shape_info))
+            "use_ascend_cl:{}, min_subgraph_size:{}, dynamic_shape_info:{}".
+            format(model_path, use_gpu, gpu_id, use_profile, thread_num,
+                   mem_optim, ir_optim, use_trt, use_lite, use_xpu, precision,
+                   use_calib, use_mkldnn, mkldnn_cache_capacity, mkldnn_op_list,
+                   mkldnn_bf16_op_list, use_feed_fetch_ops, use_ascend_cl,
+                   min_subgraph_size, dynamic_shape_info))
 
         self.feed_names_ = [var.alias_name for var in model_conf.feed_var]
         self.fetch_names_ = [var.alias_name for var in model_conf.fetch_var]
@@ -236,10 +236,10 @@ class LocalPredictor(object):
                     kill_stop_process_by_pid("kill", os.getpgid(os.getpid()))
 
             if len(dynamic_shape_info):
-                config.set_trt_dynamic_shape_info(
-                    dynamic_shape_info['min_input_shape'],
-                    dynamic_shape_info['max_input_shape'],
-                    dynamic_shape_info['opt_input_shape'])
+                config.set_trt_dynamic_shape_info(
+                    dynamic_shape_info['min_input_shape'],
+                    dynamic_shape_info['max_input_shape'],
+                    dynamic_shape_info['opt_input_shape'])
         # set lite
         if use_lite:
             config.enable_lite_engine(
@@ -338,7 +338,8 @@ class LocalPredictor(object):
         # Assemble the input data of paddle predictor, and filter invalid inputs.
         input_names = self.predictor.get_input_names()
         for name in input_names:
-            if isinstance(feed[name], list):
+            if isinstance(feed[name], list) and not isinstance(feed[name][0],
+                                                               str):
                 feed[name] = np.array(feed[name]).reshape(self.feed_shapes_[
                     name])
             if self.feed_types_[name] == 0:
@@ -365,6 +366,9 @@ class LocalPredictor(object):
                 feed[name] = feed[name].astype("complex64")
             elif self.feed_types_[name] == 11:
                 feed[name] = feed[name].astype("complex128")
+            elif isinstance(feed[name], list) and isinstance(feed[name][0],
+                                                             str):
+                pass
             else:
                 raise ValueError("local predictor receives wrong data type")
 
diff --git a/python/pipeline/channel.py b/python/pipeline/channel.py
index 9ef1c09c48d4c851faf16c2621a0fa1dc561c201..8df5a6259f8a2b3c7048771c73e7d802b94ba8b7 100644
--- a/python/pipeline/channel.py
+++ b/python/pipeline/channel.py
@@ -34,6 +34,7 @@ from .error_catch import CustomExceptionCode as ChannelDataErrcode
 
 _LOGGER = logging.getLogger(__name__)
 
+
 class ChannelDataType(enum.Enum):
     """
     Channel data type
@@ -167,7 +168,8 @@ class ChannelData(object):
         elif isinstance(npdata, dict):
             # batch_size = 1
             for _, value in npdata.items():
-                if not isinstance(value, np.ndarray):
+                if not isinstance(value, np.ndarray) and not (isinstance(
+                        value, list) and isinstance(value[0], str)):
                     error_code = ChannelDataErrcode.TYPE_ERROR.value
                     error_info = "Failed to check data: the value " \
                                  "of data must be np.ndarray, but get {}.".format(
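Note (illustration only, not part of the patch): the local_predict.py and channel.py hunks above share one rule; a feed value that is a list of strings now bypasses both the np.ndarray conversion in LocalPredictor.predict and the type check in ChannelData. The standalone sketch below mirrors that rule in isolation; the helper name is_string_list and the sample feed dict are hypothetical, not names from the patch.

import numpy as np

def is_string_list(value):
    # Hypothetical helper mirroring the check added in this patch:
    # a list whose first element is a str is handed to the predictor
    # as-is instead of going through np.array(...).reshape(...).
    return isinstance(value, list) and isinstance(value[0], str)

# Numeric lists are still converted to ndarrays, exactly as before.
feed = {"image": [0.1, 0.2, 0.3], "text": ["a sentence", "another one"]}
for name, value in feed.items():
    if isinstance(value, list) and not is_string_list(value):
        value = np.array(value)
    print(name, type(value).__name__)  # -> image ndarray, text list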