diff --git a/doc/HTTP_SERVICE_CN.md b/doc/HTTP_SERVICE_CN.md old mode 100644 new mode 100755 index e8050a6d48275224b2dabe2298b5d8eb9ddccc80..d6da41748f877f476e15e14eebad37834a0dab39 --- a/doc/HTTP_SERVICE_CN.md +++ b/doc/HTTP_SERVICE_CN.md @@ -42,7 +42,7 @@ python3.6 -m paddle_serving_server.serve --model uci_housing_model --thread 10 - 为了方便用户快速的使用Http方式请求Server端预测服务,我们已经将常用的Http请求的数据体封装、压缩、请求加密等功能封装为一个HttpClient类提供给用户,方便用户使用。 -使用HttpClient最简单只需要三步,1、创建一个HttpClient对象。2、加载Client端的prototxt配置文件(本例中为python/examples/fit_a_line/目录下的uci_housing_client/serving_client_conf.prototxt),3、调用Predict函数,通过Http方式请求预测服务。 +使用HttpClient最简单只需要四步,1、创建一个HttpClient对象。2、加载Client端的prototxt配置文件(本例中为python/examples/fit_a_line/目录下的uci_housing_client/serving_client_conf.prototxt)。3、调用connect函数。4、调用Predict函数,通过Http方式请求预测服务。 此外,您可以根据自己的需要配置Server端IP、Port、服务名称(此服务名称需要与[`core/general-server/proto/general_model_service.proto`](../core/general-server/proto/general_model_service.proto)文件中的Service服务名和rpc方法名对应,即`GeneralModelService`字段和`inference`字段),设置Request数据体压缩,设置Response支持压缩传输,模型加密预测(需要配置Server端使用模型加密)、设置响应超时时间等功能。 @@ -103,7 +103,7 @@ repeated int32 numbers = 1; ``` #### elem_type -表示数据类型,0 means int64, 1 means float32, 2 means int32, 3 means bytes(string) +表示数据类型,0 means int64, 1 means float32, 2 means int32, 20 means bytes(string) #### fetch_var_names diff --git a/java/src/main/java/io/paddle/serving/client/Client.java b/java/src/main/java/io/paddle/serving/client/Client.java index 63e861ba6199c7a56129c4d3b0cb03a77d26f6b7..af4ccc5246262336ef9df05aa65beb5b91de33fd 100755 --- a/java/src/main/java/io/paddle/serving/client/Client.java +++ b/java/src/main/java/io/paddle/serving/client/Client.java @@ -59,9 +59,20 @@ import java.util.zip.GZIPInputStream; import java.util.zip.GZIPOutputStream; -enum ElementType -{ - Int64_type, Float32_type, Int32_type, Bytes_type; +class ElementType { + public static final int Int64_type = 0; + public static final int Float32_type = 1; + public static final int 
Int32_type = 2; + public static final int String_type = 20; + public static final Map feedTypeToDataKey_; + static + { + feedTypeToDataKey_ = new HashMap(); + feedTypeToDataKey_.put(ElementType.Int64_type, "int64_data"); + feedTypeToDataKey_.put(ElementType.Float32_type, "float_data"); + feedTypeToDataKey_.put(ElementType.Int32_type, "int_data"); + feedTypeToDataKey_.put(ElementType.String_type, "data"); + } } class Profiler { @@ -104,7 +115,6 @@ public class Client { private Map feedTypes_; private Map> feedShapes_; private Map feedNameToIndex_; - private Map feedTypeToDataKey_; private List fetchNames_; private Map fetchTypes_; private Set lodTensorSet_; @@ -147,12 +157,6 @@ public class Client { channel_ = null; blockingStub_ = null; - feedTypeToDataKey_ = new HashMap(); - feedTypeToDataKey_.put(0, "int64_data"); - feedTypeToDataKey_.put(1, "float_data"); - feedTypeToDataKey_.put(2, "int_data"); - feedTypeToDataKey_.put(3, "data"); - profiler_ = new Profiler(); boolean is_profile = false; String FLAGS_profile_client = System.getenv("FLAGS_profile_client"); @@ -525,7 +529,7 @@ public class Client { jsonTensor.put("elem_type", element_type); // 处理数据与shape - String protoDataKey = feedTypeToDataKey_.get(element_type); + String protoDataKey = ElementType.feedTypeToDataKey_.get(element_type); // 如果是INDArray类型,先转为一维. 
// 此时shape为INDArray的shape if(objectValue instanceof INDArray){ @@ -535,11 +539,11 @@ public class Client { for(long dim:indarrayShape){ shape.add((int)dim); } - if(element_type == ElementType.Int64_type.ordinal()){ + if(element_type == ElementType.Int64_type){ objectValue = tempIndArray.data().asLong(); - }else if(element_type == ElementType.Int32_type.ordinal()){ + }else if(element_type == ElementType.Int32_type){ objectValue = tempIndArray.data().asInt(); - }else if(element_type == ElementType.Float32_type.ordinal()){ + }else if(element_type == ElementType.Float32_type){ objectValue = tempIndArray.data().asFloat(); }else{ throw new Exception("INDArray 类型不支持"); @@ -564,11 +568,11 @@ public class Client { // 此时无法获取batch信息,故对shape不处理 // 由于Proto中为Repeated,需要把数据包装成list if(objectValue instanceof String){ - if(feedTypes_.get(protoDataKey)!= ElementType.Bytes_type.ordinal()){ + if(feedTypes_.get(protoDataKey)!= ElementType.String_type){ throw new Exception("feedvar is not string-type,feed can`t be a single string."); } }else{ - if(feedTypes_.get(protoDataKey)== ElementType.Bytes_type.ordinal()){ + if(feedTypes_.get(protoDataKey)== ElementType.String_type){ throw new Exception("feedvar is string-type,feed, feed can`t be a single int or others."); } } @@ -662,17 +666,17 @@ public class Client { for(long dim:indarrayShape){ shape.add((int)dim); } - if(element_type == ElementType.Int64_type.ordinal()){ + if(element_type == ElementType.Int64_type){ List iter = Arrays.stream(tempIndArray.data().asLong()).boxed().collect(Collectors.toList()); tensor_builder.addAllInt64Data(iter); - }else if(element_type == ElementType.Int32_type.ordinal()){ + }else if(element_type == ElementType.Int32_type){ List iter = Arrays.stream(tempIndArray.data().asInt()).boxed().collect(Collectors.toList()); tensor_builder.addAllIntData(iter); - }else if(element_type == ElementType.Float32_type.ordinal()){ + }else if(element_type == ElementType.Float32_type){ List iter = 
Arrays.asList(ArrayUtils.toObject(tempIndArray.data().asFloat())); tensor_builder.addAllFloatData(iter); @@ -684,13 +688,13 @@ public class Client { // 如果是数组类型,则无须处理,直接使用即可。 // 且数组无法嵌套,此时batch无法从数据中获取 // 默认batch维度为1,或者feedVar的shape信息中已包含batch - if(element_type == ElementType.Int64_type.ordinal()){ + if(element_type == ElementType.Int64_type){ List iter = Arrays.stream((long[])objectValue).boxed().collect(Collectors.toList()); tensor_builder.addAllInt64Data(iter); - }else if(element_type == ElementType.Int32_type.ordinal()){ + }else if(element_type == ElementType.Int32_type){ List iter = Arrays.stream((int[])objectValue).boxed().collect(Collectors.toList()); tensor_builder.addAllIntData(iter); - }else if(element_type == ElementType.Float32_type.ordinal()){ + }else if(element_type == ElementType.Float32_type){ List iter = Arrays.asList(ArrayUtils.toObject((float[])objectValue)); tensor_builder.addAllFloatData(iter); }else{ @@ -707,11 +711,11 @@ public class Client { // 在index=0处,加上batch shape.add(0, list.size()); } - if(element_type == ElementType.Int64_type.ordinal()){ + if(element_type == ElementType.Int64_type){ tensor_builder.addAllInt64Data((List)(List)recursiveExtract(objectValue)); - }else if(element_type == ElementType.Int32_type.ordinal()){ + }else if(element_type == ElementType.Int32_type){ tensor_builder.addAllIntData((List)(List)recursiveExtract(objectValue)); - }else if(element_type == ElementType.Float32_type.ordinal()){ + }else if(element_type == ElementType.Float32_type){ tensor_builder.addAllFloatData((List)(List)recursiveExtract(objectValue)); }else{ // 看接口是String还是Bytes @@ -723,11 +727,11 @@ public class Client { // 由于Proto中为Repeated,需要把数据包装成list List tempList = new ArrayList<>(); tempList.add(objectValue); - if(element_type == ElementType.Int64_type.ordinal()){ + if(element_type == ElementType.Int64_type){ tensor_builder.addAllInt64Data((List)(List)tempList); - }else if(element_type == ElementType.Int32_type.ordinal()){ + }else if(element_type == 
ElementType.Int32_type){ tensor_builder.addAllIntData((List)(List)tempList); - }else if(element_type == ElementType.Float32_type.ordinal()){ + }else if(element_type == ElementType.Float32_type){ tensor_builder.addAllFloatData((List)(List)tempList); }else{ // 看接口是String还是Bytes diff --git a/python/examples/ocr/README.md b/python/examples/ocr/README.md old mode 100644 new mode 100755 index 630f01d999943b9948e153430b30d80fbabd0549..95cc210a7e68d5582e68460f2eec89419bf7fd7c --- a/python/examples/ocr/README.md +++ b/python/examples/ocr/README.md @@ -119,7 +119,7 @@ The pre-processing and post-processing is in the C + + server part, the image's so the value of parameter `feed_var` which is in the file `ocr_det_client/serving_client_conf.prototxt` should be changed. -for this case, `feed_type` should be 3(which means the data type is string),`shape` should be 1. +for this case, `feed_type` should be 20(which means the data type is string),`shape` should be 1. By passing in multiple client folder paths, the client can be started for multi model prediction. ``` diff --git a/python/examples/ocr/README_CN.md b/python/examples/ocr/README_CN.md old mode 100644 new mode 100755 index 421a4b930507abd3d36ef6db737f85a060647ced..5c0734c94aa6d61e1fdb9e8f87d5ee187c805ff0 --- a/python/examples/ocr/README_CN.md +++ b/python/examples/ocr/README_CN.md @@ -118,7 +118,7 @@ python3 -m paddle_serving_server.serve --model ocr_det_model ocr_rec_model --por 即`ocr_det_client/serving_client_conf.prototxt`中`feed_var`字段 -对于本示例而言,`feed_type`应修改为3(数据类型为string),`shape`为1. +对于本示例而言,`feed_type`应修改为20(数据类型为string),`shape`为1. 
通过在客户端启动后加入多个client模型的client配置文件夹路径,启动client进行预测。 ``` diff --git a/python/paddle_serving_client/httpclient.py b/python/paddle_serving_client/httpclient.py index 9506cac376006a5e64c6aa76750e25a6351cbfbb..bb056a99732aeb1fa855b6ce1e020ada82072ed0 100755 --- a/python/paddle_serving_client/httpclient.py +++ b/python/paddle_serving_client/httpclient.py @@ -38,7 +38,12 @@ float32_type = 1 int32_type = 2 bytes_type = 20 # this is corresponding to the proto -proto_data_key_list = ["int64_data", "float_data", "int_data", "data"] +proto_data_key_list = { + 0: "int64_data", + 1: "float_data", + 2: "int_data", + 20: "data" +} def list_flatten(items, ignore_types=(str, bytes)):