未验证 提交 da7bfe66 编写于 作者: T TeslaZhao 提交者: GitHub

Merge pull request #1029 from TeslaZhao/v0.5.0

cherry-pick to PaddlePaddle/Serving:V0.5.0
...@@ -43,25 +43,21 @@ class OCRService(WebService): ...@@ -43,25 +43,21 @@ class OCRService(WebService):
data = np.fromstring(data, np.uint8) data = np.fromstring(data, np.uint8)
im = cv2.imdecode(data, cv2.IMREAD_COLOR) im = cv2.imdecode(data, cv2.IMREAD_COLOR)
img_list.append(im) img_list.append(im)
feed_list = []
max_wh_ratio = 0 max_wh_ratio = 0
for i, boximg in enumerate(img_list): for i, boximg in enumerate(img_list):
h, w = boximg.shape[0:2] h, w = boximg.shape[0:2]
wh_ratio = w * 1.0 / h wh_ratio = w * 1.0 / h
max_wh_ratio = max(max_wh_ratio, wh_ratio) max_wh_ratio = max(max_wh_ratio, wh_ratio)
for img in img_list: _, w, h = self.ocr_reader.resize_norm_img(img_list[0],
max_wh_ratio).shape
imgs = np.zeros((len(img_list), 3, w, h)).astype('float32')
for i, img in enumerate(img_list):
norm_img = self.ocr_reader.resize_norm_img(img, max_wh_ratio) norm_img = self.ocr_reader.resize_norm_img(img, max_wh_ratio)
#feed = {"image": norm_img} imgs[i] = norm_img
feed_list.append(norm_img)
if len(feed_list) == 1: feed = {"image": imgs.copy()}
feed_batch = {
"image": np.concatenate(
feed_list, axis=0)[np.newaxis, :]
}
else:
feed_batch = {"image": np.concatenate(feed_list, axis=0)}
fetch = ["ctc_greedy_decoder_0.tmp_0", "softmax_0.tmp_0"] fetch = ["ctc_greedy_decoder_0.tmp_0", "softmax_0.tmp_0"]
return feed_batch, fetch, True return feed, fetch, True
def postprocess(self, feed={}, fetch=[], fetch_map=None): def postprocess(self, feed={}, fetch=[], fetch_map=None):
rec_res = self.ocr_reader.postprocess(fetch_map, with_score=True) rec_res = self.ocr_reader.postprocess(fetch_map, with_score=True)
......
...@@ -198,12 +198,12 @@ class Client(object): ...@@ -198,12 +198,12 @@ class Client(object):
self.rpc_timeout_ms = rpc_timeout self.rpc_timeout_ms = rpc_timeout
def use_key(self, key_filename):
    """Load the client's encryption key from *key_filename*.

    The file is read in binary mode ("rb") so the raw key bytes are
    preserved exactly (no text decoding), and stored on ``self.key``
    for later use (e.g. base64-encoded in the serving-port request).
    """
    with open(key_filename, "rb") as key_file:
        self.key = key_file.read()
def get_serving_port(self, endpoints): def get_serving_port(self, endpoints):
if self.key is not None: if self.key is not None:
req = json.dumps({"key": base64.b64encode(self.key)}) req = json.dumps({"key": base64.b64encode(self.key).decode()})
else: else:
req = json.dumps({}) req = json.dumps({})
r = requests.post("http://" + endpoints[0], req) r = requests.post("http://" + endpoints[0], req)
......
...@@ -326,7 +326,10 @@ class ProcessChannel(object): ...@@ -326,7 +326,10 @@ class ProcessChannel(object):
with self._cv: with self._cv:
while self._stop.value == 0: while self._stop.value == 0:
try: try:
self._que.put({op_name: channeldata}, timeout=0) self._que.put((channeldata.id, {
op_name: channeldata
}),
timeout=0)
break break
except Queue.Full: except Queue.Full:
self._cv.wait() self._cv.wait()
...@@ -378,7 +381,7 @@ class ProcessChannel(object): ...@@ -378,7 +381,7 @@ class ProcessChannel(object):
else: else:
while self._stop.value == 0: while self._stop.value == 0:
try: try:
self._que.put(put_data, timeout=0) self._que.put((data_id, put_data), timeout=0)
break break
except Queue.Empty: except Queue.Empty:
self._cv.wait() self._cv.wait()
...@@ -414,7 +417,7 @@ class ProcessChannel(object): ...@@ -414,7 +417,7 @@ class ProcessChannel(object):
with self._cv: with self._cv:
while self._stop.value == 0 and resp is None: while self._stop.value == 0 and resp is None:
try: try:
resp = self._que.get(timeout=0) resp = self._que.get(timeout=0)[1]
break break
except Queue.Empty: except Queue.Empty:
if timeout is not None: if timeout is not None:
...@@ -459,7 +462,7 @@ class ProcessChannel(object): ...@@ -459,7 +462,7 @@ class ProcessChannel(object):
while self._stop.value == 0 and self._consumer_cursors[ while self._stop.value == 0 and self._consumer_cursors[
op_name] - self._base_cursor.value >= len(self._output_buf): op_name] - self._base_cursor.value >= len(self._output_buf):
try: try:
channeldata = self._que.get(timeout=0) channeldata = self._que.get(timeout=0)[1]
self._output_buf.append(channeldata) self._output_buf.append(channeldata)
list_values = list(channeldata.values()) list_values = list(channeldata.values())
_LOGGER.debug( _LOGGER.debug(
...@@ -633,7 +636,10 @@ class ThreadChannel(Queue.PriorityQueue): ...@@ -633,7 +636,10 @@ class ThreadChannel(Queue.PriorityQueue):
with self._cv: with self._cv:
while self._stop is False: while self._stop is False:
try: try:
self.put({op_name: channeldata}, timeout=0) self.put((channeldata.id, {
op_name: channeldata
}),
timeout=0)
break break
except Queue.Full: except Queue.Full:
self._cv.wait() self._cv.wait()
...@@ -680,7 +686,7 @@ class ThreadChannel(Queue.PriorityQueue): ...@@ -680,7 +686,7 @@ class ThreadChannel(Queue.PriorityQueue):
else: else:
while self._stop is False: while self._stop is False:
try: try:
self.put(put_data, timeout=0) self.put((data_id, put_data), timeout=0)
break break
except Queue.Empty: except Queue.Empty:
self._cv.wait() self._cv.wait()
...@@ -716,7 +722,7 @@ class ThreadChannel(Queue.PriorityQueue): ...@@ -716,7 +722,7 @@ class ThreadChannel(Queue.PriorityQueue):
with self._cv: with self._cv:
while self._stop is False and resp is None: while self._stop is False and resp is None:
try: try:
resp = self.get(timeout=0) resp = self.get(timeout=0)[1]
break break
except Queue.Empty: except Queue.Empty:
if timeout is not None: if timeout is not None:
......
...@@ -120,7 +120,7 @@ class PerformanceTracer(object): ...@@ -120,7 +120,7 @@ class PerformanceTracer(object):
tot_cost)) tot_cost))
if "DAG" in op_cost: if "DAG" in op_cost:
calls = op_cost["DAG"].values() calls = list(op_cost["DAG"].values())
calls.sort() calls.sort()
tot = len(calls) tot = len(calls)
qps = 1.0 * tot / self._interval_s qps = 1.0 * tot / self._interval_s
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册