Unverified commit b132da89, authored by MRXLT, committed by GitHub

Merge pull request #408 from MRXLT/0.2.0-cherry-v2

[cherry pick to 0.2.0] PR 406, 407
@@ -27,7 +27,8 @@ import (
 type Tensor struct {
     Data      []byte    `json:"data"`
     FloatData []float32 `json:"float_data"`
-    IntData   []int64   `json:"int_data"`
+    IntData   []int     `json:"int_data"`
+    Int64Data []int64   `json:"int64_data"`
     ElemType  int       `json:"elem_type"`
     Shape     []int     `json:"shape"`
 }
@@ -116,9 +117,9 @@ func Predict(handle Handle, int_feed_map map[string][]int64, fetch []string) map
 for i := 0; i < len(handle.FeedAliasNames); i++ {
     key_i := handle.FeedAliasNames[i]
     var tmp Tensor
-    tmp.IntData = []int64{}
+    tmp.IntData = []int{}
     tmp.Shape = []int{}
-    tmp.IntData = int_feed_map[key_i]
+    tmp.Int64Data = int_feed_map[key_i]
     tmp.ElemType = 0
     tmp.Shape = handle.FeedShapeMap[key_i]
     tensor_array = append(tensor_array, tmp)
...
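Taken together, the two hunks above change the client's wire format: 64-bit integer feeds now travel under the new int64_data JSON key (with elem_type 0, per the Predict loop) instead of being forced through int_data. A minimal Python sketch of the resulting tensor payload; the key names come from the struct tags above, while the values are made up for illustration:

import json

# Hypothetical int64 feed for one input variable; elem_type 0 marks
# int64 data, matching tmp.ElemType = 0 in the Predict loop above.
tensor = {
    "int64_data": [9, 233, 52],
    "elem_type": 0,
    "shape": [3],
}
print(json.dumps(tensor))
# {"int64_data": [9, 233, 52], "elem_type": 0, "shape": [3]}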
@@ -64,4 +64,4 @@ result = multi_thread_runner.run(single_func, args.thread, {})
 avg_cost = 0
 for cost in result[0]:
     avg_cost += cost
-print("total cost of each thread".format(avg_cost / args.thread))
+print("total cost {} s of each thread".format(avg_cost / args.thread))
@@ -116,7 +116,7 @@ class Server(object):
         self.reload_interval_s = 10
         self.module_path = os.path.dirname(paddle_serving_server.__file__)
         self.cur_path = os.getcwd()
-        self.check_cuda
+        self.check_cuda()
         self.use_local_bin = False
         self.gpuid = 0
@@ -144,7 +144,7 @@ class Server(object):
             self.bin_path = os.environ["SERVING_BIN"]

     def check_cuda(self):
-        r = os.system("whereis cuda")
+        r = os.system("nvcc --version > /dev/null")
         if r != 0:
             raise SystemExit(
                 "CUDA not found, please check your environment or use cpu version by \"pip install paddle_serving_server\""
...
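Two things are fixed in the last two hunks: check_cuda was previously referenced without parentheses (a no-op attribute access, so the check never ran), and the probe itself was unreliable, since whereis exits 0 whether or not it finds anything. Running nvcc --version fails when the toolkit is absent, which makes the exit status meaningful. Below is a sketch of an equivalent standalone probe; it uses subprocess instead of os.system, which is an assumption for illustration, not the shipped code:

import subprocess

def has_cuda():
    # nvcc exits non-zero, or is missing entirely, when no CUDA
    # toolchain is installed; `whereis cuda` exits 0 either way.
    try:
        subprocess.run(["nvcc", "--version"],
                       stdout=subprocess.DEVNULL,
                       stderr=subprocess.DEVNULL,
                       check=True)
        return True
    except (OSError, subprocess.CalledProcessError):
        return False

if __name__ == "__main__":
    if not has_cuda():
        raise SystemExit("CUDA not found, use the cpu version via "
                         "\"pip install paddle_serving_server\"")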