提交 85948602 编写于 作者: M MRXLT

Merge remote-tracking branch 'upstream/develop' into develop

# 概述
PaddlePaddle是百度开源的机器学习框架,广泛支持各种深度学习模型的定制化开发; Paddle serving是PaddlePaddle的在线预估服务框架,通过加载PaddlePaddle训练得到的模型,利用PaddlePaddle的预测库,提供机器学习预测云服务。
# 文档
[设计文档](doc/DESIGN.md)
[从零开始写一个预测服务](doc/CREATING.md)
[编译安装](doc/INSTALL.md)
[FAQ](doc/FAQ.md)
# 框架简介
...@@ -80,13 +91,3 @@ Paddle serving框架为策略工程师提供以下三层面的功能性扩展: ...@@ -80,13 +91,3 @@ Paddle serving框架为策略工程师提供以下三层面的功能性扩展:
`-- tools # CI工具 `-- tools # CI工具
`-- codestyle `-- codestyle
``` ```
# 文档
[设计文档](doc/DESIGN.md)
[从零开始写一个预测服务](doc/CREATING.md)
[编译安装](doc/INSTALL.md)
[FAQ](doc/FAQ.md)
...@@ -81,8 +81,8 @@ func CmdInstsDownload() { ...@@ -81,8 +81,8 @@ func CmdInstsDownload() {
} }
} }
for i, inst := range Dict.Instances { for i, inst := range Dict.Instances {
if inst.Status != dict.Instance_Status_Download_Succ {
err := <-chs[i] err := <-chs[i]
logex.Noticef("[instance resp]download:%v", Dict.Instances)
if err != nil || keyAndRespSlice[i].Success != "0" { if err != nil || keyAndRespSlice[i].Success != "0" {
logex.Warningf("cmd cube online downlaod of %v:%v, shard:%v failed", inst.AgentIp, inst.AgentPort, inst.Shard) logex.Warningf("cmd cube online downlaod of %v:%v, shard:%v failed", inst.AgentIp, inst.AgentPort, inst.Shard)
continue continue
...@@ -93,6 +93,7 @@ func CmdInstsDownload() { ...@@ -93,6 +93,7 @@ func CmdInstsDownload() {
Dict.DownloadSuccInsts++ Dict.DownloadSuccInsts++
} }
} }
}
if Dict.DownloadSuccInsts == Dict.InstancesNum { if Dict.DownloadSuccInsts == Dict.InstancesNum {
Dict.WaitVersionInfo.Status = dict.Dict_Status_Download_Succ Dict.WaitVersionInfo.Status = dict.Dict_Status_Download_Succ
fmt.Printf("[all download ok]inst :%v\n", Dict.Instances) fmt.Printf("[all download ok]inst :%v\n", Dict.Instances)
...@@ -130,6 +131,7 @@ func CmdInstsReload() { ...@@ -130,6 +131,7 @@ func CmdInstsReload() {
} }
} }
for i, inst := range Dict.Instances { for i, inst := range Dict.Instances {
if inst.Status != dict.Instance_Status_Reload_Succ {
err := <-chs[i] err := <-chs[i]
logex.Noticef("[instance resp]reload:%v", Dict.Instances) logex.Noticef("[instance resp]reload:%v", Dict.Instances)
if err != nil || keyAndRespSlice[i].Success != "0" { if err != nil || keyAndRespSlice[i].Success != "0" {
...@@ -142,6 +144,7 @@ func CmdInstsReload() { ...@@ -142,6 +144,7 @@ func CmdInstsReload() {
Dict.ReloadSuccInsts++ Dict.ReloadSuccInsts++
} }
} }
}
if Dict.ReloadSuccInsts == Dict.InstancesNum { if Dict.ReloadSuccInsts == Dict.InstancesNum {
Dict.WaitVersionInfo.Status = dict.Dict_Status_Reload_Succ Dict.WaitVersionInfo.Status = dict.Dict_Status_Reload_Succ
fmt.Printf("[all reload ok]inst:%v\n", Dict.Instances) fmt.Printf("[all reload ok]inst:%v\n", Dict.Instances)
...@@ -179,6 +182,7 @@ func CmdInstsEnable() { ...@@ -179,6 +182,7 @@ func CmdInstsEnable() {
} }
} }
for i, inst := range Dict.Instances { for i, inst := range Dict.Instances {
if inst.Status != dict.Instance_Status_Enable_Succ {
err := <-chs[i] err := <-chs[i]
logex.Noticef("[instance resp]enable:%v", Dict.Instances) logex.Noticef("[instance resp]enable:%v", Dict.Instances)
if err != nil || keyAndRespSlice[i].Success != "0" { if err != nil || keyAndRespSlice[i].Success != "0" {
...@@ -191,6 +195,7 @@ func CmdInstsEnable() { ...@@ -191,6 +195,7 @@ func CmdInstsEnable() {
Dict.EnableSuccInsts++ Dict.EnableSuccInsts++
} }
} }
}
if Dict.EnableSuccInsts == Dict.InstancesNum { if Dict.EnableSuccInsts == Dict.InstancesNum {
Dict.WaitVersionInfo.Status = dict.Dict_Status_Finished Dict.WaitVersionInfo.Status = dict.Dict_Status_Finished
fmt.Printf("[all enable ok]inst :%v\n", Dict.Instances) fmt.Printf("[all enable ok]inst :%v\n", Dict.Instances)
......
...@@ -19,7 +19,7 @@ import os ...@@ -19,7 +19,7 @@ import os
from elastic_ctr_api import ElasticCTRAPI from elastic_ctr_api import ElasticCTRAPI
BATCH_SIZE = 3 BATCH_SIZE = 10
SERVING_IP = "127.0.0.1" SERVING_IP = "127.0.0.1"
SLOT_CONF_FILE = "./conf/slot.conf" SLOT_CONF_FILE = "./conf/slot.conf"
CTR_EMBEDDING_TABLE_SIZE = 100000001 CTR_EMBEDDING_TABLE_SIZE = 100000001
...@@ -33,6 +33,59 @@ def str2long(str): ...@@ -33,6 +33,59 @@ def str2long(str):
return int(str) return int(str)
def tied_rank(x):
    """Compute the tied (fractional) rank of each element in *x*.

    Elements that share the same value receive the average of the
    1-based ranks they would jointly occupy, as required by rank-based
    metrics such as AUC.

    Parameters
    ----------
    x : list of numbers, numpy array

    Returns
    -------
    list of float
        The tied rank of each element of x, in the original order.
        Empty input yields an empty list.
    """
    # Guard: the original implementation raised IndexError on empty input
    # (it unconditionally read sorted_x[0][0]).
    if len(x) == 0:
        return []
    # Pair each value with its original index so ranks can be written back.
    sorted_x = sorted(zip(x, range(len(x))))
    r = [0 for _ in x]
    cur_val = sorted_x[0][0]
    last_rank = 0
    for i in range(len(sorted_x)):
        if cur_val != sorted_x[i][0]:
            cur_val = sorted_x[i][0]
            # A run of equal values ended at i; assign its average rank
            # to every element of the run [last_rank, i).
            for j in range(last_rank, i):
                r[sorted_x[j][1]] = float(last_rank + 1 + i) / 2.0
            last_rank = i
        if i == len(sorted_x) - 1:
            # Flush the final run [last_rank, i].
            for j in range(last_rank, i + 1):
                r[sorted_x[j][1]] = float(last_rank + i + 2) / 2.0
    return r
def auc(actual, posterior):
    """Compute the area under the ROC curve (AUC) for binary classification.

    Uses the rank-sum (Mann-Whitney U) formulation over the tied ranks of
    the scores, so tied scores are handled correctly.

    Parameters
    ----------
    actual : list of binary numbers (0/1), numpy array
        Ground-truth labels; positives are the elements equal to 1.
    posterior : same length as actual
        Scores defining a ranking from most likely to least likely
        to be positive.

    Returns
    -------
    float
        The AUC score in [0, 1].  (The original docstring claimed this was
        a mean squared error — it is not; it is the AUC.)

    Raises
    ------
    ZeroDivisionError
        If actual contains no positives or no negatives, the AUC is
        undefined (num_negative * num_positive == 0).
    """
    r = tied_rank(posterior)
    num_positive = len([0 for x in actual if x == 1])
    num_negative = len(actual) - num_positive
    # Sum of the ranks of the positive examples.
    sum_positive = sum([r[i] for i in range(len(r)) if actual[i] == 1])
    # Mann-Whitney U statistic normalized by the number of pos/neg pairs.
    auc = ((sum_positive - num_positive * (num_positive + 1) / 2.0) /
           (num_negative * num_positive))
    return auc
def data_reader(data_file, samples, labels): def data_reader(data_file, samples, labels):
if not os.path.exists(data_file): if not os.path.exists(data_file):
print("Path %s not exist" % data_file) print("Path %s not exist" % data_file)
...@@ -89,8 +142,10 @@ if __name__ == "__main__": ...@@ -89,8 +142,10 @@ if __name__ == "__main__":
sys.exit(-1) sys.exit(-1)
ret = data_reader(sys.argv[4], samples, labels) ret = data_reader(sys.argv[4], samples, labels)
print(len(samples))
correct = 0 correct = 0
wrong_label_1_count = 0
result_list = []
for i in range(0, len(samples) - BATCH_SIZE, BATCH_SIZE): for i in range(0, len(samples) - BATCH_SIZE, BATCH_SIZE):
api.clear() api.clear()
batch = samples[i:i + BATCH_SIZE] batch = samples[i:i + BATCH_SIZE]
...@@ -110,6 +165,7 @@ if __name__ == "__main__": ...@@ -110,6 +165,7 @@ if __name__ == "__main__":
idx = 0 idx = 0
for x in predictions: for x in predictions:
result_list.append(x["prob1"])
if x["prob0"] >= x["prob1"]: if x["prob0"] >= x["prob1"]:
pred = 0 pred = 0
else: else:
...@@ -118,9 +174,14 @@ if __name__ == "__main__": ...@@ -118,9 +174,14 @@ if __name__ == "__main__":
if labels[i + idx] == pred: if labels[i + idx] == pred:
correct += 1 correct += 1
else: else:
print("id=%d predict incorrect: pred=%d label=%d (%f %f)" % #if labels[i + idx] == 1:
(i + idx, pred, labels[i + idx], x["prob0"], x["prob1"])) # wrong_label_1_count += 1
# print("error label=1 count", wrong_label_1_count)
#print("id=%d predict incorrect: pred=%d label=%d (%f %f)" %
# (i + idx, pred, labels[i + idx], x["prob0"], x["prob1"]))
pass
idx = idx + 1 idx = idx + 1
print("Acc=%f" % (float(correct) / len(samples)))
#print("Acc=%f" % (float(correct) / len(samples)))
print("auc = ", auc(labels, result_list) )
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册