Unverified commit 4d406d26, authored by MRXLT, committed by GitHub

Merge pull request #4 from PaddlePaddle/develop

update from origin
@@ -834,9 +834,17 @@ func GetFileName(source string) (fileName string) {
 }
 
 func checkMd5(file string, fileMd5 string) (err error) {
-    md5Cmd := fmt.Sprintf("./scripts/md5_checker %s %s", file, fileMd5)
+    cmd := fmt.Sprintf("md5sum %s | awk '{print $1}'", file)
+    stdout, _, _ := pipeline.Run(exec.Command("/bin/sh", "-c", cmd))
+    real_md5 := stdout.String()
 
-    _, _, err = pipeline.Run(exec.Command("/bin/sh", "-c", md5Cmd))
+    cmd = fmt.Sprintf("cat %s | awk '{print $1}'", fileMd5)
+    stdout, _, _ = pipeline.Run(exec.Command("/bin/sh", "-c", cmd))
+    given_md5 := stdout.String()
+
+    if real_md5 != given_md5 {
+        logex.Warningf("checkMd5 failed real_md5[%s] given_md5[%s]", real_md5, given_md5)
+        err = errors.New("checkMd5 failed")
+    }
     return
 }
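
The rewritten checkMd5 no longer delegates to an external checker script: it computes the file's digest with md5sum, reads the recorded digest, and compares the two strings, warning on mismatch. Below is a minimal C++ sketch of the same shell-out-and-compare approach; the helper name first_token_of and the digest-file layout (hash in the first whitespace-delimited field) are assumptions for illustration, not part of the commit.

#include <cstdio>
#include <iostream>
#include <sstream>
#include <string>

// Run a shell command and return the first whitespace-delimited token
// of its stdout (mirrors the `| awk '{print $1}'` step above).
static std::string first_token_of(const std::string &cmd) {
  FILE *pipe = popen(cmd.c_str(), "r");
  if (pipe == nullptr) return "";
  std::string out;
  char buf[256];
  while (fgets(buf, sizeof(buf), pipe) != nullptr) out += buf;
  pclose(pipe);
  std::istringstream iss(out);
  std::string token;
  iss >> token;  // drops the trailing newline and any extra columns
  return token;
}

// Compare the file's actual md5 with the digest recorded in md5_file.
bool check_md5(const std::string &file, const std::string &md5_file) {
  std::string real_md5 = first_token_of("md5sum " + file);
  std::string given_md5 = first_token_of("cat " + md5_file);
  if (real_md5.empty() || real_md5 != given_md5) {
    std::cerr << "checkMd5 failed real_md5[" << real_md5
              << "] given_md5[" << given_md5 << "]\n";
    return false;
  }
  return true;
}

int main(int argc, char **argv) {
  if (argc != 3) return 2;  // usage: check_md5 <file> <digest_file>
  return check_md5(argv[1], argv[2]) ? 0 : 1;
}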
......
@@ -30,7 +30,7 @@ using baidu::paddle_serving::predictor::ctr_prediction::Response;
 using baidu::paddle_serving::predictor::ctr_prediction::CTRReqInstance;
 using baidu::paddle_serving::predictor::ctr_prediction::CTRResInstance;
 
-int batch_size = 1;
+int batch_size = 16;
 int sparse_num = 26;
 int dense_num = 13;
 int thread_num = 1;
@@ -95,8 +95,12 @@ int create_req(Request* req,
       return -1;
     }
     // add data
-    std::vector<std::string> feature_list =
-        split(data_list[data_index + i], "\t");
+    // avoid out-of-bounds access
+    int cur_index = data_index + i;
+    if (cur_index >= data_list.size()) {
+      cur_index = cur_index % data_list.size();
+    }
+    std::vector<std::string> feature_list = split(data_list[cur_index], "\t");
    for (int fi = 0; fi < dense_num; fi++) {
      if (feature_list[fi] == "") {
        ins->add_dense_ids(0.0);
......
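
The guard added above keeps a batch that runs past the end of the data file from indexing out of range: the cursor wraps back to the start and lines are reused cyclically. A standalone sketch of the same wrap-around indexing (all names and values here are illustrative):

#include <cstddef>
#include <iostream>
#include <string>
#include <vector>

int main() {
  std::vector<std::string> data_list = {"a", "b", "c"};  // 3 lines on disk
  int batch_size = 4;   // request more samples than the file holds
  int data_index = 1;   // this batch starts at "b"
  for (int i = 0; i < batch_size; ++i) {
    // Same guard as in create_req: wrap instead of reading past the end.
    // (Casting to size_t also sidesteps the signed/unsigned comparison.)
    size_t cur_index = static_cast<size_t>(data_index + i);
    if (cur_index >= data_list.size()) {
      cur_index %= data_list.size();
    }
    std::cout << data_list[cur_index] << " ";  // prints: b c a b
  }
  std::cout << std::endl;
  return 0;
}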
 --enable_model_toolkit
---enable_cube=false
+--enable_cube=true
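
These lines look like a gflags-style flag file, with this change switching on lookups against the cube distributed store for sparse parameters. Assuming the server defines the switch with gflags, a minimal sketch of the matching definition (default value and description here are illustrative assumptions):

#include <gflags/gflags.h>
#include <iostream>

// Assumed definition: gates sparse parameter lookup via the cube service.
DEFINE_bool(enable_cube, false, "look up sparse parameters in cube");

int main(int argc, char **argv) {
  // gflags can also load switches from a file via --flagfile=<path>,
  // which is how a line like --enable_cube=true takes effect.
  google::ParseCommandLineFlags(&argc, &argv, true);
  std::cout << "enable_cube=" << std::boolalpha << FLAGS_enable_cube
            << std::endl;
  return 0;
}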
@@ -263,28 +263,51 @@ int CTRPredictionOp::inference() {
     return 0;
   }
 
-  if (out->size() != sample_size) {
-    LOG(ERROR) << "Output tensor size not equal that of input";
-    fill_response_with_message(res, -1, "Output size != input size");
+  if (out->size() != 1) {
+    LOG(ERROR) << "Model returned number of fetch tensor more than 1";
+    fill_response_with_message(
+        res, -1, "Model returned number of fetch tensor more than 1");
     return 0;
   }
 
-  for (size_t i = 0; i < out->size(); ++i) {
-    int dim1 = out->at(i).shape[0];
-    int dim2 = out->at(i).shape[1];
+  int output_shape_dim = out->at(0).shape.size();
+  if (output_shape_dim != 2) {
+    LOG(ERROR) << "Fetch LoDTensor should be shape of [sample_size, 2]";
+    fill_response_with_message(
+        res, -1, "Fetch LoDTensor should be shape of [sample_size, 2]");
+    return 0;
+  }
 
-    if (out->at(i).dtype != paddle::PaddleDType::FLOAT32) {
-      LOG(ERROR) << "Expected data type float";
-      fill_response_with_message(res, -1, "Expected data type float");
-      return 0;
-    }
+  if (out->at(0).dtype != paddle::PaddleDType::FLOAT32) {
+    LOG(ERROR) << "Fetch LoDTensor data type should be FLOAT32";
+    fill_response_with_message(
+        res, -1, "Fetch LoDTensor data type should be FLOAT32");
+    return 0;
+  }
 
-    float *data = static_cast<float *>(out->at(i).data.data());
-    for (int j = 0; j < dim1; ++j) {
-      CTRResInstance *res_instance = res->add_predictions();
-      res_instance->set_prob0(data[j * dim2]);
-      res_instance->set_prob1(data[j * dim2 + 1]);
-    }
-  }
+  int dim1 = out->at(0).shape[0];
+  int dim2 = out->at(0).shape[1];
+  if (dim1 != sample_size) {
+    LOG(ERROR) << "Returned result count not equal to sample_size";
+    fill_response_with_message(
+        res, -1, "Returned result count not equal to sample size");
+    return 0;
+  }
+  if (dim2 != 2) {
+    LOG(ERROR) << "Returned result is not expected, should be 2 floats for "
+                  "each sample";
+    fill_response_with_message(
+        res, -1, "Returned result is not 2 floats for each sample");
+    return 0;
+  }
+  float *data = static_cast<float *>(out->at(0).data.data());
+  for (int i = 0; i < dim1; ++i) {
+    CTRResInstance *res_instance = res->add_predictions();
+    res_instance->set_prob0(data[i * dim2]);
+    res_instance->set_prob1(data[i * dim2 + 1]);
+  }
 
   for (size_t i = 0; i < in->size(); ++i) {
......
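
After the checks above, the single fetch tensor is a row-major [sample_size, 2] float buffer, which is why prob0 and prob1 are read at data[i * dim2] and data[i * dim2 + 1]. A standalone illustration of that indexing (the probability values are made up):

#include <iostream>
#include <vector>

int main() {
  // Row-major [sample_size, 2] buffer as validated above;
  // two fabricated samples for demonstration.
  std::vector<float> data = {0.9f, 0.1f, 0.3f, 0.7f};
  const int dim2 = 2;
  const int dim1 = static_cast<int>(data.size()) / dim2;  // sample_size
  for (int i = 0; i < dim1; ++i) {
    std::cout << "sample " << i << ": prob0=" << data[i * dim2]
              << " prob1=" << data[i * dim2 + 1] << "\n";
  }
  return 0;
}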