Commit 99781bce authored by barriery

add logid in op

Parent e69c6357
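The change threads a per-request log id through the serving op chain: each op reads the id from the GeneralBlob produced by its predecessor (GetLogId), stamps it onto the blob it emits (SetLogId), and prefixes its VLOG/LOG output with "(logid=...)" so one request can be traced end to end across ops. The standalone sketch below models that flow; Blob, RunOp, and the literal id 42 are illustrative stand-ins, not PaddleServing APIs.

    #include <cstdint>
    #include <iostream>

    // Stand-in for GeneralBlob: only the log-id plumbing is modeled here.
    struct Blob {
      uint64_t log_id = 0;
      uint64_t GetLogId() const { return log_id; }
      void SetLogId(uint64_t id) { log_id = id; }
    };

    // Mimics one op's inference(): check the input, read the id from the
    // upstream blob, stamp it onto the output blob, and tag every log line.
    int RunOp(const Blob *input_blob, Blob *output_blob) {
      if (!input_blob) {
        std::cerr << "Failed mutable depended argument" << std::endl;
        return -1;
      }
      uint64_t log_id = input_blob->GetLogId();  // comes from the upstream op
      output_blob->SetLogId(log_id);             // handed on to the next op
      std::cout << "(logid=" << log_id << ") op finished" << std::endl;
      return 0;
    }

    int main() {
      Blob in, out;
      in.SetLogId(42);  // in a real service this would come from the client request
      return RunOp(&in, &out);
    }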
@@ -45,36 +45,41 @@ int GeneralCopyOp::inference() {
   const std::string pre_name = pre_node_names[0];
   const GeneralBlob *input_blob = get_depend_argument<GeneralBlob>(pre_name);
-  VLOG(2) << "precedent name: " << pre_name;
+  uint64_t log_id = input_blob->GetLogId();
+  VLOG(2) << "(logid=" << log_id << ") precedent name: " << pre_name;
   const TensorVector *in = &input_blob->tensor_vector;
-  VLOG(2) << "input size: " << in->size();
+  VLOG(2) << "(logid=" << log_id << ") input size: " << in->size();
   int batch_size = input_blob->GetBatchSize();
   int input_var_num = 0;
   GeneralBlob *res = mutable_data<GeneralBlob>();
+  res->SetLogId(log_id);
   TensorVector *out = &res->tensor_vector;
-  VLOG(2) << "input batch size: " << batch_size;
+  VLOG(2) << "(logid=" << log_id << ") input batch size: " << batch_size;
   res->SetBatchSize(batch_size);
   if (!res) {
-    LOG(ERROR) << "Failed get op tls reader object output";
+    LOG(ERROR) << "(logid=" << log_id
+               << ") Failed get op tls reader object output";
   }
   Timer timeline;
   int64_t start = timeline.TimeStampUS();
-  VLOG(2) << "Going to init lod tensor";
+  VLOG(2) << "(logid=" << log_id << ") Going to init lod tensor";
   for (int i = 0; i < in->size(); ++i) {
     paddle::PaddleTensor lod_tensor;
     CopyLod(&in->at(i), &lod_tensor);
     lod_tensor.dtype = in->at(i).dtype;
     lod_tensor.name = in->at(i).name;
-    VLOG(2) << "lod tensor [" << i << "].name = " << lod_tensor.name;
+    VLOG(2) << "(logid=" << log_id << ") lod tensor [" << i
+            << "].name = " << lod_tensor.name;
     out->push_back(lod_tensor);
   }
-  VLOG(2) << "pack done.";
+  VLOG(2) << "(logid=" << log_id << ") pack done.";
   for (int i = 0; i < out->size(); ++i) {
     int64_t *src_ptr = static_cast<int64_t *>(in->at(i).data.data());
@@ -86,7 +91,7 @@ int GeneralCopyOp::inference() {
     }
   }
-  VLOG(2) << "output done.";
+  VLOG(2) << "(logid=" << log_id << ") output done.";
   timeline.Pause();
   int64_t end = timeline.TimeStampUS();
@@ -94,7 +99,7 @@ int GeneralCopyOp::inference() {
   AddBlobInfo(res, start);
   AddBlobInfo(res, end);
-  VLOG(2) << "read data from client success";
+  VLOG(2) << "(logid=" << log_id << ") read data from client success";
   return 0;
 }
...
@@ -50,18 +50,20 @@ int GeneralDistKVInferOp::inference() {
   const std::string pre_name = pre_node_names[0];
   const GeneralBlob *input_blob = get_depend_argument<GeneralBlob>(pre_name);
-  VLOG(2) << "Get precedent op name: " << pre_name;
+  uint64_t log_id = input_blob->GetLogId();
+  VLOG(2) << "(logid=" << log_id << ") Get precedent op name: " << pre_name;
   GeneralBlob *output_blob = mutable_data<GeneralBlob>();
   if (!input_blob) {
-    LOG(ERROR) << "Failed mutable depended argument, op:" << pre_name;
+    LOG(ERROR) << "(logid=" << log_id
+               << ") Failed mutable depended argument, op:" << pre_name;
     return -1;
   }
   const TensorVector *in = &input_blob->tensor_vector;
   TensorVector *out = &output_blob->tensor_vector;
   int batch_size = input_blob->GetBatchSize();
-  VLOG(2) << "input batch size: " << batch_size;
+  VLOG(2) << "(logid=" << log_id << ") input batch size: " << batch_size;
   std::vector<uint64_t> keys;
   std::vector<rec::mcube::CubeValue> values;
   int sparse_count = 0;
@@ -96,13 +98,14 @@ int GeneralDistKVInferOp::inference() {
   rec::mcube::CubeAPI *cube = rec::mcube::CubeAPI::instance();
   std::vector<std::string> table_names = cube->get_table_names();
   if (table_names.size() == 0) {
-    LOG(ERROR) << "cube init error or cube config not given.";
+    LOG(ERROR) << "(logid=" << log_id
+               << ") cube init error or cube config not given.";
     return -1;
   }
   int ret = cube->seek(table_names[0], keys, &values);
   int64_t cube_end = timeline.TimeStampUS();
   if (values.size() != keys.size() || values[0].buff.size() == 0) {
-    LOG(ERROR) << "cube value return null";
+    LOG(ERROR) << "(logid=" << log_id << ") cube value return null";
   }
   size_t EMBEDDING_SIZE = values[0].buff.size() / sizeof(float);
   TensorVector sparse_out;
@@ -153,14 +156,16 @@ int GeneralDistKVInferOp::inference() {
   infer_in.insert(infer_in.end(), sparse_out.begin(), sparse_out.end());
   output_blob->SetBatchSize(batch_size);
+  output_blob->SetLogId(log_id);
-  VLOG(2) << "infer batch size: " << batch_size;
+  VLOG(2) << "(logid=" << log_id << ") infer batch size: " << batch_size;
   int64_t start = timeline.TimeStampUS();
   if (InferManager::instance().infer(
           engine_name().c_str(), &infer_in, out, batch_size)) {
-    LOG(ERROR) << "Failed do infer in fluid model: " << engine_name();
+    LOG(ERROR) << "(logid=" << log_id
+               << ") Failed do infer in fluid model: " << engine_name();
     return -1;
   }
...
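One detail worth flagging in the GeneralDistKVInferOp hunk above: input_blob->GetLogId() is called before the if (!input_blob) guard, so a missing upstream blob would be dereferenced before the error path can fire. The sketch after the commit message reads the id only after the null check for that reason.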
@@ -59,10 +59,13 @@ int GeneralDistKVQuantInferOp::inference() {
     return -1;
   }
+  uint64_t log_id = input_blob->GetLogId();
+  output_blob->SetLogId(log_id);
+
   const TensorVector *in = &input_blob->tensor_vector;
   TensorVector *out = &output_blob->tensor_vector;
   int batch_size = input_blob->GetBatchSize();
-  VLOG(2) << "input batch size: " << batch_size;
+  VLOG(2) << "(logid=" << log_id << ") input batch size: " << batch_size;
   std::vector<uint64_t> keys;
   std::vector<rec::mcube::CubeValue> values;
   int sparse_count = 0;
@@ -94,13 +97,14 @@ int GeneralDistKVQuantInferOp::inference() {
   rec::mcube::CubeAPI *cube = rec::mcube::CubeAPI::instance();
   std::vector<std::string> table_names = cube->get_table_names();
   if (table_names.size() == 0) {
-    LOG(ERROR) << "cube init error or cube config not given.";
+    LOG(ERROR) << "(logid=" << log_id
+               << ") cube init error or cube config not given.";
     return -1;
   }
   int ret = cube->seek(table_names[0], keys, &values);
   if (values.size() != keys.size() || values[0].buff.size() == 0) {
-    LOG(ERROR) << "cube value return null";
+    LOG(ERROR) << "(logid=" << log_id << ") cube value return null";
   }
   TensorVector sparse_out;
@@ -182,7 +186,7 @@ int GeneralDistKVQuantInferOp::inference() {
   output_blob->SetBatchSize(batch_size);
-  VLOG(2) << "infer batch size: " << batch_size;
+  VLOG(2) << "(logid=" << log_id << ") infer batch size: " << batch_size;
   Timer timeline;
   int64_t start = timeline.TimeStampUS();
@@ -190,7 +194,8 @@ int GeneralDistKVQuantInferOp::inference() {
   if (InferManager::instance().infer(
           engine_name().c_str(), &infer_in, out, batch_size)) {
-    LOG(ERROR) << "Failed do infer in fluid model: " << engine_name();
+    LOG(ERROR) << "(logid=" << log_id
+               << ") Failed do infer in fluid model: " << engine_name();
     return -1;
   }
...
@@ -74,9 +74,10 @@ int GeneralTextResponseOp::inference() {
     return -1;
   }
+  uint64_t log_id = input_blob->GetLogId();
   const TensorVector *in = &input_blob->tensor_vector;
   int batch_size = input_blob->GetBatchSize();
-  VLOG(2) << "input batch size: " << batch_size;
+  VLOG(2) << "(logid=" << log_id << ") input batch size: " << batch_size;
   ModelOutput *output = res->add_outputs();
   output->set_engine_name(
...
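In the visible part of the GeneralTextResponseOp hunk, the op only reads the log id to tag its own logs; as a terminal op it writes into the protobuf Response (res->add_outputs()) rather than into a downstream GeneralBlob, so no SetLogId call appears here.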