Commit e84361dd authored by wangjiawei04

codestyle

Parent 02332b49
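This commit strips leftover std::cout debug traces from PredictorClient::numpy_predict and GeneralReaderOp::inference. The hunks below only delete lines, so tensor shapes, LoD values, and element counts are no longer printed to stdout on every request.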
@@ -199,13 +199,9 @@ int PredictorClient::numpy_predict(
           << float_shape[vec_idx].size();
   for (uint32_t j = 0; j < float_shape[vec_idx].size(); ++j) {
     tensor->add_shape(float_shape[vec_idx][j]);
-    std::cout << "float shape " << j << " : " << float_shape[vec_idx][j]
-              << std::endl;
   }
   for (uint32_t j = 0; j < float_lod_slot_batch[vec_idx].size(); ++j) {
     tensor->add_lod(float_lod_slot_batch[vec_idx][j]);
-    std::cout << "float lod: " << vec_idx << " " << j
-              << " value:" << float_lod_slot_batch[vec_idx][j] << std::endl;
   }
   tensor->set_elem_type(1);
   const int float_shape_size = float_shape[vec_idx].size();
@@ -264,13 +260,9 @@ int PredictorClient::numpy_predict(
   for (uint32_t j = 0; j < int_shape[vec_idx].size(); ++j) {
     tensor->add_shape(int_shape[vec_idx][j]);
-    std::cout << "int shape " << j << " : " << int_shape[vec_idx][j]
-              << std::endl;
   }
   for (uint32_t j = 0; j < int_lod_slot_batch[vec_idx].size(); ++j) {
     tensor->add_lod(int_lod_slot_batch[vec_idx][j]);
-    std::cout << "int lod: " << vec_idx << " " << j
-              << " value:" << int_lod_slot_batch[vec_idx][j] << std::endl;
   }
   tensor->set_elem_type(_type[idx]);
@@ -135,8 +135,6 @@ int GeneralReaderOp::inference() {
       lod_tensor.dtype = paddle::PaddleDType::INT32;
     }
     // implement lod tensor here
-    std::cout << "lod size: " << req->insts(0).tensor_array(i).lod_size()
-              << std::endl;
     if (req->insts(0).tensor_array(i).lod_size() > 0) {
       VLOG(2) << "(logid=" << log_id << ") var[" << i << "] is lod_tensor";
       lod_tensor.lod.resize(1);
@@ -224,7 +222,6 @@ int GeneralReaderOp::inference() {
     int offset = 0;
     for (int j = 0; j < batch_size; ++j) {
       int elem_num = req->insts(j).tensor_array(i).int64_data_size();
-      std::cout << "int elem num: " << elem_num << std::endl;
       for (int k = 0; k < elem_num; ++k) {
         dst_ptr[offset + k] = req->insts(j).tensor_array(i).int64_data(k);
       }
@@ -236,7 +233,6 @@ int GeneralReaderOp::inference() {
    int offset = 0;
    for (int j = 0; j < batch_size; ++j) {
      int elem_num = req->insts(j).tensor_array(i).float_data_size();
-     std::cout << "float elem num: " << elem_num << std::endl;
      for (int k = 0; k < elem_num; ++k) {
        dst_ptr[offset + k] = req->insts(j).tensor_array(i).float_data(k);
      }
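The deleted lines were unconditional std::cout traces that fire on every request. The surrounding code already routes diagnostics through glog's VLOG(2) (visible in the GeneralReaderOp hunk above), so a trace that is still occasionally useful can be kept behind that verbosity gate rather than deleted outright. A minimal self-contained sketch of the pattern, with hypothetical values standing in for float_shape[vec_idx][j]:

#include <glog/logging.h>

int main(int argc, char** argv) {
  google::InitGoogleLogging(argv[0]);

  // Hypothetical stand-ins for the j / float_shape[vec_idx][j] pair
  // traced by the removed lines.
  int j = 0;
  long dim = 128;

  // Unlike std::cout, VLOG(2) is emitted only when the process runs with
  // verbosity >= 2, so normal serving stays quiet while the trace remains
  // available for debugging.
  VLOG(2) << "float shape " << j << " : " << dim;
  return 0;
}

Run with GLOG_v=2 GLOG_logtostderr=1 to see the line on stderr; by default the program produces no output.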