diff --git a/paddle/fluid/inference/api/api_anakin_engine.cc b/paddle/fluid/inference/api/api_anakin_engine.cc
index 2ea122bfdf0f47bb050d3ee619ddf2b9623b4e70..2c4894fd887f2f509dc7ab88c367cea5c1aed99a 100644
--- a/paddle/fluid/inference/api/api_anakin_engine.cc
+++ b/paddle/fluid/inference/api/api_anakin_engine.cc
@@ -50,7 +50,7 @@ template <typename Target>
 bool PaddleInferenceAnakinPredictor<Target>::Init(
     const contrib::AnakinConfig &config) {
   if (!(graph_.load(config.model_file))) {
-    VLOG(30) << "fail to load graph from " << config.model_file;
+    VLOG(3) << "fail to load graph from " << config.model_file;
     return false;
   }
   auto inputs = graph_.get_ins();
@@ -76,15 +76,15 @@ bool PaddleInferenceAnakinPredictor<Target>::Run(
     std::vector<PaddleTensor> *output_data, int batch_size) {
   for (const auto &input : inputs) {
     if (input.dtype != PaddleDType::FLOAT32) {
-      VLOG(30) << "Only support float type inputs. " << input.name
-               << "'s type is not float";
+      VLOG(3) << "Only support float type inputs. " << input.name
+              << "'s type is not float";
       return false;
     }
     auto d_tensor_in_p = executor_p_->get_in(input.name);
     auto net_shape = d_tensor_in_p->shape();
     if (net_shape.size() != input.shape.size()) {
-      VLOG(30) << " input " << input.name
-               << "'s shape size should be equal to that of net";
+      VLOG(3) << " input " << input.name
+              << "'s shape size should be equal to that of net";
       return false;
     }
     int sum = 1;
@@ -105,15 +105,15 @@ bool PaddleInferenceAnakinPredictor<Target>::Run(
 
     if (input.lod.size() > 0) {
       if (input.lod.size() > 1) {
-        VLOG(30) << " input lod first dim should <=1, but you set "
-                 << input.lod.size();
+        VLOG(3) << " input lod first dim should <=1, but you set "
+                << input.lod.size();
         return false;
       }
       std::vector<int> offset(input.lod[0].begin(), input.lod[0].end());
       d_tensor_in_p->set_seq_offset(offset);
-      VLOG(30) << "offset.size(): " << offset.size();
+      VLOG(3) << "offset.size(): " << offset.size();
       for (int i = 0; i < offset.size(); i++) {
-        VLOG(30) << offset[i];
+        VLOG(3) << offset[i];
       }
     }
 
@@ -124,7 +124,7 @@ bool PaddleInferenceAnakinPredictor<Target>::Run(
     if (cudaMemcpy(d_data_p, static_cast<float *>(input.data.data()),
                    d_tensor_in_p->valid_size() * sizeof(float),
                    cudaMemcpyHostToDevice) != 0) {
-      VLOG(30) << "copy data from CPU to GPU error";
+      VLOG(3) << "copy data from CPU to GPU error";
       return false;
     }
   }
@@ -141,7 +141,7 @@ bool PaddleInferenceAnakinPredictor<Target>::Run(
 #endif
 
   if (output_data->empty()) {
-    VLOG(30) << "At least one output should be set with tensors' names.";
+    VLOG(3) << "At least one output should be set with tensors' names.";
     return false;
   }
   for (auto &output : *output_data) {
@@ -157,7 +157,7 @@ bool PaddleInferenceAnakinPredictor<Target>::Run(
     if (cudaMemcpy(output.data.data(), tensor->mutable_data(),
                    tensor->valid_size() * sizeof(float),
                    cudaMemcpyDeviceToHost) != 0) {
-      VLOG(30) << "copy data from GPU to CPU error";
+      VLOG(3) << "copy data from GPU to CPU error";
      return false;
     }
   }
@@ -181,14 +181,14 @@ anakin::Net<Target, anakin::saber::AK_FLOAT, anakin::Precision::FP32>
 template <typename Target>
 std::unique_ptr<PaddlePredictor>
 PaddleInferenceAnakinPredictor<Target>::Clone() {
-  VLOG(30) << "Anakin Predictor::clone";
+  VLOG(3) << "Anakin Predictor::clone";
   std::unique_ptr<PaddlePredictor> cls(
       new PaddleInferenceAnakinPredictor<Target>());
   // construct executer from other graph
   auto anakin_predictor_p =
       dynamic_cast<PaddleInferenceAnakinPredictor<Target> *>(cls.get());
   if (!anakin_predictor_p) {
-    VLOG(30) << "fail to call Init";
+    VLOG(3) << "fail to call Init";
     return nullptr;
   }
   anakin_predictor_p->get_executer().init(graph_);
@@ -206,10 +206,10 @@ template <>
 std::unique_ptr<PaddlePredictor>
 CreatePaddlePredictor<contrib::AnakinConfig, PaddleEngineKind::kAnakin>(
     const contrib::AnakinConfig &config) {
-  VLOG(30) << "Anakin Predictor create.";
+  VLOG(3) << "Anakin Predictor create.";
   if (config.target_type == contrib::AnakinConfig::NVGPU) {
 #ifdef PADDLE_WITH_CUDA
-    VLOG(30) << "Anakin Predictor create on [ NVIDIA GPU ].";
+    VLOG(3) << "Anakin Predictor create on [ NVIDIA GPU ].";
     std::unique_ptr<PaddlePredictor> x(
         new PaddleInferenceAnakinPredictor<anakin::NV>(config));
     return x;
@@ -218,12 +218,12 @@ CreatePaddlePredictor<contrib::AnakinConfig, PaddleEngineKind::kAnakin>(
     return nullptr;
 #endif
   } else if (config.target_type == contrib::AnakinConfig::X86) {
-    VLOG(30) << "Anakin Predictor create on [ Intel X86 ].";
+    VLOG(3) << "Anakin Predictor create on [ Intel X86 ].";
     std::unique_ptr<PaddlePredictor> x(
         new PaddleInferenceAnakinPredictor<anakin::X86>(config));
     return x;
   } else {
-    VLOG(30) << "Anakin Predictor create on unknown platform.";
+    VLOG(3) << "Anakin Predictor create on unknown platform.";
     return nullptr;
   }
 }
diff --git a/paddle/fluid/inference/tests/api/anakin_rnn1_tester.cc b/paddle/fluid/inference/tests/api/anakin_rnn1_tester.cc
index 48369e2e05af77ae984b0b3039a06bbf8bd195fb..c4022225fd4526998af8526d0afb87e7a5be6336 100644
--- a/paddle/fluid/inference/tests/api/anakin_rnn1_tester.cc
+++ b/paddle/fluid/inference/tests/api/anakin_rnn1_tester.cc
@@ -217,9 +217,9 @@ void single_test() {
     LOG(INFO) << "sequence_length = " << seq_offset[seq_offset.size() - 1];
 
     float* data_o = static_cast<float*>(outputs[0].data.data());
-    VLOG(30) << "outputs[0].data.length() = " << outputs[0].data.length();
+    VLOG(3) << "outputs[0].data.length() = " << outputs[0].data.length();
     for (size_t j = 0; j < outputs[0].data.length(); ++j) {
-      VLOG(30) << "output[" << j << "]: " << data_o[j];
+      VLOG(3) << "output[" << j << "]: " << data_o[j];
     }
   }
 }