From 2f8901cb9c0e6453916c66d800acdf81e27d74a4 Mon Sep 17 00:00:00 2001
From: Leo Chen
Date: Wed, 16 Nov 2022 11:17:32 +0800
Subject: [PATCH] increase the level of some log (#47990)

---
 .../fluid/eager/auto_code_generator/eager_generator.cc |  2 +-
 .../auto_code_generator/generator/python_c_gen.py      |  4 ++--
 paddle/fluid/eager/to_static/run_program_op_func.h     |  4 ++--
 paddle/fluid/framework/attribute_checker.h             |  2 +-
 paddle/fluid/imperative/tracer.cc                      |  2 +-
 paddle/fluid/pybind/eager_functions.cc                 |  2 +-
 paddle/fluid/pybind/eager_math_op_patch.cc             | 10 +++++-----
 paddle/fluid/pybind/eager_method.cc                    |  2 +-
 paddle/fluid/pybind/eager_utils.cc                     |  6 +++---
 paddle/phi/infermeta/binary.cc                         |  4 ++--
 paddle/phi/infermeta/unary.cc                          |  2 +-
 11 files changed, 20 insertions(+), 20 deletions(-)

diff --git a/paddle/fluid/eager/auto_code_generator/eager_generator.cc b/paddle/fluid/eager/auto_code_generator/eager_generator.cc
index 2f51294f15..8485183f7a 100644
--- a/paddle/fluid/eager/auto_code_generator/eager_generator.cc
+++ b/paddle/fluid/eager/auto_code_generator/eager_generator.cc
@@ -592,7 +592,7 @@ static bool CheckOpProto(proto::OpProto* op_proto) {
   }
 
   // Only handle matmul_v2 for now
-  VLOG(1) << "------ Analyzing Op ------: " << op_type;
+  VLOG(3) << "------ Analyzing Op ------: " << op_type;
 
   return true;
 }
diff --git a/paddle/fluid/eager/auto_code_generator/generator/python_c_gen.py b/paddle/fluid/eager/auto_code_generator/generator/python_c_gen.py
index 10eed267bc..8e3944b79c 100644
--- a/paddle/fluid/eager/auto_code_generator/generator/python_c_gen.py
+++ b/paddle/fluid/eager/auto_code_generator/generator/python_c_gen.py
@@ -115,7 +115,7 @@ NOAMP_DYGRAPH_FUNCTION_TEMPLATE = "decltype({}({})) out = {}({});"
 FUNCTION_SET_DEVICE_TEMPLATE = """{}  if (paddle::platform::is_gpu_place(place)) {{
 #if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
     phi::backends::gpu::SetDeviceId(place.device);
-    VLOG(1) <<"CurrentDeviceId: " << phi::backends::gpu::GetCurrentDeviceId() << " from " << (int)place.device;
+    VLOG(4) <<"CurrentDeviceId: " << phi::backends::gpu::GetCurrentDeviceId() << " from " << (int)place.device;
 #else
     PADDLE_THROW(paddle::platform::errors::PreconditionNotMet(
       "PaddlePaddle should compile with GPU if use CUDAPlace."));
@@ -124,7 +124,7 @@ FUNCTION_SET_DEVICE_TEMPLATE = """{}  if (paddle::platform::is_gpu_place(place
   if (paddle::platform::is_custom_place(place)) {{
 #if defined(PADDLE_WITH_CUSTOM_DEVICE)
     phi::DeviceManager::SetDevice(place);
-    VLOG(1) <<"CurrentDeviceId: " << phi::DeviceManager::GetDevice(place.GetDeviceType()) << " from " << (int)place.device;
+    VLOG(4) <<"CurrentDeviceId: " << phi::DeviceManager::GetDevice(place.GetDeviceType()) << " from " << (int)place.device;
 #else
     PADDLE_THROW(paddle::platform::errors::PreconditionNotMet(
       "PaddlePaddle should compile with CUSTOM_DEVICE if use CustomPlace."));
diff --git a/paddle/fluid/eager/to_static/run_program_op_func.h b/paddle/fluid/eager/to_static/run_program_op_func.h
index 23ba88c889..8a6b59808d 100644
--- a/paddle/fluid/eager/to_static/run_program_op_func.h
+++ b/paddle/fluid/eager/to_static/run_program_op_func.h
@@ -32,7 +32,7 @@ static void clear_no_grad_edges(
   for (size_t i = 0; i < params.size(); ++i) {
     auto p_grad_name = paddle::framework::GradVarName(params[i].name());
     if (!block_desc->HasVar(p_grad_name)) {
-      VLOG(1) << "clear edge of " << p_grad_name;
+      VLOG(3) << "clear edge of " << p_grad_name;
       grad_node->MutableOutputMeta()[slot_id][i].GetMutableEdge().Clear();
     }
   }
@@ -48,7 +48,7 @@ static void clear_no_grad_edges_with_partial_block(
     auto p_grad_name = paddle::framework::GradVarName(params[i].name());
     if (!forward_block_desc->HasVar(p_grad_name) &&
         !backward_block_desc->HasVar(p_grad_name)) {
-      VLOG(1) << "clear edge of " << p_grad_name;
+      VLOG(3) << "clear edge of " << p_grad_name;
       grad_node->MutableOutputMeta()[slot_id][i].GetMutableEdge().Clear();
     }
   }
diff --git a/paddle/fluid/framework/attribute_checker.h b/paddle/fluid/framework/attribute_checker.h
index 6552d167e1..67eb69efdf 100644
--- a/paddle/fluid/framework/attribute_checker.h
+++ b/paddle/fluid/framework/attribute_checker.h
@@ -249,7 +249,7 @@ class TypedAttrChecker {
               "doesn't support phi::DenseTensor type.",
               attr_name_));
 
-      VLOG(1) << "Found Attribute " << attr_name_ << " with type(Variable).";
+      VLOG(3) << "Found Attribute " << attr_name_ << " with type(Variable).";
       var_info_checker_(it->second);
       return;
     }
diff --git a/paddle/fluid/imperative/tracer.cc b/paddle/fluid/imperative/tracer.cc
index 400c0021d6..08f73c51fe 100644
--- a/paddle/fluid/imperative/tracer.cc
+++ b/paddle/fluid/imperative/tracer.cc
@@ -223,7 +223,7 @@ void Tracer::TraceOpImpl(const std::string& type,
   platform::RecordEvent op_type_record_event(
       type, platform::TracerEventType::Operator, 1);
   platform::ScopedFlushDenormal flush;
-  VLOG(1) << "Trace Op: " << type;
+  VLOG(4) << "Trace Op: " << type;
   if (FLAGS_use_mkldnn) {
     // if both lists are empty all ops are enabled (default for
     // FLAGS_use_mkldnn=1)
diff --git a/paddle/fluid/pybind/eager_functions.cc b/paddle/fluid/pybind/eager_functions.cc
index 493a8d0b33..cdace567b2 100644
--- a/paddle/fluid/pybind/eager_functions.cc
+++ b/paddle/fluid/pybind/eager_functions.cc
@@ -161,7 +161,7 @@ static PyObject* eager_api_run_partial_grad(PyObject* self,
                        only_inputs,
                        allow_unused,
                        no_grad_vars);
-    VLOG(1) << " in eager_api_run_partial_grad, after runing egr::Grad";
+    VLOG(4) << " in eager_api_run_partial_grad, after runing egr::Grad";
   }
   return ToPyObject(result, true /* return_py_none_if_not_initialize */);
   EAGER_CATCH_AND_THROW_RETURN_NULL
diff --git a/paddle/fluid/pybind/eager_math_op_patch.cc b/paddle/fluid/pybind/eager_math_op_patch.cc
index 6c7d974e70..24ec364efb 100644
--- a/paddle/fluid/pybind/eager_math_op_patch.cc
+++ b/paddle/fluid/pybind/eager_math_op_patch.cc
@@ -349,7 +349,7 @@ static PyObject* tensor__rsub__method(TensorObject* self,
       1);
   EAGER_TRY
 
-  VLOG(1) << "Running Eager tensor__rsub__method";
+  VLOG(4) << "Running Eager tensor__rsub__method";
 
   // Set Device ID
   auto place = egr::Controller::Instance().GetExpectedPlace();
@@ -771,7 +771,7 @@ static PyObject* tensor__gt__method(TensorObject* self,
       1);
   EAGER_TRY
 
-  VLOG(1) << "Running Eager tensor__gt__method";
+  VLOG(4) << "Running Eager tensor__gt__method";
 
   // Set Device ID
   auto place = egr::Controller::Instance().GetExpectedPlace();
@@ -857,7 +857,7 @@ static PyObject* tensor__ge__method(TensorObject* self,
       1);
   EAGER_TRY
 
-  VLOG(1) << "Running Eager tensor__ge__method";
+  VLOG(4) << "Running Eager tensor__ge__method";
 
   // Set Device ID
   auto place = egr::Controller::Instance().GetExpectedPlace();
@@ -1134,7 +1134,7 @@ static PyObject* tensor__lt__method(TensorObject* self,
       1);
   EAGER_TRY
 
-  VLOG(1) << "Running Eager tensor__lt__method";
+  VLOG(4) << "Running Eager tensor__lt__method";
 
   // Set Device ID
   auto place = egr::Controller::Instance().GetExpectedPlace();
@@ -1220,7 +1220,7 @@ static PyObject* tensor__le__method(TensorObject* self,
       1);
   EAGER_TRY
 
-  VLOG(1) << "Running Eager tensor__le__method";
+  VLOG(4) << "Running Eager tensor__le__method";
 
   // Set Device ID
   auto place = egr::Controller::Instance().GetExpectedPlace();
diff --git a/paddle/fluid/pybind/eager_method.cc b/paddle/fluid/pybind/eager_method.cc
index 07978fc053..3c52a705fc 100644
--- a/paddle/fluid/pybind/eager_method.cc
+++ b/paddle/fluid/pybind/eager_method.cc
@@ -800,7 +800,7 @@ static PyObject* tensor_method__get_tensor_from_selected_rows(
   auto* dense_tensor =
       static_cast<phi::DenseTensor*>(selected_rows->mutable_value());
 
-  VLOG(1) << "dense_tensor: " << dense_tensor->IsInitialized();
+  VLOG(4) << "dense_tensor: " << dense_tensor->IsInitialized();
 
   auto t = paddle::experimental::Tensor(
       egr::Controller::Instance().GenerateUniqueName());
diff --git a/paddle/fluid/pybind/eager_utils.cc b/paddle/fluid/pybind/eager_utils.cc
index 04f9e20aa2..4cbac193ad 100644
--- a/paddle/fluid/pybind/eager_utils.cc
+++ b/paddle/fluid/pybind/eager_utils.cc
@@ -1243,7 +1243,7 @@ paddle::experimental::Scalar CastNumpy2Scalar(PyObject* obj, ssize_t arg_pos) {
 
   PyTypeObject* type = obj->ob_type;
   auto type_name = std::string(type->tp_name);
-  VLOG(1) << "type_name: " << type_name;
+  VLOG(4) << "type_name: " << type_name;
   if (type_name == "numpy.ndarray" && PySequence_Check(obj)) {
     PyObject* item = nullptr;
     item = PySequence_GetItem(obj, 0);
@@ -1296,7 +1296,7 @@ paddle::experimental::Scalar CastPyArg2Scalar(PyObject* obj,
   // obj could be: int, float, bool, paddle.Tensor
   PyTypeObject* type = obj->ob_type;
   auto type_name = std::string(type->tp_name);
-  VLOG(1) << "type_name: " << type_name;
+  VLOG(4) << "type_name: " << type_name;
   if (PyBool_Check(obj)) {
     bool value = CastPyArg2Boolean(obj, op_type, arg_pos);
     return paddle::experimental::Scalar(value);
@@ -1348,7 +1348,7 @@ std::vector<paddle::experimental::Scalar> CastPyArg2ScalarArray(PyObject* obj,
 
   PyTypeObject* type = obj->ob_type;
   auto type_name = std::string(type->tp_name);
-  VLOG(1) << "type_name: " << type_name;
+  VLOG(4) << "type_name: " << type_name;
   if (PyList_Check(obj)) {
     Py_ssize_t len = PyList_Size(obj);
     PyObject* item = nullptr;
diff --git a/paddle/phi/infermeta/binary.cc b/paddle/phi/infermeta/binary.cc
index 466a60be25..c48388a031 100644
--- a/paddle/phi/infermeta/binary.cc
+++ b/paddle/phi/infermeta/binary.cc
@@ -195,10 +195,10 @@ void BincountInferMeta(const MetaTensor& x,
                            "But the dimension of Input(X) is [%d]",
                            input_dim.size()));
 
-  VLOG(1) << "####### CHECK weights";
+  VLOG(4) << "####### CHECK weights";
   if (weights) {
     auto weights_dim = weights.dims();
-    VLOG(1) << "##### weights_dim " << weights_dim;
+    VLOG(4) << "##### weights_dim " << weights_dim;
     PADDLE_ENFORCE_EQ(weights_dim.size(),
                       1,
                       phi::errors::InvalidArgument(
diff --git a/paddle/phi/infermeta/unary.cc b/paddle/phi/infermeta/unary.cc
index 4746adc892..f51a4a2b2b 100644
--- a/paddle/phi/infermeta/unary.cc
+++ b/paddle/phi/infermeta/unary.cc
@@ -3732,7 +3732,7 @@ void StridedSliceRawInferMeta(const MetaTensor& x,
     }
     out_dims = phi::make_ddim(new_out_shape);
   }
-  VLOG(1) << "out_dims: " << out_dims;
+  VLOG(4) << "out_dims: " << out_dims;
   out->set_dims(out_dims);
   out->share_lod(x);
   out->set_dtype(x.dtype());
-- 
GitLab
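
Note: Paddle's VLOG macro comes from glog, where a VLOG(n) message is emitted
only when the runtime verbosity (the --v flag or the GLOG_v environment
variable) is at least n. Raising these messages from level 1 to levels 3 and 4
therefore silences them at the default verbosity of 0 and at low -v settings.
A minimal standalone sketch of this gating behavior follows; the file name and
flag values are illustrative assumptions, not part of the patch:

    // vlog_levels_demo.cc -- hypothetical example, not part of this commit.
    // Build: g++ vlog_levels_demo.cc -lglog   (assumes glog is installed)
    #include <glog/logging.h>

    int main(int argc, char* argv[]) {
      google::InitGoogleLogging(argv[0]);
      FLAGS_logtostderr = true;  // print to stderr instead of log files
      FLAGS_v = 3;               // same effect as running with GLOG_v=3

      VLOG(1) << "shown: 1 <= 3 (the old level of the messages in this patch)";
      VLOG(3) << "shown: 3 <= 3 (the new level of e.g. 'Analyzing Op')";
      VLOG(4) << "hidden: 4 > 3 (the new level of e.g. 'Trace Op')";
      return 0;
    }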