Unverified commit 2f8901cb, authored by Leo Chen, committed by GitHub

increase the level of some log (#47990)

Parent 0d507fc2
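For context: Paddle's VLOG macro comes from glog, where a VLOG(n) statement is compiled in unconditionally but emitted only when the runtime verbosity is at least n. Raising a statement from VLOG(1) to VLOG(3) or VLOG(4), as this commit does throughout, therefore silences it at the default verbosity of 0 and at mildly verbose settings. A minimal standalone sketch of the gating (plain glog, not Paddle code):

#include <glog/logging.h>

int main(int argc, char* argv[]) {
  google::InitGoogleLogging(argv[0]);
  FLAGS_logtostderr = true;  // print to stderr instead of log files
  FLAGS_v = 3;               // same effect as GLOG_v=3 in the environment

  VLOG(1) << "emitted: 1 <= 3";    // would still appear before this commit
  VLOG(3) << "emitted: 3 <= 3";    // the new level of several logs below
  VLOG(4) << "suppressed: 4 > 3";  // needs verbosity 4 or higher
  return 0;
}

In a Paddle build the same knob is normally set through the GLOG_v environment variable before launching the process.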
@@ -592,7 +592,7 @@ static bool CheckOpProto(proto::OpProto* op_proto) {
   }
   // Only handle matmul_v2 for now
-  VLOG(1) << "------ Analyzing Op ------: " << op_type;
+  VLOG(3) << "------ Analyzing Op ------: " << op_type;
   return true;
 }
...
@@ -115,7 +115,7 @@ NOAMP_DYGRAPH_FUNCTION_TEMPLATE = "decltype({}({})) out = {}({});"
 FUNCTION_SET_DEVICE_TEMPLATE = """{} if (paddle::platform::is_gpu_place(place)) {{
 #if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
   phi::backends::gpu::SetDeviceId(place.device);
-  VLOG(1) <<"CurrentDeviceId: " << phi::backends::gpu::GetCurrentDeviceId() << " from " << (int)place.device;
+  VLOG(4) <<"CurrentDeviceId: " << phi::backends::gpu::GetCurrentDeviceId() << " from " << (int)place.device;
 #else
   PADDLE_THROW(paddle::platform::errors::PreconditionNotMet(
     "PaddlePaddle should compile with GPU if use CUDAPlace."));
@@ -124,7 +124,7 @@ FUNCTION_SET_DEVICE_TEMPLATE = """{} if (paddle::platform::is_gpu_place(place
   if (paddle::platform::is_custom_place(place)) {{
 #if defined(PADDLE_WITH_CUSTOM_DEVICE)
   phi::DeviceManager::SetDevice(place);
-  VLOG(1) <<"CurrentDeviceId: " << phi::DeviceManager::GetDevice(place.GetDeviceType()) << " from " << (int)place.device;
+  VLOG(4) <<"CurrentDeviceId: " << phi::DeviceManager::GetDevice(place.GetDeviceType()) << " from " << (int)place.device;
 #else
   PADDLE_THROW(paddle::platform::errors::PreconditionNotMet(
     "PaddlePaddle should compile with CUSTOM_DEVICE if use CustomPlace."));
...
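Note that the hunk above edits a Python template string in the eager code generator rather than C++ directly; the doubled braces {{ }} are literal braces for str.format. After formatting, the template emits C++ along these lines (a hypothetical expansion of the GPU branch, with the format placeholders filled in):

// Hypothetical expansion of FUNCTION_SET_DEVICE_TEMPLATE (GPU branch):
if (paddle::platform::is_gpu_place(place)) {
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
  phi::backends::gpu::SetDeviceId(place.device);
  VLOG(4) << "CurrentDeviceId: " << phi::backends::gpu::GetCurrentDeviceId()
          << " from " << (int)place.device;
#else
  PADDLE_THROW(paddle::platform::errors::PreconditionNotMet(
      "PaddlePaddle should compile with GPU if use CUDAPlace."));
#endif
}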
@@ -32,7 +32,7 @@ static void clear_no_grad_edges(
   for (size_t i = 0; i < params.size(); ++i) {
     auto p_grad_name = paddle::framework::GradVarName(params[i].name());
     if (!block_desc->HasVar(p_grad_name)) {
-      VLOG(1) << "clear edge of " << p_grad_name;
+      VLOG(3) << "clear edge of " << p_grad_name;
       grad_node->MutableOutputMeta()[slot_id][i].GetMutableEdge().Clear();
     }
   }
@@ -48,7 +48,7 @@ static void clear_no_grad_edges_with_partial_block(
     auto p_grad_name = paddle::framework::GradVarName(params[i].name());
     if (!forward_block_desc->HasVar(p_grad_name) &&
         !backward_block_desc->HasVar(p_grad_name)) {
-      VLOG(1) << "clear edge of " << p_grad_name;
+      VLOG(3) << "clear edge of " << p_grad_name;
       grad_node->MutableOutputMeta()[slot_id][i].GetMutableEdge().Clear();
     }
   }
...
@@ -249,7 +249,7 @@ class TypedAttrChecker {
               "doesn't support phi::DenseTensor type.",
               attr_name_));
-      VLOG(1) << "Found Attribute " << attr_name_ << " with type(Variable).";
+      VLOG(3) << "Found Attribute " << attr_name_ << " with type(Variable).";
       var_info_checker_(it->second);
       return;
     }
...
@@ -223,7 +223,7 @@ void Tracer::TraceOpImpl(const std::string& type,
   platform::RecordEvent op_type_record_event(
       type, platform::TracerEventType::Operator, 1);
   platform::ScopedFlushDenormal flush;
-  VLOG(1) << "Trace Op: " << type;
+  VLOG(4) << "Trace Op: " << type;
   if (FLAGS_use_mkldnn) {
     // if both lists are empty all ops are enabled (default for
     // FLAGS_use_mkldnn=1)
...
@@ -161,7 +161,7 @@ static PyObject* eager_api_run_partial_grad(PyObject* self,
                             only_inputs,
                             allow_unused,
                             no_grad_vars);
-    VLOG(1) << " in eager_api_run_partial_grad, after runing egr::Grad";
+    VLOG(4) << " in eager_api_run_partial_grad, after runing egr::Grad";
   }
   return ToPyObject(result, true /* return_py_none_if_not_initialize */);
   EAGER_CATCH_AND_THROW_RETURN_NULL
...
@@ -349,7 +349,7 @@ static PyObject* tensor__rsub__method(TensorObject* self,
       1);
   EAGER_TRY
-  VLOG(1) << "Running Eager tensor__rsub__method";
+  VLOG(4) << "Running Eager tensor__rsub__method";
   // Set Device ID
   auto place = egr::Controller::Instance().GetExpectedPlace();
@@ -771,7 +771,7 @@ static PyObject* tensor__gt__method(TensorObject* self,
       1);
   EAGER_TRY
-  VLOG(1) << "Running Eager tensor__gt__method";
+  VLOG(4) << "Running Eager tensor__gt__method";
   // Set Device ID
   auto place = egr::Controller::Instance().GetExpectedPlace();
@@ -857,7 +857,7 @@ static PyObject* tensor__ge__method(TensorObject* self,
       1);
   EAGER_TRY
-  VLOG(1) << "Running Eager tensor__ge__method";
+  VLOG(4) << "Running Eager tensor__ge__method";
   // Set Device ID
   auto place = egr::Controller::Instance().GetExpectedPlace();
@@ -1134,7 +1134,7 @@ static PyObject* tensor__lt__method(TensorObject* self,
       1);
   EAGER_TRY
-  VLOG(1) << "Running Eager tensor__lt__method";
+  VLOG(4) << "Running Eager tensor__lt__method";
   // Set Device ID
   auto place = egr::Controller::Instance().GetExpectedPlace();
@@ -1220,7 +1220,7 @@ static PyObject* tensor__le__method(TensorObject* self,
       1);
   EAGER_TRY
-  VLOG(1) << "Running Eager tensor__le__method";
+  VLOG(4) << "Running Eager tensor__le__method";
   // Set Device ID
   auto place = egr::Controller::Instance().GetExpectedPlace();
...
@@ -800,7 +800,7 @@ static PyObject* tensor_method__get_tensor_from_selected_rows(
   auto* dense_tensor =
       static_cast<phi::DenseTensor*>(selected_rows->mutable_value());
-  VLOG(1) << "dense_tensor: " << dense_tensor->IsInitialized();
+  VLOG(4) << "dense_tensor: " << dense_tensor->IsInitialized();
   auto t = paddle::experimental::Tensor(
       egr::Controller::Instance().GenerateUniqueName());
...
@@ -1243,7 +1243,7 @@ paddle::experimental::Scalar CastNumpy2Scalar(PyObject* obj,
                                               ssize_t arg_pos) {
   PyTypeObject* type = obj->ob_type;
   auto type_name = std::string(type->tp_name);
-  VLOG(1) << "type_name: " << type_name;
+  VLOG(4) << "type_name: " << type_name;
   if (type_name == "numpy.ndarray" && PySequence_Check(obj)) {
     PyObject* item = nullptr;
     item = PySequence_GetItem(obj, 0);
@@ -1296,7 +1296,7 @@ paddle::experimental::Scalar CastPyArg2Scalar(PyObject* obj,
   // obj could be: int, float, bool, paddle.Tensor
   PyTypeObject* type = obj->ob_type;
   auto type_name = std::string(type->tp_name);
-  VLOG(1) << "type_name: " << type_name;
+  VLOG(4) << "type_name: " << type_name;
   if (PyBool_Check(obj)) {
     bool value = CastPyArg2Boolean(obj, op_type, arg_pos);
     return paddle::experimental::Scalar(value);
@@ -1348,7 +1348,7 @@ std::vector<phi::Scalar> CastPyArg2ScalarArray(PyObject* obj,
   PyTypeObject* type = obj->ob_type;
   auto type_name = std::string(type->tp_name);
-  VLOG(1) << "type_name: " << type_name;
+  VLOG(4) << "type_name: " << type_name;
   if (PyList_Check(obj)) {
     Py_ssize_t len = PyList_Size(obj);
     PyObject* item = nullptr;
...
@@ -195,10 +195,10 @@ void BincountInferMeta(const MetaTensor& x,
                        "But the dimension of Input(X) is [%d]",
                        input_dim.size()));
-  VLOG(1) << "####### CHECK weights";
+  VLOG(4) << "####### CHECK weights";
   if (weights) {
     auto weights_dim = weights.dims();
-    VLOG(1) << "##### weights_dim " << weights_dim;
+    VLOG(4) << "##### weights_dim " << weights_dim;
     PADDLE_ENFORCE_EQ(weights_dim.size(),
                       1,
                       phi::errors::InvalidArgument(
...
@@ -3732,7 +3732,7 @@ void StridedSliceRawInferMeta(const MetaTensor& x,
     }
     out_dims = phi::make_ddim(new_out_shape);
   }
-  VLOG(1) << "out_dims: " << out_dims;
+  VLOG(4) << "out_dims: " << out_dims;
   out->set_dims(out_dims);
   out->share_lod(x);
   out->set_dtype(x.dtype());
...
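Net effect: with GLOG_v unset (verbosity 0), none of the messages touched here appear in logs any more, and the per-op tracing lines such as "Trace Op" now require GLOG_v=4. To confirm at runtime which levels are active, glog provides VLOG_IS_ON (a sketch for any glog-based binary, not Paddle-specific API):

#include <glog/logging.h>

int main(int argc, char* argv[]) {
  google::InitGoogleLogging(argv[0]);
  FLAGS_logtostderr = true;
  // VLOG_IS_ON(n) is glog's runtime test for "verbosity >= n".
  LOG(INFO) << "VLOG(1): " << VLOG_IS_ON(1)
            << "  VLOG(3): " << VLOG_IS_ON(3)
            << "  VLOG(4): " << VLOG_IS_ON(4);
  return 0;
}

glog also supports per-file overrides (e.g. GLOG_vmodule=tracer=4), which keeps the rest of the framework quiet while re-enabling the verbose logs of a single source file.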