未验证 提交 2f8901cb 编写于 作者: L Leo Chen 提交者: GitHub

increase the verbosity level of some logs (#47990)

上级 0d507fc2
......@@ -592,7 +592,7 @@ static bool CheckOpProto(proto::OpProto* op_proto) {
}
// Only handle matmul_v2 for now
VLOG(1) << "------ Analyzing Op ------: " << op_type;
VLOG(3) << "------ Analyzing Op ------: " << op_type;
return true;
}
......
......@@ -115,7 +115,7 @@ NOAMP_DYGRAPH_FUNCTION_TEMPLATE = "decltype({}({})) out = {}({});"
FUNCTION_SET_DEVICE_TEMPLATE = """{} if (paddle::platform::is_gpu_place(place)) {{
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
phi::backends::gpu::SetDeviceId(place.device);
VLOG(1) <<"CurrentDeviceId: " << phi::backends::gpu::GetCurrentDeviceId() << " from " << (int)place.device;
VLOG(4) <<"CurrentDeviceId: " << phi::backends::gpu::GetCurrentDeviceId() << " from " << (int)place.device;
#else
PADDLE_THROW(paddle::platform::errors::PreconditionNotMet(
"PaddlePaddle should compile with GPU if use CUDAPlace."));
......@@ -124,7 +124,7 @@ FUNCTION_SET_DEVICE_TEMPLATE = """{} if (paddle::platform::is_gpu_place(place
if (paddle::platform::is_custom_place(place)) {{
#if defined(PADDLE_WITH_CUSTOM_DEVICE)
phi::DeviceManager::SetDevice(place);
VLOG(1) <<"CurrentDeviceId: " << phi::DeviceManager::GetDevice(place.GetDeviceType()) << " from " << (int)place.device;
VLOG(4) <<"CurrentDeviceId: " << phi::DeviceManager::GetDevice(place.GetDeviceType()) << " from " << (int)place.device;
#else
PADDLE_THROW(paddle::platform::errors::PreconditionNotMet(
"PaddlePaddle should compile with CUSTOM_DEVICE if use CustomPlace."));
......
......@@ -32,7 +32,7 @@ static void clear_no_grad_edges(
for (size_t i = 0; i < params.size(); ++i) {
auto p_grad_name = paddle::framework::GradVarName(params[i].name());
if (!block_desc->HasVar(p_grad_name)) {
VLOG(1) << "clear edge of " << p_grad_name;
VLOG(3) << "clear edge of " << p_grad_name;
grad_node->MutableOutputMeta()[slot_id][i].GetMutableEdge().Clear();
}
}
......@@ -48,7 +48,7 @@ static void clear_no_grad_edges_with_partial_block(
auto p_grad_name = paddle::framework::GradVarName(params[i].name());
if (!forward_block_desc->HasVar(p_grad_name) &&
!backward_block_desc->HasVar(p_grad_name)) {
VLOG(1) << "clear edge of " << p_grad_name;
VLOG(3) << "clear edge of " << p_grad_name;
grad_node->MutableOutputMeta()[slot_id][i].GetMutableEdge().Clear();
}
}
......
......@@ -249,7 +249,7 @@ class TypedAttrChecker {
"doesn't support phi::DenseTensor type.",
attr_name_));
VLOG(1) << "Found Attribute " << attr_name_ << " with type(Variable).";
VLOG(3) << "Found Attribute " << attr_name_ << " with type(Variable).";
var_info_checker_(it->second);
return;
}
......
......@@ -223,7 +223,7 @@ void Tracer::TraceOpImpl(const std::string& type,
platform::RecordEvent op_type_record_event(
type, platform::TracerEventType::Operator, 1);
platform::ScopedFlushDenormal flush;
VLOG(1) << "Trace Op: " << type;
VLOG(4) << "Trace Op: " << type;
if (FLAGS_use_mkldnn) {
// if both lists are empty all ops are enabled (default for
// FLAGS_use_mkldnn=1)
......
......@@ -161,7 +161,7 @@ static PyObject* eager_api_run_partial_grad(PyObject* self,
only_inputs,
allow_unused,
no_grad_vars);
VLOG(1) << " in eager_api_run_partial_grad, after runing egr::Grad";
VLOG(4) << " in eager_api_run_partial_grad, after runing egr::Grad";
}
return ToPyObject(result, true /* return_py_none_if_not_initialize */);
EAGER_CATCH_AND_THROW_RETURN_NULL
......
......@@ -349,7 +349,7 @@ static PyObject* tensor__rsub__method(TensorObject* self,
1);
EAGER_TRY
VLOG(1) << "Running Eager tensor__rsub__method";
VLOG(4) << "Running Eager tensor__rsub__method";
// Set Device ID
auto place = egr::Controller::Instance().GetExpectedPlace();
......@@ -771,7 +771,7 @@ static PyObject* tensor__gt__method(TensorObject* self,
1);
EAGER_TRY
VLOG(1) << "Running Eager tensor__gt__method";
VLOG(4) << "Running Eager tensor__gt__method";
// Set Device ID
auto place = egr::Controller::Instance().GetExpectedPlace();
......@@ -857,7 +857,7 @@ static PyObject* tensor__ge__method(TensorObject* self,
1);
EAGER_TRY
VLOG(1) << "Running Eager tensor__ge__method";
VLOG(4) << "Running Eager tensor__ge__method";
// Set Device ID
auto place = egr::Controller::Instance().GetExpectedPlace();
......@@ -1134,7 +1134,7 @@ static PyObject* tensor__lt__method(TensorObject* self,
1);
EAGER_TRY
VLOG(1) << "Running Eager tensor__lt__method";
VLOG(4) << "Running Eager tensor__lt__method";
// Set Device ID
auto place = egr::Controller::Instance().GetExpectedPlace();
......@@ -1220,7 +1220,7 @@ static PyObject* tensor__le__method(TensorObject* self,
1);
EAGER_TRY
VLOG(1) << "Running Eager tensor__le__method";
VLOG(4) << "Running Eager tensor__le__method";
// Set Device ID
auto place = egr::Controller::Instance().GetExpectedPlace();
......
......@@ -800,7 +800,7 @@ static PyObject* tensor_method__get_tensor_from_selected_rows(
auto* dense_tensor =
static_cast<phi::DenseTensor*>(selected_rows->mutable_value());
VLOG(1) << "dense_tensor: " << dense_tensor->IsInitialized();
VLOG(4) << "dense_tensor: " << dense_tensor->IsInitialized();
auto t = paddle::experimental::Tensor(
egr::Controller::Instance().GenerateUniqueName());
......
......@@ -1243,7 +1243,7 @@ paddle::experimental::Scalar CastNumpy2Scalar(PyObject* obj,
ssize_t arg_pos) {
PyTypeObject* type = obj->ob_type;
auto type_name = std::string(type->tp_name);
VLOG(1) << "type_name: " << type_name;
VLOG(4) << "type_name: " << type_name;
if (type_name == "numpy.ndarray" && PySequence_Check(obj)) {
PyObject* item = nullptr;
item = PySequence_GetItem(obj, 0);
......@@ -1296,7 +1296,7 @@ paddle::experimental::Scalar CastPyArg2Scalar(PyObject* obj,
// obj could be: int, float, bool, paddle.Tensor
PyTypeObject* type = obj->ob_type;
auto type_name = std::string(type->tp_name);
VLOG(1) << "type_name: " << type_name;
VLOG(4) << "type_name: " << type_name;
if (PyBool_Check(obj)) {
bool value = CastPyArg2Boolean(obj, op_type, arg_pos);
return paddle::experimental::Scalar(value);
......@@ -1348,7 +1348,7 @@ std::vector<phi::Scalar> CastPyArg2ScalarArray(PyObject* obj,
PyTypeObject* type = obj->ob_type;
auto type_name = std::string(type->tp_name);
VLOG(1) << "type_name: " << type_name;
VLOG(4) << "type_name: " << type_name;
if (PyList_Check(obj)) {
Py_ssize_t len = PyList_Size(obj);
PyObject* item = nullptr;
......
......@@ -195,10 +195,10 @@ void BincountInferMeta(const MetaTensor& x,
"But the dimension of Input(X) is [%d]",
input_dim.size()));
VLOG(1) << "####### CHECK weights";
VLOG(4) << "####### CHECK weights";
if (weights) {
auto weights_dim = weights.dims();
VLOG(1) << "##### weights_dim " << weights_dim;
VLOG(4) << "##### weights_dim " << weights_dim;
PADDLE_ENFORCE_EQ(weights_dim.size(),
1,
phi::errors::InvalidArgument(
......
......@@ -3732,7 +3732,7 @@ void StridedSliceRawInferMeta(const MetaTensor& x,
}
out_dims = phi::make_ddim(new_out_shape);
}
VLOG(1) << "out_dims: " << out_dims;
VLOG(4) << "out_dims: " << out_dims;
out->set_dims(out_dims);
out->share_lod(x);
out->set_dtype(x.dtype());
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册