From 4733fe601a46c4f1d1fa4c77c8eeb60638aabb50 Mon Sep 17 00:00:00 2001
From: Chen Weihang
Date: Thu, 14 Apr 2022 20:37:56 +0800
Subject: [PATCH] remove all is initialized using (#41766)

---
 paddle/fluid/distributed/collective/reducer.cc     | 4 ++--
 paddle/fluid/eager/grad_node_info.cc               | 4 ++--
 paddle/fluid/eager/to_static/run_program_op_node.h | 2 +-
 paddle/fluid/eager/utils.cc                        | 2 +-
 paddle/fluid/pybind/eager_method.cc                | 4 ++--
 paddle/phi/api/lib/tensor_method.cc                | 4 ++--
 6 files changed, 10 insertions(+), 10 deletions(-)

diff --git a/paddle/fluid/distributed/collective/reducer.cc b/paddle/fluid/distributed/collective/reducer.cc
index 63e92444b32..75153df936b 100644
--- a/paddle/fluid/distributed/collective/reducer.cc
+++ b/paddle/fluid/distributed/collective/reducer.cc
@@ -398,7 +398,7 @@ void EagerReducer::InitializeDenseGroups(
                           "GRAD is SelectedRows",
                           tensor_name));
 
-    PADDLE_ENFORCE_EQ(tensor.is_initialized(), true,
+    PADDLE_ENFORCE_EQ(tensor.initialized(), true,
                       platform::errors::PreconditionNotMet(
                           "Tensor %s is not initialized.", tensor_name));
     const auto size = tensor.numel();
@@ -710,7 +710,7 @@ void EagerReducer::MarkGroupReady(size_t group_index) {
 
 bool EagerReducer::HasGrad(size_t var_index) {
   auto grad = egr::EagerUtils::mutable_grad(tensors_[var_index]);
-  if (grad && grad->is_initialized()) {
+  if (grad && grad->initialized()) {
     return true;
   } else {
     return false;
diff --git a/paddle/fluid/eager/grad_node_info.cc b/paddle/fluid/eager/grad_node_info.cc
index 72b84b9db32..5b4921320f6 100644
--- a/paddle/fluid/eager/grad_node_info.cc
+++ b/paddle/fluid/eager/grad_node_info.cc
@@ -125,7 +125,7 @@ void GradNodeBase::SetGradInMeta(const paddle::experimental::Tensor& fwd_out,
   auto& meta = metas[0];
   meta.SetStopGradient(fwd_out_meta->StopGradient());
 
-  if (!fwd_out.is_initialized()) {
+  if (!fwd_out.initialized()) {
     VLOG(6)
         << "Skip Configuring GradSlotMeta for uninitialized GradInput Tensor";
     return;
@@ -192,7 +192,7 @@ void GradNodeBase::SetGradInMeta(
       meta.SetStopGradient(fwd_out_meta->StopGradient());
     }
 
-    if (!fwd_out_tensor.is_initialized()) {
+    if (!fwd_out_tensor.initialized()) {
       VLOG(6)
           << "Skip Configuring GradSlotMeta for uninitialized GradInput Tensor";
       return;
diff --git a/paddle/fluid/eager/to_static/run_program_op_node.h b/paddle/fluid/eager/to_static/run_program_op_node.h
index 46f48778a96..9347a76fd48 100644
--- a/paddle/fluid/eager/to_static/run_program_op_node.h
+++ b/paddle/fluid/eager/to_static/run_program_op_node.h
@@ -114,7 +114,7 @@ static void ShareTensorsIntoScope(const std::vector<paddle::experimental::Tensor> &tensors,
                                   paddle::framework::Scope *scope) {
   for (size_t i = 0; i < tensors.size(); ++i) {
     auto name = tensors[i].name();
-    if (name == "Fake_var" || !tensors[i].is_initialized()) {
+    if (name == "Fake_var" || !tensors[i].initialized()) {
       continue;
     }
     auto *var = scope->Var(name);
diff --git a/paddle/fluid/eager/utils.cc b/paddle/fluid/eager/utils.cc
index 756563df4df..66d877f06e2 100644
--- a/paddle/fluid/eager/utils.cc
+++ b/paddle/fluid/eager/utils.cc
@@ -446,7 +446,7 @@ void EagerUtils::FillZeroForEmptyGradInputs(
   for (size_t i = 0; i < in_grads->size(); i++) {
     for (size_t j = 0; j < (*in_grads)[i].size(); j++) {
       paddle::experimental::Tensor& grad = (*in_grads)[i][j];
-      if (!grad.is_initialized()) {
+      if (!grad.initialized()) {
         const GradSlotMeta& grad_in_meta = grad_in_metas[i][j];
         PADDLE_ENFORCE(
             grad_in_meta.HasTensorMeta(),
diff --git a/paddle/fluid/pybind/eager_method.cc b/paddle/fluid/pybind/eager_method.cc
index 4610196726e..8304db13c46 100644
--- a/paddle/fluid/pybind/eager_method.cc
+++ b/paddle/fluid/pybind/eager_method.cc
@@ -617,7 +617,7 @@ static PyObject* tensor__getitem_index_not_tensor(TensorObject* self,
   // if index is a list, list_select_flag will be true
   bool list_select_flag = false;
   PADDLE_ENFORCE_EQ(
-      self->tensor.is_initialized(), true,
+      self->tensor.initialized(), true,
       platform::errors::InvalidArgument(
           "tensor %s has not been initialized, we can only slice initialized "
           "tensor please init it first with numpy or other tensor.",
@@ -1146,7 +1146,7 @@ static PyObject* tensor__copy_gradient_from(TensorObject* self, PyObject* args,
                                             PyObject* kwargs) {
   EAGER_TRY
   auto src = CastPyArg2Tensor(PyTuple_GET_ITEM(args, 0), 0);
-  if (self->tensor.is_initialized()) {
+  if (self->tensor.initialized()) {
     PADDLE_ENFORCE_EQ(self->tensor.dtype(), src.dtype(),
                       platform::errors::PreconditionNotMet(
                           "Tensor %s has different data type with Tensor %s",
diff --git a/paddle/phi/api/lib/tensor_method.cc b/paddle/phi/api/lib/tensor_method.cc
index 79519f67d2a..51d4ec28200 100644
--- a/paddle/phi/api/lib/tensor_method.cc
+++ b/paddle/phi/api/lib/tensor_method.cc
@@ -73,7 +73,7 @@ Tensor::copy_to(const Place &target_place) const;
 void Tensor::copy_(const Tensor &src,
                    const phi::Place &target_place,
                    bool blocking) {
-  if (!src.is_initialized()) {
+  if (!src.initialized()) {
     VLOG(8) << "Src is empty, skip copy";
     return;
   }
@@ -81,7 +81,7 @@ void Tensor::copy_(const Tensor &src,
   auto kernel_key_set = ParseKernelKeyByInputArgs(src);
   KernelType kernel_type = ParseKernelTypeByInputArgs(src);
   VLOG(3) << "Deep copy Tensor from " << src.name() << " to " << name();
-  if (is_initialized()) {
+  if (initialized()) {
     PADDLE_ENFORCE_EQ(dtype(),
                       src.dtype(),
                       phi::errors::PreconditionNotMet(
--
GitLab
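
Reviewer note on the rename: every call site keeps its behavior; only the query
name on paddle::experimental::Tensor changes from is_initialized() to
initialized(). A minimal sketch of the resulting call-site pattern follows; the
function Example and its guard body are hypothetical, written only to
illustrate the renamed API, not code from this patch:

  #include "paddle/phi/api/include/tensor.h"

  // Guard storage-dependent work behind the renamed initialization query.
  void Example(const paddle::experimental::Tensor &t) {
    // Before this patch the same check read: if (t.is_initialized()) { ... }
    if (t.initialized()) {
      // t has allocated storage here, so reads such as t.numel() are safe.
    }
  }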