未验证 提交 4733fe60 编写于 作者: C Chen Weihang 提交者: GitHub

remove all is_initialized usage (#41766)

上级 54ccc308
......@@ -398,7 +398,7 @@ void EagerReducer::InitializeDenseGroups(
"GRAD is SelectedRows",
tensor_name));
PADDLE_ENFORCE_EQ(tensor.is_initialized(), true,
PADDLE_ENFORCE_EQ(tensor.initialized(), true,
platform::errors::PreconditionNotMet(
"Tensor %s is not initialized.", tensor_name));
const auto size = tensor.numel();
......@@ -710,7 +710,7 @@ void EagerReducer::MarkGroupReady(size_t group_index) {
bool EagerReducer::HasGrad(size_t var_index) {
auto grad = egr::EagerUtils::mutable_grad(tensors_[var_index]);
if (grad && grad->is_initialized()) {
if (grad && grad->initialized()) {
return true;
} else {
return false;
......
......@@ -125,7 +125,7 @@ void GradNodeBase::SetGradInMeta(const paddle::experimental::Tensor& fwd_out,
auto& meta = metas[0];
meta.SetStopGradient(fwd_out_meta->StopGradient());
if (!fwd_out.is_initialized()) {
if (!fwd_out.initialized()) {
VLOG(6)
<< "Skip Configuring GradSlotMeta for uninitialized GradInput Tensor";
return;
......@@ -192,7 +192,7 @@ void GradNodeBase::SetGradInMeta(
meta.SetStopGradient(fwd_out_meta->StopGradient());
}
if (!fwd_out_tensor.is_initialized()) {
if (!fwd_out_tensor.initialized()) {
VLOG(6)
<< "Skip Configuring GradSlotMeta for uninitialized GradInput Tensor";
return;
......
......@@ -114,7 +114,7 @@ static void ShareTensorsIntoScope(const std::vector<Tensor> &tensors,
paddle::framework::Scope *scope) {
for (size_t i = 0; i < tensors.size(); ++i) {
auto name = tensors[i].name();
if (name == "Fake_var" || !tensors[i].is_initialized()) {
if (name == "Fake_var" || !tensors[i].initialized()) {
continue;
}
auto *var = scope->Var(name);
......
......@@ -446,7 +446,7 @@ void EagerUtils::FillZeroForEmptyGradInputs(
for (size_t i = 0; i < in_grads->size(); i++) {
for (size_t j = 0; j < (*in_grads)[i].size(); j++) {
paddle::experimental::Tensor& grad = (*in_grads)[i][j];
if (!grad.is_initialized()) {
if (!grad.initialized()) {
const GradSlotMeta& grad_in_meta = grad_in_metas[i][j];
PADDLE_ENFORCE(
grad_in_meta.HasTensorMeta(),
......
......@@ -617,7 +617,7 @@ static PyObject* tensor__getitem_index_not_tensor(TensorObject* self,
// if index is a list, list_select_flag will be true
bool list_select_flag = false;
PADDLE_ENFORCE_EQ(
self->tensor.is_initialized(), true,
self->tensor.initialized(), true,
platform::errors::InvalidArgument(
"tensor %s has not been initialized, we can only slice initialized "
"tensor please init it first with numpy or other tensor.",
......@@ -1146,7 +1146,7 @@ static PyObject* tensor__copy_gradient_from(TensorObject* self, PyObject* args,
PyObject* kwargs) {
EAGER_TRY
auto src = CastPyArg2Tensor(PyTuple_GET_ITEM(args, 0), 0);
if (self->tensor.is_initialized()) {
if (self->tensor.initialized()) {
PADDLE_ENFORCE_EQ(self->tensor.dtype(), src.dtype(),
platform::errors::PreconditionNotMet(
"Tensor %s has different data type with Tensor %s",
......
......@@ -73,7 +73,7 @@ Tensor::copy_to<phi::dtype::float16>(const Place &target_place) const;
void Tensor::copy_(const Tensor &src,
const phi::Place &target_place,
bool blocking) {
if (!src.is_initialized()) {
if (!src.initialized()) {
VLOG(8) << "Src is empty, skip copy";
return;
}
......@@ -81,7 +81,7 @@ void Tensor::copy_(const Tensor &src,
auto kernel_key_set = ParseKernelKeyByInputArgs(src);
KernelType kernel_type = ParseKernelTypeByInputArgs(src);
VLOG(3) << "Deep copy Tensor from " << src.name() << " to " << name();
if (is_initialized()) {
if (initialized()) {
PADDLE_ENFORCE_EQ(dtype(),
src.dtype(),
phi::errors::PreconditionNotMet(
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册