// Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "paddle/fluid/eager/utils.h"
16
#include "paddle/fluid/eager/accumulation/accumulation_node.h"
17
#include "paddle/fluid/eager/api/utils/global_utils.h"
18
#include "paddle/fluid/eager/api/utils/hook_utils.h"
19
#include "paddle/fluid/eager/tensor_wrapper.h"
20

21 22
#include "paddle/phi/api/all.h"
#include "paddle/phi/common/layout.h"
23
#include "paddle/phi/core/compat/convert_utils.h"
24
#include "paddle/phi/core/tensor_meta.h"
25 26

#include "paddle/fluid/framework/data_layout.h"
27
#include "paddle/fluid/framework/phi_utils.h"
28
#include "paddle/fluid/framework/variable.h"
29

30 31
PADDLE_DEFINE_EXPORTED_bool(retain_grad_for_all_tensor,
                            false,
                            "retain grad for all tensor");

namespace egr {
/**
 * Implementation of Eager Utils.
 **/

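// Returns the AutogradMeta of `target`, creating and attaching a fresh one if
// the tensor does not carry autograd information yet.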
AutogradMeta* EagerUtils::autograd_meta(paddle::experimental::Tensor* target) {
  auto* p_autograd_meta = target->get_autograd_meta();
  if (!p_autograd_meta) {
    auto p_autograd_meta_ptr = std::make_shared<AutogradMeta>();
    p_autograd_meta = p_autograd_meta_ptr.get();
    target->set_autograd_meta(p_autograd_meta_ptr);
  }
  return static_cast<AutogradMeta*>(p_autograd_meta);
}

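// Returns the AutogradMeta of `target` and fails if it is missing; callers
// must ensure the tensor already carries autograd information.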
AutogradMeta* EagerUtils::unsafe_autograd_meta(
    const paddle::experimental::Tensor& target) {
  auto* p_autograd_meta = target.get_autograd_meta();
  PADDLE_ENFORCE(p_autograd_meta,
                 paddle::platform::errors::Fatal(
                     "Null autograd_meta gotten from unsafe_autograd_meta()"));
  return static_cast<AutogradMeta*>(p_autograd_meta);
}

std::vector<AutogradMeta*> EagerUtils::unsafe_autograd_meta(
    const std::vector<paddle::experimental::Tensor>& targets) {
  std::vector<AutogradMeta*> metas;
  metas.reserve(targets.size());
  for (const paddle::experimental::Tensor& t : targets) {
    metas.emplace_back(unsafe_autograd_meta(t));
  }
  return metas;
}

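// Returns the AutogradMeta of `target`, or nullptr if the tensor carries no
// autograd information.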
AutogradMeta* EagerUtils::nullable_autograd_meta(
    const paddle::experimental::Tensor& target) {
  auto* p_autograd_meta = target.get_autograd_meta();
  if (!p_autograd_meta) return nullptr;

  return static_cast<AutogradMeta*>(p_autograd_meta);
}

AutogradMeta* EagerUtils::nullable_autograd_meta(
    const paddle::optional<paddle::experimental::Tensor>& target) {
  if (target.get_ptr() != nullptr) {
    return EagerUtils::nullable_autograd_meta(*(target.get_ptr()));
  }
  return nullptr;
}

std::vector<AutogradMeta*> EagerUtils::nullable_autograd_meta(
    const std::vector<paddle::experimental::Tensor>& targets) {
  std::vector<AutogradMeta*> metas;
  metas.reserve(targets.size());
  for (const paddle::experimental::Tensor& t : targets) {
    metas.emplace_back(nullable_autograd_meta(t));
  }
  return metas;
}

std::vector<AutogradMeta*> EagerUtils::nullable_autograd_meta(
    const std::vector<paddle::experimental::Tensor*>& targets) {
  std::vector<AutogradMeta*> metas;
  metas.reserve(targets.size());
  for (const paddle::experimental::Tensor* t : targets) {
    metas.emplace_back(nullable_autograd_meta(*t));
  }
  return metas;
}

std::vector<AutogradMeta*> EagerUtils::autograd_meta(
    std::vector<paddle::experimental::Tensor>* targets) {
  std::vector<AutogradMeta*> ret;
  ret.reserve(targets->size());

  // For autograd_meta we can tolerate it being nullptr.
  for (size_t i = 0; i < targets->size(); i++) {
    auto* p_autograd_meta = autograd_meta(&((*targets)[i]));
    ret.emplace_back(p_autograd_meta);
  }
  return ret;
}

std::vector<AutogradMeta*> EagerUtils::autograd_meta(
    std::vector<paddle::experimental::Tensor*>* targets) {
  std::vector<AutogradMeta*> ret;
  ret.reserve(targets->size());

  // For autograd_meta we can tolerate it being nullptr.
  for (size_t i = 0; i < targets->size(); i++) {
    auto* p_autograd_meta = autograd_meta((*targets)[i]);
    ret.emplace_back(p_autograd_meta);
  }
  return ret;
}

std::pair<size_t, size_t> EagerUtils::OutRankInfo(
    const paddle::experimental::Tensor& target) {
  return unsafe_autograd_meta(target)->OutRankInfo();
}

std::shared_ptr<GradNodeBase> EagerUtils::grad_node(
    const paddle::experimental::Tensor& target) {
  auto* meta = nullable_autograd_meta(target);
  if (meta) {
    return meta->GetMutableGradNode();
  } else {
    return nullptr;
  }
}

paddle::experimental::Tensor* EagerUtils::mutable_grad(
    const paddle::experimental::Tensor& target) {
  auto* meta = nullable_autograd_meta(target);
  if (meta) {
    return meta->MutableGrad();
  } else {
    return nullptr;
  }
}

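// Binds `grad_node` as the backward node of each AutogradMeta; an existing
// node is logged at VLOG(7) before being overwritten.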
void EagerUtils::SetHistory(std::vector<AutogradMeta*>* autograd_metas,
                            const std::shared_ptr<GradNodeBase>& grad_node) {
  for (const auto& autograd_meta : *autograd_metas) {
    if (autograd_meta->GradNode()) {
      VLOG(7) << "Should not set grad node twice, original node is:"
              << autograd_meta->GradNode()->name()
              << " current is: " << grad_node->name();
    }
    autograd_meta->SetGradNode(grad_node);
  }
}

void EagerUtils::SetHistory(AutogradMeta* autograd_meta,
                            const std::shared_ptr<GradNodeBase>& grad_node) {
  if (autograd_meta->GradNode()) {
    VLOG(7) << "Should not set grad node twice, original node is:"
            << autograd_meta->GradNode()->name()
            << "current is: " << grad_node->name();
  }
  autograd_meta->SetGradNode(grad_node);
}

void EagerUtils::SetOutRankWithSlot(std::vector<AutogradMeta*>* targets,
                                    size_t slot_id) {
  // Set OutRankInfo from 0 to size of targets
  for (size_t i = 0; i < targets->size(); i++) {
    (*targets)[i]->SetSingleOutRankWithSlot(slot_id, i);
  }
}
void EagerUtils::SetOutRankWithSlot(AutogradMeta* target, size_t slot_id) {
  target->SetSingleOutRankWithSlot(slot_id, 0);
}

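// TrySyncToVar/TrySyncToVars wrap eager Tensors into EagerVariables, the
// bridge type used by code paths that still expect framework::Variable.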
std::shared_ptr<egr::EagerVariable> EagerUtils::TrySyncToVar(
    const paddle::experimental::Tensor& tensor) {
  return std::make_shared<egr::EagerVariable>(tensor);
}

std::vector<std::shared_ptr<egr::EagerVariable>> EagerUtils::TrySyncToVars(
    const paddle::experimental::Tensor& tensor) {
  return {TrySyncToVar(tensor)};
}

std::vector<std::shared_ptr<egr::EagerVariable>> EagerUtils::TrySyncToVars(
    paddle::experimental::Tensor* tensor) {
  PADDLE_ENFORCE_NOT_NULL(
      tensor,
      paddle::platform::errors::Fatal(
          "Should Not Pass Empty tensor pointer in, since only output can "
          "reach this, please check output value and make sure it's not null"));
  return {TrySyncToVar(*tensor)};
}

std::vector<std::shared_ptr<egr::EagerVariable>> EagerUtils::TrySyncToVars(
    const std::vector<paddle::experimental::Tensor*>& tensors) {
  std::vector<std::shared_ptr<EagerVariable>> res;
  size_t num = tensors.size();
  res.reserve(num);
  for (size_t i = 0; i < num; i++) {
    auto* tensor = tensors[i];
    PADDLE_ENFORCE_NOT_NULL(
        tensor,
        paddle::platform::errors::Fatal(
            "Tensor is null and cannot be copied. "
            "We are trying to TrySyncToVars tensor from its "
            "shared_ptr, this error may indicate some outputs "
            "are nullptr"));
    res.emplace_back(TrySyncToVar(*tensor));
  }
  return res;
}

std::vector<std::shared_ptr<egr::EagerVariable>> EagerUtils::TrySyncToVars(
    const std::vector<paddle::experimental::Tensor>& tensors) {
  std::vector<std::shared_ptr<EagerVariable>> res;
  size_t num = tensors.size();
  res.reserve(num);
  for (size_t i = 0; i < num; i++) {
    res.emplace_back(TrySyncToVar(tensors[i]));
  }
  return res;
}

std::vector<std::shared_ptr<EagerVariable>> EagerUtils::CreateVars(
    const size_t num) {
  std::vector<std::shared_ptr<EagerVariable>> res;
  res.reserve(num);
  for (size_t i = 0; i < num; i++) {
    res.emplace_back(
        new EagerVariable(egr::Controller::Instance().GenerateUniqueName()));
  }
  return res;
}

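// Lets a view output share the input's allocation and inplace version
// counter, so in-place modifications stay consistent across both tensors.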
void EagerUtils::HandleViewBetweenInputAndOutput(
    const std::shared_ptr<EagerVariable>& input_var,
    const std::shared_ptr<EagerVariable>& view_output_var) {
  PADDLE_ENFORCE_EQ(
      input_var->Var().IsInitialized(),
      true,
      paddle::platform::errors::InvalidArgument(
          "Tensor %s has not been initialized!", input_var->name()));

  if (phi::DenseTensor::classof(input_var->GetTensorBase().get())) {
    auto input_dense_tensor =
        std::dynamic_pointer_cast<phi::DenseTensor>(input_var->GetTensorBase());
    PADDLE_ENFORCE_EQ(
        input_dense_tensor->IsInitialized(),
        true,
        paddle::platform::errors::InvalidArgument(
            "DenseTensor %s has not been initialized!", input_var->name()));

    auto* view_output_tensor =
        view_output_var->MutableVar()->GetMutable<phi::DenseTensor>();
    view_output_tensor->ShareBufferWith(*input_dense_tensor);
    view_output_tensor->ShareInplaceVersionCounterWith(*input_dense_tensor);

    VLOG(3) << "Perform View between Output Var(" << view_output_var->name()
            << ") and Input Var(" << input_var->name()
            << "), share allocation and inplace version.";
  }
}

void EagerUtils::HandleViewBetweenInputAndOutput(
    const paddle::experimental::Tensor& input_tensor,
    paddle::experimental::Tensor* view_output_tensor) {
  PADDLE_ENFORCE_EQ(
      input_tensor.initialized(),
      true,
      paddle::platform::errors::InvalidArgument(
          "Tensor %s has not been initialized!", input_tensor.name()));

  if (input_tensor.is_dense_tensor()) {
    auto input_dense_tensor =
        std::dynamic_pointer_cast<phi::DenseTensor>(input_tensor.impl());
    if (view_output_tensor->impl() == nullptr) {
      view_output_tensor->set_impl(std::make_shared<phi::DenseTensor>());
    }
    auto view_output_dense_tensor =
        std::dynamic_pointer_cast<phi::DenseTensor>(view_output_tensor->impl());
    view_output_dense_tensor->ShareBufferWith(*input_dense_tensor);
    view_output_dense_tensor->ShareInplaceVersionCounterWith(
        *input_dense_tensor);

    VLOG(4) << "Perform View between Output Tensor("
            << view_output_tensor->name() << ") and Input Tensor("
            << input_tensor.name()
            << "), share allocation and inplace version.";
  }
}

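// GetOutput/GetOutputs convert EagerVariable results back into
// paddle::experimental::Tensor objects.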
std::vector<paddle::experimental::Tensor> EagerUtils::GetOutputs(
    const std::vector<std::shared_ptr<EagerVariable>>& outs) {
  std::vector<paddle::experimental::Tensor> res;
  res.reserve(outs.size());
  for (const auto& out : outs) {
    PADDLE_ENFORCE_NOT_NULL(
        out.get(),
        paddle::platform::errors::Fatal(
            "Eager Tensor %s is null and cannot be copied. "
            "We are trying to Get Output tensor from its "
            "shared_ptr, this error may indicate some outputs "
            "are nullptr",
            out->name()));
    res.emplace_back(out->GetTensorBase(), out->name());
  }
  return res;
}

paddle::experimental::Tensor EagerUtils::GetOutput(
    const std::shared_ptr<EagerVariable>& out) {
  PADDLE_ENFORCE_NOT_NULL(
      out.get(),
      paddle::platform::errors::Fatal(
          "Eager Tensor %s is null and cannot be copied. We "
          "are tring to Get Output tensor from its shared_ptr, "
          "this error may indicate output is nullptr",
          out->name()));
  return paddle::experimental::Tensor(out->GetTensorBase(), out->name());
}

void EagerUtils::GetOutput(const std::shared_ptr<EagerVariable>& out,
                           paddle::experimental::Tensor* out_var) {
  PADDLE_ENFORCE_NOT_NULL(
      out_var,
      paddle::platform::errors::Fatal(
          "Tensor is null and cannot be copied. "
          "We are trying to OverwriteOutput from its "
          "shared_ptr, this error may indicate some outputs "
          "are nullptr"));
  out_var->set_impl(out->GetTensorBase());
  out_var->set_name(out->name());
}

void EagerUtils::GetOutputs(
    const std::vector<std::shared_ptr<EagerVariable>>& outs,
    std::vector<paddle::experimental::Tensor>* result) {
  for (size_t i = 0; i < outs.size(); i++) {
    result->emplace_back(outs[i]->GetTensorBase());
  }
}

void EagerUtils::GetOutputs(
    const std::vector<std::shared_ptr<EagerVariable>>& outs,
    const std::vector<paddle::experimental::Tensor*>& out_var) {
  for (size_t i = 0; i < outs.size(); i++) {
    PADDLE_ENFORCE_NOT_NULL(
        out_var[i],
        paddle::platform::errors::Fatal(
            "Tensor is null and cannot be copied. "
            "We are trying to OverwriteOutput from its "
            "shared_ptr, this error may indicate some outputs "
            "are nullptr"));
    out_var[i]->set_impl(outs[i]->GetTensorBase());
  }
}

void EagerUtils::GetOutputs(const std::shared_ptr<EagerVariable>& out,
                            std::vector<paddle::experimental::Tensor>* result) {
  result->emplace_back(out->GetTensorBase());
}

void EagerUtils::GetOutputs(
    const std::shared_ptr<EagerVariable>& out,
    const std::vector<paddle::experimental::Tensor*>& out_var) {
  PADDLE_ENFORCE_NOT_NULL(
      out_var[0],
      paddle::platform::errors::Fatal(
          "Tensor is null and cannot be copied. "
          "We are trying to OverwriteOutput from its "
          "shared_ptr, this error may indicate some outputs "
          "are nullptr"));
  out_var[0]->set_impl(out->GetTensorBase());
}

void EagerUtils::Output2Result(
    const std::vector<paddle::experimental::Tensor*>& out_var,
    std::vector<paddle::experimental::Tensor>* result) {
  result->reserve(out_var.size());
  for (size_t i = 0; i < out_var.size(); i++) {
    result->emplace_back(*out_var[i]);
396 397 398 399
  }
}

paddle::experimental::Tensor EagerUtils::RecoverTensorWrapper(
    TensorWrapper* tw) {
  return tw->recover();
}

std::vector<paddle::experimental::Tensor> EagerUtils::RecoverTensorWrapper(
    std::vector<TensorWrapper>* tw) {
  std::vector<paddle::experimental::Tensor> ret;
  for (auto& t : *tw) {
    ret.emplace_back(t.recover());
  }
  return ret;
}

// TODO(jiabin): remove all this when we fix all tests using tmp grad
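// Retains grads for the given tensor(s) when FLAGS_retain_grad_for_all_tensor
// is enabled.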
void EagerUtils::CheckAndRetainGrad(
    const paddle::experimental::Tensor& tensor) {
  VLOG(6) << "Check RetainGradForTensor: " << tensor.name();
  if (FLAGS_retain_grad_for_all_tensor) {
    VLOG(6) << "RetainGradForTensor: " << tensor.name();
    egr::egr_utils_api::RetainGradForTensor(tensor);
  }
}

void EagerUtils::CheckAndRetainGrad(
    const std::vector<paddle::experimental::Tensor>& tensors) {
  if (FLAGS_retain_grad_for_all_tensor) {
    for (auto& tensor : tensors) {
      VLOG(6) << "RetainGradForTensor: " << tensor.name();
      egr::egr_utils_api::RetainGradForTensor(tensor);
    }
  }
}

void EagerUtils::CheckAndRetainGrad(
    const std::vector<paddle::experimental::Tensor*>& tensors) {
  if (FLAGS_retain_grad_for_all_tensor) {
    for (auto& tensor : tensors) {
      VLOG(6) << "RetainGradForTensor: " << tensor->name();
      egr::egr_utils_api::RetainGradForTensor(*tensor);
    }
  }
}

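// Returns the GradNodeAccumulation of a leaf tensor, creating one when the
// tensor requires grad but has no grad node yet. Returns nullptr for
// stop-gradient tensors and throws if the existing node is not an
// accumulation node.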
std::shared_ptr<egr::GradNodeBase> EagerUtils::GetGradAccumulationNode(
    const paddle::experimental::Tensor& tensor) {
  auto* autograd_ptr = nullable_autograd_meta(tensor);
  if (!autograd_ptr) {
    return nullptr;
  }
  auto node_ptr = autograd_ptr->GetMutableGradNode();
  if (node_ptr && node_ptr.get()) {
    if (!autograd_ptr->StopGradient()) {
      auto accumulation_ptr =
          std::dynamic_pointer_cast<GradNodeAccumulation>(node_ptr);
      if (accumulation_ptr) {
        return accumulation_ptr;
      } else {
        // Current GradNode is not an egr::GradNodeAccumulation
        PADDLE_THROW(paddle::platform::errors::Fatal(
            "GetGradAccumulationNode should only be called on leaf tensor, but "
            "target tensor: %s has GradNode which is not a "
            "GradNodeAccumulation, and this should not happend unless target "
            "tensor is modified by some ops and calling set history for it.",
            tensor.name()));
      }
    } else {
      // Current Tensor does not have grad since its stop_gradient is true;
      return nullptr;
    }
  } else {
    if (!autograd_ptr->StopGradient()) {
      VLOG(6) << "Add GradNodeAccumulation for tensor: " << tensor.name();
      autograd_ptr->SetGradNode(
          std::make_shared<egr::GradNodeAccumulation>(autograd_ptr));
      return autograd_ptr->GetMutableGradNode();
    } else {
      return nullptr;
    }
  }
}

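// FillZeroForEmptyGradInput/FillZeroForEmptyOptionalGradInput replace
// uninitialized grad inputs with zero-filled tensors built from the
// corresponding GradSlotMeta, so backward kernels always receive valid
// inputs.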
void EagerUtils::FillZeroForEmptyOptionalGradInput(
    std::vector<paddle::experimental::Tensor>* in_grads,
    const std::vector<GradSlotMeta>& grad_in_metas) {
  for (size_t i = 0; i < in_grads->size(); i++) {
    paddle::experimental::Tensor& grad = (*in_grads)[i];
    if (!grad.initialized() && grad_in_metas[i].HasTensorMeta()) {
      auto tensor_with_zero = paddle::experimental::full(
          phi::vectorize(grad_in_metas[i].GetTensorMeta().dims),
          0.0,
          grad_in_metas[i].GetTensorMeta().dtype,
          grad_in_metas[i].GetPlace());
      grad.set_impl(tensor_with_zero.impl());
    }
  }
}

void EagerUtils::FillZeroForEmptyGradInput(
    paddle::experimental::Tensor* in_grad, const GradSlotMeta& grad_in_meta) {
  if (!in_grad->initialized()) {
    PADDLE_ENFORCE(
        grad_in_meta.HasTensorMeta(),
        paddle::platform::errors::Fatal(
            "Unable to fill empty grad inputs due to empty GradSlotMeta"));
    const auto& tensor_meta = grad_in_meta.GetTensorMeta();
    auto tensor_with_zero =
        paddle::experimental::full(phi::vectorize(tensor_meta.dims),
                                   0.0,
                                   tensor_meta.dtype,
                                   grad_in_meta.GetPlace());
    in_grad->set_impl(tensor_with_zero.impl());
  }
}

void EagerUtils::FillZeroForEmptyOptionalGradInput(
    paddle::experimental::Tensor* in_grad, const GradSlotMeta& grad_in_meta) {
  if (!in_grad->initialized() && grad_in_meta.HasTensorMeta()) {
    const auto& tensor_meta = grad_in_meta.GetTensorMeta();
    auto tensor_with_zero =
        paddle::experimental::full(phi::vectorize(tensor_meta.dims),
                                   0.0,
                                   tensor_meta.dtype,
                                   grad_in_meta.GetPlace());
    in_grad->set_impl(tensor_with_zero.impl());
  }
}

void EagerUtils::FillZeroForEmptyGradInput(
    std::vector<paddle::experimental::Tensor>* in_grads,
    const std::vector<GradSlotMeta>& grad_in_metas) {
  for (size_t i = 0; i < in_grads->size(); i++) {
    FillZeroForEmptyGradInput(&in_grads->at(i), grad_in_metas[i]);
  }
}

}  // namespace egr