// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "paddle/fluid/framework/executor.h"
#include "paddle/fluid/framework/new_executor/standalone_executor.h"
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/framework/operator.h"
#include "paddle/fluid/operators/controlflow/control_flow_op_helper.h"
#include "paddle/fluid/operators/controlflow/while_op_helper.h"

#ifdef PADDLE_WITH_MKLDNN
#include "paddle/fluid/platform/mkldnn_helper.h"
#endif
namespace paddle {
namespace framework {
class InferShapeContext;
class OpDesc;
class VarDesc;
}  // namespace framework
}  // namespace paddle

namespace paddle {
namespace operators {

using StepScopeVar = std::vector<framework::Scope *>;

namespace {  // NOLINT
static std::string GetSkipEagerDeletionVarsDebugString(
    const std::vector<std::string> &vars) {
  std::string str = "Skip " + std::to_string(vars.size()) +
                    " var(s) in eager deletion mode: ";
  for (auto &var : vars) {
    str.append(var);
    str.push_back(' ');
  }
  return str;
}

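// Copy the DenseTensor held by `var_name` in `scope` to `dst_place` and point
// the variable at the copied storage, preserving the tensor's metadata.
// Non-DenseTensor variables and tensors already on dst_place are left as-is.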
static void TransferVariablePlace(const framework::Scope *scope,
                                  const std::string &var_name,
                                  const phi::Place &dst_place,
                                  const platform::DeviceContext &dev_ctx) {
  framework::Variable *var = scope->FindVar(var_name);
  if (var == nullptr) {
    VLOG(4) << "[TransferVariablePlace] lost in_var: " << var_name;
    return;
  }
  if (var->Type() != framework::proto::VarType::LOD_TENSOR) {
    VLOG(10) << "[TransferVariablePlace] " << var_name << " type changed: "
             << framework::TransToPhiDataType(
                    framework::ToVarType(var->Type()));
    return;
  }
  phi::DenseTensor *t = var->GetMutable<phi::DenseTensor>();
  if (t->place() == dst_place) {
    VLOG(10) << "[TransferVariablePlace] no need to transfer: " << var_name;
    return;
  }

  // Stack-allocated temporary: TensorCopy fills it with a copy on dst_place,
  // and its ref-counted holder is handed over to the original tensor below,
  // so nothing is leaked once this function returns.
  phi::DenseTensor new_t;
  framework::TensorCopy(*t, dst_place, &new_t);
  dev_ctx.Wait();

  t->set_meta(new_t.meta());
  t->ResetHolder(new_t.Holder());

  VLOG(4) << "[TransferVariablePlace] " << var_name
          << " place: " << new_t.place();
}

}  // namespace

class WhileOp : public framework::OperatorBase {
 public:
  WhileOp(const std::string &type,
          const framework::VariableNameMap &inputs,
          const framework::VariableNameMap &outputs,
          const framework::AttributeMap &attrs)
      : framework::OperatorBase(type, inputs, outputs, attrs) {}

 private:
  void RunImpl(const framework::Scope &scope,
               const platform::Place &dev_place) const override {
    PADDLE_ENFORCE_NOT_NULL(scope.FindVar(Input(kCondition)),
                            platform::errors::NotFound(
                                "Input(Condition) of WhileOp is not found."));

    auto &cond = scope.FindVar(Input(kCondition))->Get<phi::DenseTensor>();
    PADDLE_ENFORCE_EQ(
        cond.numel(),
        1,
        platform::errors::InvalidArgument(
            "The numel of Input(Condition) of WhileOp must be 1. But now "
            "the Condition's numel is %d.\n",
            cond.numel()));

#ifdef PADDLE_WITH_MKLDNN
    // An Executor clears the oneDNN cache and resets the registered model
    // data layout when it is destroyed. This is unwanted for nested
    // Executors (executors declared inside control ops).
    platform::DontClearMKLDNNCache(dev_place);
#endif
    auto *block = Attr<framework::BlockDesc *>(kStepBlock);

    // get device context from pool
    platform::DeviceContextPool &pool = platform::DeviceContextPool::Instance();
    auto &dev_ctx = *pool.Get(dev_place);

    bool is_test = Attr<bool>("is_test");

    std::set<std::string> no_copy_var_names;
    if (!is_test) {
      // set all persistable parameters into no_copy_var_names.
      auto *global_block = block;

      while (global_block->ID() != 0) {
        global_block = global_block->ParentBlock();
      }
      auto all_vars = global_block->AllVars();
      std::for_each(all_vars.begin(),
                    all_vars.end(),
                    [&no_copy_var_names](framework::VarDesc *var) {
                      if (var->IsParameter())
                        no_copy_var_names.insert(var->Name());
                    });

      const std::vector<framework::OpDesc *> &all_ops = block->AllOps();
      for (const framework::OpDesc *op : all_ops) {
        const framework::VariableNameMap &input_var_names = op->Inputs();
        const framework::VariableNameMap &output_var_names = op->Outputs();
        for (auto &ipt : input_var_names) {
          for (const std::string &var_name : ipt.second) {
            if (StrInVaraiableNameMap(var_name, output_var_names)) {
              no_copy_var_names.insert(var_name);
            }
          }
        }
      }
    }

    auto step_scopes =
        scope.FindVar(Output(kStepScopes))->GetMutable<StepScopeVar>();

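    // Step scopes may be left over from a previous run of this op; drop them
    // so that every run starts with an empty StepScopes output.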
    if (step_scopes->size() > 0) {
      platform::DeviceContextPool::Instance().Get(dev_place)->Wait();
      for (auto &s : *step_scopes) {
        if (scope.HasKid(s)) {
          scope.DeleteScope(s);
        }
      }
      step_scopes->clear();
    }

    PADDLE_ENFORCE_EQ(step_scopes->size(),
                      0,
                      platform::errors::PreconditionNotMet(
                          "The Output(StepScope) of WhileOp should be empty."));

    bool cond_data = GetCondData(cond);
    auto &skip_vars = Attr<std::vector<std::string>>(kSkipEagerDeletionVars);
    VLOG(2) << GetSkipEagerDeletionVarsDebugString(skip_vars);

    // note(lvyongkang): The assign op in a while loop may change the place of
    // a variable. However, InterpreterCore fixes the kernels of all ops during
    // its first run, so a CPU tensor may become a GPU tensor after the first
    // run, which leads to a segmentation fault when that tensor is later used
    // by a CPU kernel. Here we record the place of every input and restore it
    // after InterpreterCore::Run().
    std::map<std::string, phi::Place> input_var_original_places;
    for (const auto &in_name : Inputs(kX)) {
      framework::Variable *var = scope.FindVar(in_name);
      if (var == nullptr) {
        VLOG(4) << "[while op] input not found: " << in_name;
        continue;  // `var` is dereferenced below; skip missing inputs
      }

      if (var->Type() == framework::proto::VarType::LOD_TENSOR) {
        input_var_original_places[in_name] =
            (var->Get<phi::DenseTensor>()).place();
      } else {
        VLOG(10) << "[while op] skip backup input " << in_name << " type: "
                 << framework::TransToPhiDataType(
                        framework::ToVarType(var->Type()));
      }
    }

    LOG_FIRST_N(INFO, 1) << "[ControlFlow][WhileOp] New Executor is Running.";
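    // Create the InterpreterCore lazily and cache it across runs; rebuild it
    // only when the target place changes.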
    if (!core_ || !platform::is_same_place(core_->GetPlace(), dev_place)) {
      framework::Scope placeholder;  // Don't care if it's valid; it is only
                                     // used to initialize InterpreterCore
      framework::interpreter::ExecutionConfig execution_config;
      execution_config.create_local_scope = false;
      execution_config.used_for_control_flow_op = true;
      execution_config.skip_gc_vars =
          std::set<std::string>(skip_vars.begin(), skip_vars.end());

      core_.reset(new framework::InterpreterCore(
          dev_place, *block, &placeholder, execution_config));
    }

    if (!is_test) {
      while (cond_data) {
        auto &current_scope = scope.NewScope();
        step_scopes->push_back(&current_scope);

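        // Copy every input tensor not listed in no_copy_var_names into the
        // step scope under a temporary suffixed name (kSuffix); the copies
        // are renamed back to the original names once the step finishes, so
        // in-place writes inside the block cannot clobber outer variables.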
        std::vector<std::string> rename_vars;
        for (const std::string &input_var_name : Inputs(kX)) {
          if (no_copy_var_names.find(input_var_name) ==
              no_copy_var_names.end()) {
            std::string input_var_rename = input_var_name + kSuffix;
            framework::Variable *input_var = scope.FindVar(input_var_name);
            if (input_var->IsType<phi::DenseTensor>()) {
              rename_vars.push_back(input_var_rename);
              auto input_var_tensor = input_var->Get<phi::DenseTensor>();
              auto *rename_input_var_tensor =
                  current_scope.Var(input_var_rename)
                      ->GetMutable<phi::DenseTensor>();
              framework::TensorCopy(
                  input_var_tensor, dev_place, rename_input_var_tensor);
              rename_input_var_tensor->set_lod(input_var_tensor.lod());
            }
          }
        }

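        // Bind the freshly created step scope to the cached InterpreterCore
        // and run the loop body once.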
        BuildScopeForControlFlowOp(*core_, *block, &current_scope);
        core_->reset_scope(&current_scope);
        core_->Run({}, false);

        // restore inputs place
        for (const auto &n : input_var_original_places) {
          const std::string &in_name = n.first;
          const phi::Place &original_place = n.second;
          // input vars exist in `scope` not `current_scope`
          TransferVariablePlace(&scope, in_name, original_place, dev_ctx);
        }

        for (auto &var_rename : rename_vars) {
          std::string input_var_name =
              var_rename.substr(0, var_rename.size() - strlen(kSuffix));
          current_scope.Rename(var_rename, input_var_name);
        }
        cond_data = GetCondData(
            scope.FindVar(Input(kCondition))->Get<phi::DenseTensor>());
      }
    } else {
      auto &current_scope = scope.NewScope();

      BuildScopeForControlFlowOp(*core_, *block, &current_scope);
      core_->reset_scope(&current_scope);

      while (cond_data) {
        for (auto &name : current_scope.LocalVarNames()) {
          auto *var = current_scope.Var(name);
          if (var->IsType<phi::DenseTensor>()) {
            // Clear all lod information for all lod_tensors.
            auto *t = var->GetMutable<phi::DenseTensor>();
            framework::LoD empty_lod;
            t->set_lod(empty_lod);
          } else if (var->IsType<framework::LoDTensorArray>()) {
            // Clear elements of all tensor arrays.
            auto *t = var->GetMutable<framework::LoDTensorArray>();
            t->clear();
          }
        }

        core_->Run({}, false);

        cond_data = GetCondData(
            scope.FindVar(Input(kCondition))->Get<phi::DenseTensor>());
      }

      scope.DeleteScope(&current_scope);
    }
  }

 private:
  mutable std::shared_ptr<framework::Executor> executor_{nullptr};
  mutable std::unique_ptr<framework::ExecutorPrepareContext> ctx_{nullptr};
  mutable std::shared_ptr<framework::InterpreterCore> core_{nullptr};
};

class WhileOpMaker : public framework::OpProtoAndCheckerMaker {
 public:
  void Make() override {
    AddInput(kX,
             "A set of variables, which are required by operators inside the "
             "block of While Op.")
        .AsDuplicable();
    AddInput(
        kCondition,
        "(Bool) A scalar. When it's False, the While Op will be terminated.")
        .AsDuplicable();
    AddOutput(kOutputs,
              "A set of variables, which will be assigned with values "
              "generated by the operators inside the block of While Op.")
        .AsDuplicable();
    AddOutput(kStepScopes,
              "(StepScopeVar) A vector of local scopes whose size equals the "
              "step number of While Op. The i'th scope stores temporary "
              "variables generated in the i'th step.");
    AddAttr<framework::BlockDesc *>(kStepBlock,
                                    "The step block inside WhileOp");
    AddAttr<bool>("is_test",
                  "(bool, default false) Set to true for inference only, false "
                  "for training. Some layers may run faster when this is true.")
        .SetDefault(false);
    AddComment(R"DOC(
While operator

Repeatedly executes the operators in the step block until the scalar
Condition input becomes false.
)DOC");
  }
};

class WhileGradOp : public framework::OperatorBase {
 public:
  WhileGradOp(const std::string &type,
              const framework::VariableNameMap &inputs,
              const framework::VariableNameMap &outputs,
              const framework::AttributeMap &attrs)
      : framework::OperatorBase(type, inputs, outputs, attrs) {}

 private:
  void RunImpl(const framework::Scope &scope,
               const platform::Place &dev_place) const override {
    PADDLE_ENFORCE_EQ(
        Attr<bool>("is_test"),
        false,
        platform::errors::InvalidArgument(
            "WhileGradOp is only callable when is_test is false."));
    // get device context from pool
    platform::DeviceContextPool &pool = platform::DeviceContextPool::Instance();
    auto &dev_ctx = *pool.Get(dev_place);

    auto *block = Attr<framework::BlockDesc *>(kStepBlock);
    auto *parent_block = block->ParentBlock();

    auto &skip_vars = Attr<std::vector<std::string>>(kSkipEagerDeletionVars);
    VLOG(2) << GetSkipEagerDeletionVarsDebugString(skip_vars);

    auto *step_scopes =
        scope.FindVar(Input(kStepScopes))->GetMutable<StepScopeVar>();

    auto outside_og_names = Inputs(framework::GradVarName(kOutputs));
    auto inside_og_names =
        Attr<std::vector<std::string>>("original_output_grad");

    PADDLE_ENFORCE_EQ(outside_og_names.size(),
                      inside_og_names.size(),
                      platform::errors::InvalidArgument(
                          "The number of original output gradient names "
                          "does not match the number of backward input "
                          "gradient names. The number of backward input "
                          "names is %d and the number of original output "
                          "gradient names is %d.",
                          outside_og_names.size(),
                          inside_og_names.size()));

    LOG_FIRST_N(INFO, 1)
        << "[ControlFlow][WhileGradOp] New Executor is Running.";
    if (!core_ || !platform::is_same_place(core_->GetPlace(), dev_place)) {
      framework::Scope placeholder;  // Don't care if it's valid; it is only
                                     // used to initialize InterpreterCore
      framework::interpreter::ExecutionConfig execution_config;
      execution_config.create_local_scope = false;
      execution_config.used_for_control_flow_op = true;
      execution_config.skip_gc_vars =
          std::set<std::string>(skip_vars.begin(), skip_vars.end());

      core_.reset(new framework::InterpreterCore(
          dev_place, *block, &placeholder, execution_config));
    }

    for (auto cur_scope_iter = step_scopes->rbegin();
         cur_scope_iter != step_scopes->rend();
         ++cur_scope_iter) {
      VLOG(3) << "Start backward at time_step "
              << cur_scope_iter - step_scopes->rbegin();
      framework::Scope &cur_scope = **cur_scope_iter;
      // Link OG from outside to inside
      for (size_t i = 0; i < outside_og_names.size(); ++i) {
        auto outside_og_name = outside_og_names[i];
        auto inside_og_name = inside_og_names[i];
        VLOG(8) << "Linking outside " << outside_og_name << " --> inside "
                << inside_og_name;
        if (scope.FindVar(outside_og_name) == nullptr) {
          continue;
        }

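        // In the first backward step an outside output-grad may still be
        // uninitialized (its forward output was never used); fill it with
        // zeros shaped by the VarDesc from the parent block so the step
        // block can consume it.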
        if (cur_scope_iter == step_scopes->rbegin()) {
          auto &og_outside = *scope.FindVar(outside_og_name);
          if (og_outside.IsType<phi::DenseTensor>() &&
              !og_outside.GetMutable<phi::DenseTensor>()->IsInitialized()) {
            auto *var_desc = parent_block->FindVarRecursive(outside_og_name);
            PADDLE_ENFORCE_NOT_NULL(var_desc,
                                    platform::errors::PreconditionNotMet(
                                        "Var `%s` is not found in parent "
                                        "block, can't fill constant.",
                                        outside_og_name));
            auto shape = var_desc->GetShape();
            VLOG(8) << "Found uninitialized tensor " << outside_og_name
                    << " in step 0, fill it with 0.0f. dims="
                    << phi::make_ddim(shape);
            framework::AttributeMap attrs;
            attrs["dtype"] = var_desc->GetDataType();
            attrs["shape"] = phi::vectorize<int>(phi::make_ddim(shape));
            attrs["value"] = 0.0f;

            auto var_name = outside_og_name;
            auto zero_op =
                framework::OpRegistry::CreateOp("fill_constant",
                                                framework::VariableNameMap{},
                                                {{"Out", {var_name}}},
                                                attrs);
            zero_op->Run(scope, dev_place);
          }
        }

        auto &og_outside = *scope.FindVar(outside_og_name);
        auto &og_inside = *cur_scope.Var(inside_og_name);
        if (og_outside.IsType<phi::DenseTensor>()) {
          auto &outside_tensor = og_outside.Get<phi::DenseTensor>();
          auto &inside_tensor = *og_inside.GetMutable<phi::DenseTensor>();
          inside_tensor.set_lod(outside_tensor.lod());
          inside_tensor.ShareDataWith(outside_tensor);
        } else if (og_outside.IsType<framework::LoDTensorArray>()) {
          auto outside_array =
              og_outside.GetMutable<framework::LoDTensorArray>();
          auto &inside_array =
              *og_inside.GetMutable<framework::LoDTensorArray>();
          inside_array.clear();
          inside_array.resize(outside_array->size());
          VLOG(8) << outside_og_name << " size = " << outside_array->size();

          for (size_t j = 0; j < inside_array.size(); ++j) {
            if (!outside_array->at(j).IsInitialized()) {
              outside_array->at(j).Resize({0});
            }
            VLOG(8) << j << " " << outside_array->at(j).numel();
            if (outside_array->at(j).numel() != 0) {
              inside_array[j].set_lod(outside_array->at(j).lod());
              inside_array[j].ShareDataWith(outside_array->at(j));
            } else {
              PADDLE_ENFORCE_EQ(
                  inside_array[j].numel(),
                  0,
                  platform::errors::InvalidArgument(
                      "The numel of %d-th element of var %s (LoDTensorArray) "
                      "in while block must be 0, but received its numel is %d.",
                      j,
                      inside_og_name,
                      inside_array[j].numel()));
            }
          }
        } else {
          PADDLE_THROW(platform::errors::Unimplemented(
              "Currently only support phi::DenseTensor and "
              "phi::DenseTensorArray in "
              "WhileGradOp."));
        }
      }

      BuildScopeForControlFlowOp(*core_, *block, *cur_scope_iter);
      core_->reset_scope(*cur_scope_iter);
      core_->Run({}, false);

      // The Outputs(kXGRAD) contains the names of the gradient of parameters
      // and inputs.
      auto &pg_ig_names = Outputs(kXGRAD);
      auto &p_names = Inputs(kX);
      PADDLE_ENFORCE_EQ(pg_ig_names.size(),
                        p_names.size(),
                        platform::errors::PreconditionNotMet(
                            "The number of names in Outputs(X@GRAD) does not "
                            "match the number of names in Inputs(X). The "
                            "number of names in Outputs(X@GRAD) is %d and "
                            "the number of names in Inputs(X) is %d.",
                            pg_ig_names.size(),
                            p_names.size()));
      for (size_t param_id = 0; param_id < pg_ig_names.size(); ++param_id) {
        if (pg_ig_names[param_id] == framework::kEmptyVarName) {
          continue;  // parameter doesn't have gradient
        }
        auto inside_grad_name = framework::GradVarName(p_names[param_id]);

        // for some grad_op, their input doesn't have gradient,
        // for example lookup_table_grad_op, the input(Idx) doesn't have
        // gradient.
        auto pg_ig_var = cur_scope.FindVar(inside_grad_name);
        PADDLE_ENFORCE_NOT_NULL(
            pg_ig_var,
            platform::errors::NotFound("Variable %s is not found.",
                                       inside_grad_name));
        if (pg_ig_var->IsType<framework::LoDTensorArray>()) {
          auto pg_ig_lod_t_arr =
              pg_ig_var->GetMutable<framework::LoDTensorArray>();
          bool empty = true;
          for (auto &each : *pg_ig_lod_t_arr) {
            if (each.numel() != 0) {
              empty = false;
              break;
            }
          }
          if (empty) {
            LOG(WARNING) << pg_ig_names[param_id]
                         << " is not found in cur_scope.";
            continue;
          }
        }

        //  // TODO(tonyyang-svail): Not sure we need the following
        //  // If it does not compute the gradient of that variable inside
        //  // the rnn, just continue
        //  if (local_var_names.find(inside_grad_name) ==
        //      local_var_names.end()) {
        //    continue;
        //  }

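        // A gradient that appears both as an input (outside output-grad) and
        // as an output (X@GRAD) of while_grad must not be zero-filled or
        // summed below; it is shared back to the outer scope instead.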
        auto is_var_input_and_output =
            std::find(outside_og_names.begin(),
                      outside_og_names.end(),
                      pg_ig_names[param_id]) != outside_og_names.end();

        // zero gradient variable in step 0
        if (cur_scope_iter == step_scopes->rbegin()) {
          auto *var = (*cur_scope_iter)->FindVar(inside_grad_name);
          PADDLE_ENFORCE_NOT_NULL(
              var,
              platform::errors::NotFound("Variable %s is not found.",
                                         inside_grad_name));
          PADDLE_ENFORCE_EQ(
              var->IsType<framework::LoDTensorArray>() ||
                  var->IsType<phi::DenseTensor>(),
              true,
              platform::errors::InvalidArgument(
                  "Currently the type of var only can be LoDTensorArray, "
                  "or phi::DenseTensor, but the received var[%s] is %s.",
                  inside_grad_name,
                  framework::ToTypeName(var->Type())));

          if (!is_var_input_and_output && var->IsType<phi::DenseTensor>()) {
            auto &inside_tensor = var->Get<phi::DenseTensor>();
            framework::AttributeMap attrs;
            attrs["dtype"] =
                framework::TransToProtoVarType(inside_tensor.dtype());
            attrs["shape"] = phi::vectorize<int>(inside_tensor.dims());
            attrs["value"] = 0.0f;

            auto var_name = pg_ig_names[param_id];
            auto zero_op =
                framework::OpRegistry::CreateOp("fill_constant",
                                                framework::VariableNameMap{},
                                                {{"Out", {var_name}}},
                                                attrs);
            zero_op->Run(scope, dev_place);
            scope.FindVar(var_name)->GetMutable<phi::DenseTensor>()->set_lod(
                inside_tensor.lod());
          }
        }
        if (!is_var_input_and_output) {
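          // Accumulate this step's gradient into the running total: rename
          // the freshly computed gradient, sum it with the accumulator held
          // under the original name, then rename it back.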
          auto new_inside_name = cur_scope.Rename(inside_grad_name);
          auto sum_op = framework::OpRegistry::CreateOp(
              "sum",
              {{"X", {pg_ig_names[param_id], new_inside_name}}},
              {{"Out", {pg_ig_names[param_id]}}},
              framework::AttributeMap{{"use_mkldnn", {false}}});
          sum_op->Run(cur_scope, dev_place);
          cur_scope.Rename(new_inside_name, inside_grad_name);
        } else {
          ShareVariable(cur_scope, scope, pg_ig_names[param_id]);
        }
      }
      dev_ctx.Wait();
      const_cast<framework::Scope &>(scope).DeleteScope(&cur_scope);
    }
    step_scopes->clear();
  }

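  // Share the underlying storage of variable `name` from `source` into
  // `dest` (DenseTensor directly, LoDTensorArray element by element). Used
  // when a gradient is both an input and an output of the while_grad op.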
  void ShareVariable(const framework::Scope &source,
                     const framework::Scope &dest,
                     std::string name) const {
    auto from_var = source.FindVar(name);
    auto to_var = dest.FindVar(name);
    if (from_var->IsType<phi::DenseTensor>()) {
      if (from_var->Get<phi::DenseTensor>().IsInitialized()) {
        to_var->GetMutable<phi::DenseTensor>()->ShareDataWith(
            from_var->Get<phi::DenseTensor>());
      }
    } else if (from_var->IsType<framework::LoDTensorArray>()) {
      auto from_arr = from_var->GetMutable<framework::LoDTensorArray>();
      auto to_arr = to_var->GetMutable<framework::LoDTensorArray>();
      to_arr->clear();
      to_arr->resize(from_arr->size());
      for (size_t i = 0; i < to_arr->size(); ++i) {
        if (from_arr->at(i).IsInitialized()) {
          to_arr->at(i).ShareDataWith(from_arr->at(i));
        }
      }
    }
  }

 private:
  mutable std::shared_ptr<framework::Executor> executor_{nullptr};
  mutable std::unique_ptr<framework::ExecutorPrepareContext> ctx_{nullptr};
  mutable std::shared_ptr<framework::InterpreterCore> core_{nullptr};
};

template <typename T>
class WhileGradOpMaker : public framework::SingleGradOpMaker<T> {
 public:
  using framework::SingleGradOpMaker<T>::SingleGradOpMaker;

 protected:
  void Apply(GradOpPtr<T> while_grad) const override {
    while_grad->SetType("while_grad");
    while_grad->SetInput(kX, this->Input(kX));
    while_grad->SetInput(kOutputs, this->Output(kOutputs));
    while_grad->SetInput(kStepScopes, this->Output(kStepScopes));

    auto *grad_block = this->grad_block_[0];
    auto *fwd_block = grad_block->ForwardBlock();
    auto *parent_block = grad_block->ParentBlock();

    // Not all of IGs will be generated by inner gradient operators of while op.
    // Ignore IGs that is not generated by the inside block.
    std::unordered_set<std::string> inner_op_outputs;
    for (const auto *op : grad_block->AllOps()) {
      for (auto &oname : op->OutputArgumentNames()) {
        inner_op_outputs.insert(oname);
      }
    }
    auto igs = this->InputGrad(kX, /*do not drop empty gradient*/ false);

    for (auto &each_ig : igs) {
      if (inner_op_outputs.find(each_ig) == inner_op_outputs.end()) {
        VLOG(8) << "Ignore " << each_ig;
        each_ig = framework::kEmptyVarName;
      }
    }
    while_grad->SetOutput(framework::GradVarName(kX), igs);

    // OG should be re-calculated by step blocks, since many outputs of while op
    // do not need to calculate gradients.
    std::unordered_set<std::string> block_ins;
    block_ins.reserve(this->Input(kX).size() + this->Output(kOutputs).size());
    for (auto &p : this->Input(kX)) {
      block_ins.insert(p);
    }
    for (auto &o : this->Output(kOutputs)) {
      block_ins.insert(o);
    }
    std::unordered_set<std::string> output_grads;

    for (const auto *op : grad_block->AllOps()) {
      for (auto &input_name : op->InputArgumentNames()) {
        // If the input of Op has been recorded or is generated by the forward
        // block, do not make it as input again.

        // The input is located in I/O or other op's outputs or the variable is
        // located in grad_block's parents
        if (block_ins.find(input_name) != block_ins.end() ||
            (fwd_block->FindVarRecursive(input_name) != nullptr ||
             parent_block->FindVarRecursive(input_name) != nullptr)) {
          continue;
        }
        output_grads.insert(input_name);
      }
      for (auto &output_name : op->OutputArgumentNames()) {
        block_ins.insert(output_name);
      }
    }

    std::vector<std::string> output_grads_list;
    output_grads_list.resize(output_grads.size());
    std::copy(
        output_grads.begin(), output_grads.end(), output_grads_list.begin());
    while_grad->SetInput(framework::GradVarName(kOutputs), output_grads_list);

    while_grad->SetAttrMap(this->Attrs());
    while_grad->SetBlockAttr(kStepBlock, grad_block);
    // record the original output gradient names, since the gradient name of
    // while operator could be renamed.
    while_grad->SetAttr("original_output_grad", output_grads_list);

    while_grad->SetAttr(kSkipEagerDeletionVars, std::vector<std::string>());
  }
};

class WhileGradOpVarTypeInference
    : public framework::StaticGraphVarTypeInference {
 public:
  void operator()(framework::InferVarTypeContext *ctx) const override {
    auto p_names = Input(ctx, kX);
    auto pg_ig_names = Output(ctx, framework::GradVarName(kX));

    for (size_t i = 0; i < p_names.size(); ++i) {
      if (HasVar(ctx, pg_ig_names[i])) {
        VLOG(5) << "Setting " << pg_ig_names[i] << " following " << p_names[i]
                << " type: " << GetType(ctx, p_names[i]);
        SetType(ctx, pg_ig_names[i], GetType(ctx, p_names[i]));
        SetDataType(ctx, pg_ig_names[i], GetDataType(ctx, p_names[i]));
      }
    }
  }
};

class WhileGradOpShapeInference : public framework::InferShapeBase {
 public:
  void operator()(framework::InferShapeContext *ctx) const override {
    ctx->HasInputs(kX);
    ctx->HasOutputs(framework::GradVarName(kX));
    ctx->HasInputs(kOutputs);
    ctx->HasInputs(framework::GradVarName(kOutputs));
    auto pg_ig_names = ctx->Outputs(kXGRAD);
    auto in_var_ptrs = ctx->GetInputVarPtrs(kX);
    auto out_var_ptrs = ctx->GetOutputVarPtrs(kXGRAD);
    PADDLE_ENFORCE_EQ(in_var_ptrs.size(),
                      out_var_ptrs.size(),
                      platform::errors::InvalidArgument(
                          "The size of Inputs(X) must be the same as "
                          "the size of Outputs(X@GRAD)."));

    for (size_t i = 0; i < in_var_ptrs.size(); ++i) {
      if (pg_ig_names[i] == framework::kEmptyVarName) {
        continue;
      }
      framework::VarDesc *in_var =
          PADDLE_GET(framework::VarDesc *, in_var_ptrs[i]);
      PADDLE_GET(framework::VarDesc *, out_var_ptrs[i])
          ->SetShape(in_var->GetShape());
    }
  }
};

}  // namespace operators
}  // namespace paddle

REGISTER_OPERATOR(
    while,
    paddle::operators::WhileOp,
    paddle::operators::WhileOpMaker,
    paddle::operators::WhileGradOpMaker<paddle::framework::OpDesc>);
REGISTER_OPERATOR(while_grad,
                  paddle::operators::WhileGradOp,
                  paddle::operators::WhileGradOpShapeInference,
                  paddle::operators::WhileGradOpVarTypeInference);