composite_grad_desc_maker.h
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#pragma once
#include <algorithm>
#include <memory>
#include <string>
#include <unordered_map>
#include <unordered_set>
#include <utility>
#include <vector>

#include "paddle/fluid/framework/op_call_stack.h"
#include "paddle/fluid/framework/op_desc.h"
#include "paddle/fluid/framework/operator.h"
#include "paddle/fluid/framework/program_desc.h"
#include "paddle/fluid/framework/type_defs.h"
#include "paddle/fluid/prim/utils/static/desc_tensor.h"
#include "paddle/fluid/prim/utils/static/static_global_utils.h"
#include "paddle/phi/api/include/tensor.h"
#include "paddle/phi/core/enforce.h"
#include "paddle/phi/core/flags.h"

DECLARE_string(tensor_operants_mode);

namespace paddle {
namespace prim {

/*
  This functor class is responsible for creating the gradient ops for the
  given operator fwd_op_. After it is called (through operator()), the pairs
  of (gradient variable, corresponding input variable of fwd_op_) will be
  added to grad_to_var. If an input variable of fwd_op_ is contained in
  no_grad_set, its gradient variable will either be ignored or set to
  kEmptyVarName, depending on the template argument DropEmptyIG in the
  derived classes.
 */
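
/*
  A minimal sketch (for illustration only, not part of this header) of how a
  derived maker is typically written. The op name "tanh", the argument names
  "Out"/"X", and the prim::tanh_grad call below are assumptions made for this
  example.

    class TanhCompositeGradOpMaker : public prim::CompositeGradOpMakerBase {
     public:
      using prim::CompositeGradOpMakerBase::CompositeGradOpMakerBase;

      void Apply() override {
        // Wrap the forward output and its incoming gradient as desc-backed
        // tensors, and fetch the tensor that will hold the input gradient.
        paddle::Tensor out = this->GetSingleForwardOutput("Out");
        paddle::Tensor out_grad = this->GetSingleOutputGrad("Out");
        paddle::Tensor x_grad = this->GetSingleInputGrad("X");
        paddle::Tensor* x_grad_ptr = this->GetOutputPtr(&x_grad);
        std::string x_grad_name = this->GetOutputName(x_grad);
        // Compose the backward formula from primitive ops; each call records
        // an op into the acting block.
        prim::tanh_grad<prim::DescTensor>(out, out_grad, x_grad_ptr);
        // Rename the temporary result back to the expected grad var name.
        this->RecoverOutputName(x_grad, x_grad_name);
      }
    };
 */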

class CompositeGradOpMakerBase {
 public:
  explicit CompositeGradOpMakerBase(
      const framework::OpDesc& fwd_op,
      const std::unordered_set<std::string>& no_grad_set,
      std::unordered_map<std::string, std::string>* grad_to_var,
      const framework::BlockDesc* original_block,
      const std::vector<framework::BlockDesc*>& grad_block =
          std::vector<framework::BlockDesc*>())
      : fwd_op_(fwd_op),
        no_grad_set_(no_grad_set),
        grad_to_var_(grad_to_var),
        original_block_(original_block),
        acting_program_(framework::ProgramDesc()),
        grad_block_(grad_block) {
    // TODO(jiabin): This should always be executed by a single thread...
    VLOG(6) << "Constructing Composite Grad func for " << fwd_op_.Type()
            << "_grad ";
    FLAGS_tensor_operants_mode = "static";
    StaticCompositeContext::Instance().SetBlock(
        acting_program_.MutableBlock(0));
  }

  virtual ~CompositeGradOpMakerBase() = default;

  virtual std::vector<std::unique_ptr<framework::OpDesc>> operator()() {
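    // Apply() records the composite backward rule into the acting block via
    // prim ops; the ops recorded there are then copied out as grad OpDescs.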
    VLOG(3) << "Runing Composite Grad func for " << fwd_op_.Type() << "_grad ";
    this->Apply();
    std::vector<std::unique_ptr<framework::OpDesc>> ops;
    // TODO(jiabin): Support multiple blocks later
    for (auto* op : StaticCompositeContext::Instance().GetBlock()->AllOps()) {
      ops.emplace_back(new framework::OpDesc(*op));
      ops.back()->ResetBlock();
    }
    return ops;
  }

  virtual void Apply() = 0;

  paddle::Tensor GetSingleForwardOutput(const std::string& name) {
    framework::VarDesc* out_desc = this->SingleForwardOutput(name);
    paddle::Tensor out = paddle::Tensor(std::make_shared<DescTensor>(out_desc));
    return out;
  }

  paddle::Tensor GetSingleForwardInput(const std::string& name) {
    paddle::Tensor input = paddle::Tensor(
        std::make_shared<DescTensor>(this->SingleForwardInput(name)));
    return input;
  }

  paddle::Tensor GetSingleOutputGrad(const std::string& name) {
    paddle::Tensor output_grad = paddle::Tensor(
        std::make_shared<DescTensor>(this->SingleOutputGrad(name)));
    return output_grad;
  }

  // TODO(Ruting): modify name to GetNullableSingleInputGrad after Large-scale
  // development
  paddle::Tensor GetSingleInputGrad(const std::string& name) {
    framework::VarDesc* input_grad_desc = this->SingleInputGrad(name);
    if (!input_grad_desc) return paddle::Tensor();
    paddle::Tensor input_grad =
        paddle::Tensor(std::make_shared<DescTensor>(input_grad_desc));
    return input_grad;
  }

  paddle::optional<paddle::Tensor> GetOptionalSingleForwardOutput(
      const std::string& name) {
    paddle::optional<paddle::Tensor> output_opt;
    if (fwd_op_.Outputs().find(name) != fwd_op_.Outputs().end()) {
      framework::VarDesc* output_desc = this->SingleForwardOutput(name);
      if (!output_desc) return output_opt;
      paddle::Tensor output =
          paddle::Tensor(std::make_shared<DescTensor>(output_desc));
      output_opt = paddle::make_optional<paddle::Tensor>(output);
    }
    return output_opt;
  }

  paddle::optional<paddle::Tensor> GetOptionalSingleForwardInput(
      const std::string& name) {
    paddle::optional<paddle::Tensor> input_opt;
    if (fwd_op_.Inputs().find(name) != fwd_op_.Inputs().end()) {
      framework::VarDesc* input_desc = this->SingleForwardInput(name);
      if (!input_desc) return input_opt;
      paddle::Tensor input =
          paddle::Tensor(std::make_shared<DescTensor>(input_desc));
      input_opt = paddle::make_optional<paddle::Tensor>(input);
    }
    return input_opt;
  }

  paddle::optional<paddle::Tensor> GetOptionalSingleOutputGrad(
      const std::string& name) {
    paddle::optional<paddle::Tensor> output_grad_opt;
    if (fwd_op_.Outputs().find(name) != fwd_op_.Outputs().end()) {
      framework::VarDesc* output_grad_desc = this->SingleOutputGrad(name);
      if (!output_grad_desc) return output_grad_opt;
      paddle::Tensor output_grad =
          paddle::Tensor(std::make_shared<DescTensor>(output_grad_desc));
      output_grad_opt = paddle::make_optional<paddle::Tensor>(output_grad);
    }
    return output_grad_opt;
  }

  std::vector<paddle::Tensor> GetMultiForwardOutput(const std::string& name) {
    std::vector<paddle::Tensor> outputs;
    std::vector<framework::VarDesc*> outputs_descs =
        this->MultiForwardOutput(name);
    outputs.reserve(outputs_descs.size());
    for (const auto& output_desc : outputs_descs) {
      outputs.emplace_back(
          paddle::Tensor(std::make_shared<DescTensor>(output_desc)));
    }
    return outputs;
  }

  std::vector<paddle::Tensor> GetMultiForwardInput(const std::string& name) {
    std::vector<paddle::Tensor> inputs;
    std::vector<framework::VarDesc*> inputs_descs =
        this->MultiForwardInput(name);
    inputs.reserve(inputs_descs.size());
    for (const auto& input_desc : inputs_descs) {
      inputs.emplace_back(
          paddle::Tensor(std::make_shared<DescTensor>(input_desc)));
    }
    return inputs;
  }

  std::vector<paddle::Tensor> GetMultiOutputGrad(const std::string& name) {
    std::vector<paddle::Tensor> outputs_grads;
    std::vector<framework::VarDesc*> outputs_grads_descs =
        this->MultiOutputGrad(name);
    outputs_grads.reserve(outputs_grads_descs.size());
    for (const auto& output_grad_desc : outputs_grads_descs) {
      outputs_grads.emplace_back(
          paddle::Tensor(std::make_shared<DescTensor>(output_grad_desc)));
    }
    return outputs_grads;
  }

  std::vector<paddle::Tensor> GetMultiInputGrad(const std::string& name) {
    std::vector<paddle::Tensor> inputs_grads;
    std::vector<framework::VarDesc*> inputs_grads_descs =
        this->MultiInputGrad(name);
    inputs_grads.reserve(inputs_grads_descs.size());
    for (const auto& input_grad_desc : inputs_grads_descs) {
      if (input_grad_desc) {
        inputs_grads.emplace_back(
            paddle::Tensor(std::make_shared<DescTensor>(input_grad_desc)));
      } else {
        inputs_grads.emplace_back(paddle::Tensor());
      }
    }
    return inputs_grads;
  }

  paddle::optional<std::vector<paddle::Tensor>> GetOptionalMultiForwardOutput(
      const std::string& name) {
    paddle::optional<std::vector<paddle::Tensor>> outputs_opt;
    std::vector<framework::VarDesc*> outputs_descs =
        this->MultiForwardOutput(name);
    if ((outputs_descs.empty())) {
      return outputs_opt;
    }
    std::vector<paddle::Tensor> outputs;
    outputs.reserve(outputs_descs.size());
    for (const auto& output_desc : outputs_descs) {
      if (output_desc) {
219
        outputs.emplace_back(paddle::Tensor(
220
            paddle::Tensor(std::make_shared<DescTensor>(output_desc))));
J
Jiabin Yang 已提交
221
      } else {
222
        outputs.emplace_back(paddle::Tensor(paddle::Tensor()));
J
Jiabin Yang 已提交
223 224
      }
    }
    outputs_opt = paddle::make_optional<std::vector<paddle::Tensor>>(outputs);
    return outputs_opt;
  }

  paddle::optional<std::vector<paddle::Tensor>> GetOptionalMultiForwardInput(
      const std::string& name) {
    paddle::optional<std::vector<paddle::Tensor>> inputs_opt;
    std::vector<framework::VarDesc*> inputs_descs =
        this->MultiForwardInput(name);
    if ((inputs_descs.empty())) {
      return inputs_opt;
    }
    std::vector<paddle::Tensor> inputs;
    inputs.reserve(inputs_descs.size());
    for (const auto& input_desc : inputs_descs) {
      if (input_desc) {
241
        inputs.emplace_back(paddle::Tensor(
242
            paddle::Tensor(std::make_shared<DescTensor>(input_desc))));
J
Jiabin Yang 已提交
243
      } else {
244
        inputs.emplace_back(paddle::Tensor(paddle::Tensor()));
J
Jiabin Yang 已提交
245 246
      }
    }
    inputs_opt = paddle::make_optional<std::vector<paddle::Tensor>>(inputs);
    return inputs_opt;
  }

  paddle::optional<std::vector<paddle::Tensor>> GetOptionalMultiOutputGrad(
      const std::string& name) {
    paddle::optional<std::vector<paddle::Tensor>> outputs_grads_opt;
    std::vector<framework::VarDesc*> outputs_grads_descs =
        this->MultiOutputGrad(name);
    if ((outputs_grads_descs.empty())) {
      return outputs_grads_opt;
    }
    std::vector<paddle::Tensor> outputs_grads;
    outputs_grads.reserve(outputs_grads_descs.size());
    for (const auto& output_grad_desc : outputs_grads_descs) {
      if (output_grad_desc) {
263
        outputs_grads.emplace_back(paddle::Tensor(
264
            paddle::Tensor(std::make_shared<DescTensor>(output_grad_desc))));
J
Jiabin Yang 已提交
265
      } else {
266
        outputs_grads.emplace_back(paddle::Tensor(paddle::Tensor()));
J
Jiabin Yang 已提交
267 268
      }
    }
    outputs_grads_opt =
        paddle::make_optional<std::vector<paddle::Tensor>>(outputs_grads);
    return outputs_grads_opt;
  }

  paddle::Tensor* GetOutputPtr(paddle::Tensor* input) {
    if (input->defined()) return input;
    return nullptr;
  }

  std::vector<paddle::Tensor*> GetOutputPtr(
      const std::vector<paddle::Tensor*>& inputs) {
    std::vector<paddle::Tensor*> output_ptrs;
    output_ptrs.reserve(inputs.size());
    for (const auto& input : inputs) {
      if (input->defined())
        output_ptrs.emplace_back(input);
      else
        output_ptrs.emplace_back(nullptr);
    }
    return output_ptrs;
  }

  std::string GetOutputName(const paddle::Tensor& output) {
    if (!output.defined()) return framework::kEmptyVarName;
    return static_cast<prim::DescTensor*>(output.impl().get())->Name();
  }

  std::vector<std::string> GetOutputName(
      const std::vector<paddle::Tensor>& outputs) {
    std::vector<std::string> out_names;
    out_names.reserve(outputs.size());
    for (const auto& output : outputs) {
      if (!output.defined())
        out_names.emplace_back(framework::kEmptyVarName);
      else
        out_names.emplace_back(
            static_cast<prim::DescTensor*>(output.impl().get())->Name());
    }
    return out_names;
  }

 protected:
  void CopyVarFromOrig(const std::string& name) const {
    VLOG(6) << "Copy Var: " << name << "from block: " << original_block_
            << " to block: " << StaticCompositeContext::Instance().GetBlock();
    framework::VarDesc* original_var = original_block_->FindVar(name);
    PADDLE_ENFORCE_NOT_NULL(
        original_var,
        phi::errors::InvalidArgument(
            "Can't find var: %s in block %s", name, original_block_));
    *StaticCompositeContext::Instance().GetBlock()->Var(name) = *original_var;
  }

  framework::VarDesc* SingleInputGrad(const std::string& name,
                                      bool drop_empty_grad = true) const {
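    // Maps the forward input `name` to its gradient VarDesc in the active
    // block. Returns nullptr when the input is missing, or when its gradient
    // is suppressed by no_grad_set_ and drop_empty_grad is true.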
    auto* var = this->SingleForwardInput(name);
    if (!var) {
      return nullptr;
    }
    auto var_name = var->Name();
    auto grad_var_name = framework::GradVarName(var_name);
    if (no_grad_set_.empty() || !no_grad_set_.count(grad_var_name)) {
      (*this->grad_to_var_)[grad_var_name] = var_name;
      VLOG(8) << "Valid gradients: " << grad_var_name;
    } else {
      // TODO(jiabin): Will this cause fill zeros error?
      grad_var_name = framework::kEmptyVarName;
      if (drop_empty_grad) return nullptr;
    }

    if (original_block_->HasVar(grad_var_name)) {
      // Copy Var from original block to active block, or create a new one.
      CopyVarFromOrig(grad_var_name);
      return StaticCompositeContext::Instance().GetBlock()->FindVar(
          grad_var_name);
    } else {
      return StaticCompositeContext::Instance().GetBlock()->Var(grad_var_name);
    }
  }

  framework::VarDesc* SingleOutputGrad(const std::string& name) const {
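    // Maps the forward output `name` to its output-gradient VarDesc in the
    // active block, honoring any target-gradient renaming registered in
    // StaticCompositeContext.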
    auto* var = this->SingleForwardOutput(name);
    if (!var) {
      PADDLE_THROW(platform::errors::InvalidArgument(
          "GetSingleOutputGrad for %s_grad faild, if it is Optional input,"
          "please use GetOptionalSingleOutputGrad replaced. ",
          name));
    }
    auto var_name = var->Name();
    auto grad_var_name = framework::GradVarName(var_name);
    (*this->grad_to_var_)[grad_var_name] = var_name;
    VLOG(8) << "Valid gradients: " << grad_var_name;

    auto target_grad = StaticCompositeContext::Instance().GetTargetGradName();
    if (target_grad.find(grad_var_name) != target_grad.end()) {
      grad_var_name = target_grad.at(grad_var_name);
    }

    if (original_block_->HasVar(grad_var_name)) {
      // Copy Var from original block to active block, or create a new one.
      CopyVarFromOrig(grad_var_name);
      return StaticCompositeContext::Instance().GetBlock()->FindVar(
          grad_var_name);
    } else {
      return StaticCompositeContext::Instance().GetBlock()->Var(grad_var_name);
    }
  }

  std::vector<framework::VarDesc*> MultiInputGrad(
      const std::string& name, bool drop_empty_grad = true) const {
    std::vector<std::string> ret_val;
    std::vector<framework::VarDesc*> input_grads;
    auto var_names = this->MultiForwardInputVarName(name);
    ret_val.reserve(var_names.size());
    std::transform(var_names.begin(),
                   var_names.end(),
                   std::back_inserter(ret_val),
                   [this](const std::string& fwd_var_name) -> std::string {
                     auto g_name = framework::GradVarName(fwd_var_name);
                     if (no_grad_set_.empty() || !no_grad_set_.count(g_name)) {
                       (*this->grad_to_var_)[g_name] = fwd_var_name;
                       return g_name;
                     } else {
                       return framework::kEmptyVarName;
                     }
                   });
    if (!drop_empty_grad) {
      for (const auto& name : ret_val) {
        if (original_block_->HasVar(name)) {
          // Copy Var from original block to active block, or create a new one.
          CopyVarFromOrig(name);
          input_grads.emplace_back(
              StaticCompositeContext::Instance().GetBlock()->FindVar(name));
        } else {
          input_grads.emplace_back(
              StaticCompositeContext::Instance().GetBlock()->Var(name));
        }
      }
      return input_grads;
    }
    PADDLE_ENFORCE_LE(
        var_names.size(),
        1UL,
        platform::errors::Unavailable(
            "BUG from operator developer:"
            " for input argument with a list of variables, "
            " drop_empty_grad is not allowed because it makes"
            " the correspondence bewteen a variable and its gradient"
            " ambiguous."));

    std::vector<std::string> dropped_ret_val;
    dropped_ret_val.reserve(ret_val.size());
    std::copy_if(
        ret_val.begin(),
        ret_val.end(),
        std::back_inserter(dropped_ret_val),
        [](const std::string& str) { return str != framework::kEmptyVarName; });
    for (const auto& name : dropped_ret_val) {
      // TODO(jiabin): Will this cause fill zeros error?
      if (original_block_->HasVar(name)) {
        // Copy Var from original block to active block, or create a new one.
        CopyVarFromOrig(name);
        input_grads.emplace_back(
            StaticCompositeContext::Instance().GetBlock()->FindVar(name));
      } else {
        input_grads.emplace_back(
            StaticCompositeContext::Instance().GetBlock()->Var(name));
      }
    }
    return input_grads;
  }

  std::vector<framework::VarDesc*> MultiOutputGrad(
      const std::string& name) const {
    std::vector<std::string> ret_val;
    auto out_names = this->MultiForwardOutputVarName(name);
    ret_val.reserve(out_names.size());
    std::transform(out_names.begin(),
                   out_names.end(),
                   std::back_inserter(ret_val),
                   [this](const std::string& fwd_var_name) -> std::string {
                     auto g_name = framework::GradVarName(fwd_var_name);
                     (*this->grad_to_var_)[g_name] = fwd_var_name;
                     return g_name;
                   });
    std::vector<framework::VarDesc*> grad_out;
    for (auto name : ret_val) {
      auto target_grad = StaticCompositeContext::Instance().GetTargetGradName();
      if (target_grad.find(name) != target_grad.end()) {
        name = target_grad.at(name);
      }
      // TODO(jiabin): Will this cause fill zeros error?
      if (original_block_->HasVar(name)) {
        // Copy Var from original block to active block, or create a new one.
        CopyVarFromOrig(name);
        grad_out.emplace_back(
            StaticCompositeContext::Instance().GetBlock()->FindVar(name));
      } else {
        grad_out.emplace_back(
            StaticCompositeContext::Instance().GetBlock()->Var(name));
      }
    }
    return grad_out;
  }

  framework::VarDesc* SingleForwardInput(const std::string& name) const {
    // Copy Var from original block to active block, or create a new one.
    auto fwd_in_names = fwd_op_.Input(name);
    if (!fwd_in_names.empty()) {
      PADDLE_ENFORCE_EQ(
          fwd_in_names.size(),
          1,
          phi::errors::InvalidArgument(
              "When calling SingleForward for op: %s's Input: %s, we should "
              "only get one input tensor, but we got %d instead.",
              fwd_op_.Type(),
              name,
              fwd_in_names.size()));
      CopyVarFromOrig(fwd_op_.Input(name).at(0));
      return StaticCompositeContext::Instance().GetBlock()->FindVar(
          fwd_op_.Input(name).at(0));
    } else {
      return nullptr;
    }
  }

  framework::VarDesc* SingleForwardOutput(const std::string& name) const {
    // Copy Var from original block to active block, or create a new one.
    auto fwd_out_names = fwd_op_.Output(name);
    if (!fwd_out_names.empty()) {
      PADDLE_ENFORCE_EQ(
          fwd_out_names.size(),
          1,
          phi::errors::InvalidArgument(
              "When calling SingleForward for op: %s's Output: %s, we should "
              "only get one input tensor, but we got %d instead.",
              fwd_op_.Type(),
              name,
              fwd_out_names.size()));
      CopyVarFromOrig(fwd_op_.Output(name).at(0));
      return StaticCompositeContext::Instance().GetBlock()->FindVar(
          fwd_op_.Output(name).at(0));
    } else {
      return nullptr;
    }
  }

  std::vector<framework::VarDesc*> MultiForwardInput(
      const std::string& name) const {
    std::vector<framework::VarDesc*> result;
    for (const auto& n : fwd_op_.Input(name)) {
      // Copy Var from original block to active block, or create a new one.
      CopyVarFromOrig(n);
      result.emplace_back(
          StaticCompositeContext::Instance().GetBlock()->FindVar(n));
    }
    return result;
  }

  std::vector<framework::VarDesc*> MultiForwardOutput(
      const std::string& name) const {
    std::vector<framework::VarDesc*> result;
    for (const auto& n : fwd_op_.Output(name)) {
      // Copy Var from original block to active block, or create a new one.
      CopyVarFromOrig(n);
      result.emplace_back(
          StaticCompositeContext::Instance().GetBlock()->FindVar(n));
    }
    return result;
  }

  void RecoverOutputName(const paddle::Tensor& output,
                         const std::string& origin_name) {
    if (origin_name == framework::kEmptyVarName) return;
    VLOG(4) << "Recover: "
            << static_cast<prim::DescTensor*>(output.impl().get())->Name()
            << " To: " << origin_name;
    prim::StaticCompositeContext::Instance().GetBlock()->RenameVar(
        static_cast<prim::DescTensor*>(output.impl().get())->Name(),
        origin_name);
  }

  void RecoverOutputName(const std::vector<paddle::Tensor>& outputs,
                         const std::vector<std::string>& origin_names) {
    PADDLE_ENFORCE_EQ(outputs.size(),
                      origin_names.size(),
                      platform::errors::InvalidArgument(
                          "The size of outputs (%d) must be equal to the size "
                          "of origin_names (%d).",
                          outputs.size(),
                          origin_names.size()));
    for (size_t i = 0; i < outputs.size(); ++i) {
      if (origin_names[i] == framework::kEmptyVarName) continue;
      prim::StaticCompositeContext::Instance().GetBlock()->RenameVar(
          static_cast<prim::DescTensor*>(outputs[i].impl().get())->Name(),
          origin_names[i]);
    }
  }

  std::vector<std::string> MultiForwardOutputVarName(
      const std::string& name) const {
    return fwd_op_.Output(name);
  }

  std::vector<std::string> MultiForwardInputVarName(
      const std::string& name) const {
    return fwd_op_.Input(name);
  }

  static std::vector<std::string> EmptyInput() { return {}; }

  static std::vector<std::string> EmptyOutput() { return {}; }

  static std::vector<std::string> EmptyInputGrad() { return {}; }

  static std::vector<std::string> EmptyOutputGrad() { return {}; }

  std::vector<std::string> InputNames() const {
    return this->fwd_op_.InputNames();
  }

  std::vector<std::string> OutputNames() const {
    return this->fwd_op_.OutputNames();
  }

  const std::unordered_map<std::string, framework::Attribute>& Attrs() const {
    return fwd_op_.GetAttrMap();
  }

  const std::unordered_map<std::string, framework::Attribute>& RuntimeAttrs()
      const {
    return fwd_op_.GetRuntimeAttrMap();
  }

  const framework::Attribute& GetAttr(const std::string& name) const {
    auto& map = fwd_op_.GetAttrMap();
    auto it = map.find(name);
    PADDLE_ENFORCE_NE(
        it,
        map.end(),
        platform::errors::NotFound("Cannot find attribute (%s).", name));
    return it->second;
  }

  template <typename T>
  inline const T& Attr(const std::string& name) const {
    return PADDLE_GET_CONST(T, GetAttr(name));
  }

  std::string ForwardOpType() const { return this->fwd_op_.Type(); }
  const framework::BlockDesc* GetForwardOpBlock() const {
    return fwd_op_.Block();
  }

 protected:
  bool HasInput(const std::string& name) const {
    return (fwd_op_.Inputs().count(name) > 0);
  }

  bool HasOutput(const std::string& name) const {
    return (fwd_op_.Outputs().count(name) > 0);
  }

 private:
  const framework::OpDesc& fwd_op_;
  const std::unordered_set<std::string>& no_grad_set_;
  std::unordered_map<std::string, std::string>* grad_to_var_;
  const framework::BlockDesc* original_block_;
  framework::ProgramDesc acting_program_;

 protected:
  std::vector<framework::BlockDesc*> grad_block_;
};

}  // namespace prim
}  // namespace paddle