/* Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#pragma once

#include <algorithm>
#include <iterator>
#include <memory>
#include <string>
#include <unordered_map>
#include <unordered_set>
#include <utility>
#include <vector>

#include "paddle/fluid/framework/executor_cache.h"
#include "paddle/fluid/framework/op_desc.h"
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/framework/operator.h"
#include "paddle/fluid/framework/program_desc.h"
#include "paddle/fluid/framework/scope.h"
#include "paddle/fluid/framework/var_type_traits.h"
#include "paddle/fluid/framework/variable.h"
#ifdef PADDLE_WITH_MKLDNN
#include "paddle/fluid/platform/mkldnn_helper.h"
#endif

DECLARE_bool(use_mkldnn);

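// Kernels for the run_program and run_program_grad ops. They execute the op
// range [start_op_index, end_op_index) of the attached global block inside an
// internal scope: inputs and parameters are shared into that scope before
// running, and outputs (or gradients) are shared back out afterwards.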
namespace paddle {
namespace operators {

using StepScopeVar = std::vector<framework::Scope *>;
using BlockDesc = framework::BlockDesc;
using ProgramDesc = framework::ProgramDesc;

using Variable = framework::Variable;
using LoDTensor = framework::LoDTensor;
using SelectedRows = framework::SelectedRows;

namespace details {

// All input vars should be LoDTensors and must be initialized.
static void CheckInputVarStatus(const Variable &var,
                                const std::string &var_name) {
  PADDLE_ENFORCE_EQ(
      var.IsType<LoDTensor>(), true,
      platform::errors::InvalidArgument(
          "The input variable %s of "
60
          "RunProgram(Grad)Op holds "
61 62 63 64 65
          "wrong type. Expect type is LoDTensor, but receive type is %s.",
          var_name, platform::demangle(framework::ToTypeName(var.Type()))));
  PADDLE_ENFORCE_EQ(
      var.Get<LoDTensor>().IsInitialized(), true,
      platform::errors::InvalidArgument("The tensor in input variable %s of "
66
                                        "RunProgram(Grad)Op "
67 68 69 70 71 72 73 74 75 76 77 78
                                        "is not initialized.",
                                        var_name));
}

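// The output variable taken from the internal scope must hold the same type as
// the destination variable (LoDTensor or SelectedRows), and its tensor must be
// initialized.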
static void CheckOutputVarStatus(const Variable &src_var,
                                 const Variable &dst_var,
                                 const std::string &var_name) {
  if (dst_var.IsType<LoDTensor>()) {
    PADDLE_ENFORCE_EQ(
        src_var.IsType<LoDTensor>(), true,
        platform::errors::InvalidArgument(
            "The output variable %s get from "
79
            "RunProgram(Grad)Op's internal scope holds "
80 81 82 83 84 85
            "wrong type. Expect type is LoDTensor, but receive type is %s.",
            var_name,
            platform::demangle(framework::ToTypeName(src_var.Type()))));
    PADDLE_ENFORCE_EQ(src_var.Get<LoDTensor>().IsInitialized(), true,
                      platform::errors::InvalidArgument(
                          "The tensor in output variable %s obtained from "
                          "RunProgram(Grad)Op's internal "
                          "scope is not initialized.",
                          var_name));
  } else if (dst_var.IsType<SelectedRows>()) {
    PADDLE_ENFORCE_EQ(
        src_var.IsType<SelectedRows>(), true,
        platform::errors::InvalidArgument(
            "The output variable %s get from "
94
            "RunProgram(Grad)Op's internal scope holds "
95 96 97 98 99 100
            "wrong type. Expect type is SelectedRows, but receive type is %s.",
            var_name,
            platform::demangle(framework::ToTypeName(src_var.Type()))));
    PADDLE_ENFORCE_EQ(src_var.Get<SelectedRows>().value().IsInitialized(), true,
                      platform::errors::InvalidArgument(
                          "The tensor in output variable %s obtained from "
                          "RunProgram(Grad)Op's "
                          "internal scope is not initialized.",
                          var_name));

  } else {
    PADDLE_THROW(platform::errors::InvalidArgument(
        "The RunProgram(Grad)Op only supports output "
        "variables of type LoDTensor or SelectedRows, "
        "but the received variable %s's type is %s.",
        var_name, platform::demangle(framework::ToTypeName(dst_var.Type()))));
  }
}

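// Share the underlying tensor storage (plus LoD / rows / height metadata) from
// src_var into dst_var without copying the data.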
static void VariableShare(const Variable &src_var, Variable *dst_var) {
  // The previous check ensures that the variable type can only be LoDTensor or
  // SelectedRows.
  if (src_var.IsType<LoDTensor>()) {
    auto *lod_tensor = dst_var->GetMutable<LoDTensor>();
    lod_tensor->ShareDataWith(src_var.Get<LoDTensor>());
    lod_tensor->set_lod(src_var.Get<LoDTensor>().lod());
  } else if (src_var.IsType<SelectedRows>()) {
    auto *selected_rows = dst_var->GetMutable<SelectedRows>();
    selected_rows->mutable_value()->ShareDataWith(
        src_var.Get<SelectedRows>().value());
    selected_rows->set_rows(src_var.Get<SelectedRows>().rows());
    selected_rows->set_height(src_var.Get<SelectedRows>().height());
  }
}

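// Share the given variables into the internal scope under the given names,
// validating each input first. "Fake_var" placeholders are skipped.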
static void ShareVarsIntoScope(const std::vector<Variable *> &vars,
                               const std::vector<std::string> &var_names,
                               framework::Scope *scope) {
  for (size_t i = 0; i < vars.size(); ++i) {
    if (var_names[i] == "Fake_var") {
      continue;
    }
    auto *var = scope->Var(var_names[i]);
    CheckInputVarStatus(*vars[i], var_names[i]);
    VariableShare(*vars[i], var);
  }
}

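// Share variables from the internal scope back into the given output
// variables, skipping empty vars, fake vars, and vars not present in the
// global block.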
static void ShareVarsFromScope(const std::vector<Variable *> &vars,
                               const std::vector<std::string> &var_names,
                               const BlockDesc &global_block,
                               framework::Scope *scope) {
  for (size_t i = 0; i < vars.size(); ++i) {
    // NOTE: If out_tmp.stop_gradient = True is set in the model code, none of
    // the parameters that feed into out_tmp have a @GRAD variable, so looking
    // them up would raise an error because we cannot find them in the scope.
    // Therefore we skip sharing a var (or var@GRAD) if it does not appear in
    // the global block.
    if (var_names[i] == framework::kEmptyVarName ||
        var_names[i] == "Fake_var" || !global_block.HasVar(var_names[i])) {
      VLOG(2) << "Variable " << var_names[i] << " is not shared, skip it!";
      continue;
    }
    // NOTE: Silently skipping a var that is not found here would be dangerous:
    // a bug at this point would surface only as wrong gradients, which is very
    // hard to notice. Hence the explicit not-null check below.
    auto *var = scope->FindVar(var_names[i]);
    PADDLE_ENFORCE_NOT_NULL(
        var, platform::errors::NotFound("The output variable %s is not in "
                                        "RunProgram(Grad)Op'"
                                        "s internal scope.",
                                        var_names[i]));
    CheckOutputVarStatus(*var, *vars[i], var_names[i]);
    VariableShare(*var, vars[i]);
  }
}

}  // namespace details

template <typename DeviceContext, typename T>
class RunProgramOpKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext &ctx) const override {
    VLOG(2) << "RunProgramOpKernel Compute";
    // Step 1. prepare inputs, outputs, attrs
    auto &input_vars = ctx.MultiInputVar("X");
    auto &param_vars = ctx.MultiInputVar("Params");
    auto output_vars = ctx.MultiOutputVar("Out");
    auto dout_vars = ctx.MultiOutputVar("DOut");

    auto input_var_names = ctx.InputNames("X");
    auto output_var_names = ctx.OutputNames("Out");
    auto dout_var_names = ctx.OutputNames("DOut");

    // The current program may not hold any parameters.
    std::vector<std::string> param_names;
    if (!param_vars.empty()) {
      param_names = ctx.InputNames("Params");
    }

    auto start_op_index = ctx.Attr<int64_t>("start_op_index");
    auto end_op_index = ctx.Attr<int64_t>("end_op_index");
    auto is_test = ctx.Attr<bool>("is_test");
    auto program_id = ctx.Attr<int64_t>("program_id");

    // NOTE(chenweihang): To avoid adding a new variable type, a vector of
    // scopes is used here. Originally, a scope could have been used directly.
    auto *out_scope_vec = ctx.Output<StepScopeVar>("OutScope");
    PADDLE_ENFORCE_EQ(
        out_scope_vec->size(), 1,
        platform::errors::InvalidArgument(
            "The OutScope of RunProgramGradOp should only hold one scope."));

    // Step 2. prepare executor and init persistable variables

    // NOTE(Aurelius84): While training some models (e.g. Reinforcement
    // Learning), forward may be called many times before backpropagation is
    // applied all at once. The Tensor data of each step should be saved into
    // its own scope. Otherwise, the gradients can be miscalculated because
    // the Tensor data of the last forward step would always be used.
    framework::Scope *global_inner_scope = out_scope_vec->front();
    VLOG(2) << "The number of sub scopes before forward: "
            << out_scope_vec->front()->kids().size();
    framework::Scope &scope = global_inner_scope->NewScope();

    // share input_vars & parameters into scope
    details::ShareVarsIntoScope(input_vars, input_var_names, &scope);
    details::ShareVarsIntoScope(param_vars, param_names, &scope);

    auto *global_block = ctx.Attr<BlockDesc *>("global_block");

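    // Only build and run the executor when the op range is non-empty.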
    if (end_op_index > start_op_index) {
      auto *program = global_block->Program();
      auto cache_info = framework::GetExecutorInfoFromCache(
          *program, ctx.GetPlace(), start_op_index, end_op_index,
          /*is_grad=*/false, program_id, &scope);
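      // cache_info.first is the cached ParallelExecutor for this program_id
      // and op range; cache_info.second indicates whether it was newly
      // created.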
      auto &parallel_executor = cache_info.first;
      // All output vars are added to the eager-deletion skip list.
      auto &skip_eager_delete_vars =
          framework::ExecutorInfoCache::Instance().SkipEagerDeleteVars(
              program_id, false);
      if (cache_info.second /*is_new_created*/) {
        parallel_executor->SkipMemoryReuse(/*scope_idx=*/0, input_var_names);
        skip_eager_delete_vars.insert(skip_eager_delete_vars.end(),
                                      output_var_names.begin(),
                                      output_var_names.end());
        skip_eager_delete_vars.insert(skip_eager_delete_vars.end(),
                                      dout_var_names.begin(),
                                      dout_var_names.end());
        framework::details::ParseSafeEagerDeletionSkipVars(
            *program, end_op_index, output_var_names, &skip_eager_delete_vars);
      }

      // Step 3. run ops
      parallel_executor->RunWithoutFetch(skip_eager_delete_vars);
    }
    // Step 4. Get Output
    details::ShareVarsFromScope(output_vars, output_var_names, *global_block,
                                &scope);
    details::ShareVarsFromScope(dout_vars, dout_var_names, *global_block,
                                &scope);

    // Debug info: scope info at the end of the run
    VLOG(3) << framework::GenScopeTreeDebugInfo(out_scope_vec->front());
    // Step 5. Drop all children scopes while testing.
    if (is_test) {
      out_scope_vec->front()->DropKids();
    }
    VLOG(2) << "The number of sub scopes after forward: "
            << out_scope_vec->front()->kids().size();
#ifdef PADDLE_WITH_MKLDNN
    if (FLAGS_use_mkldnn) DontClearMKLDNNCache(ctx.GetPlace());
#endif
  }
};

template <typename DeviceContext, typename T>
class RunProgramGradOpKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext &ctx) const override {
    VLOG(2) << "RunProgramGradOpKernel Compute";
    // Step 1. prepare inputs and outputs
    auto &output_grad_vars = ctx.MultiInputVar(framework::GradVarName("Out"));
    auto input_grad_vars = ctx.MultiOutputVar(framework::GradVarName("X"));
    auto param_grad_vars = ctx.MultiOutputVar(framework::GradVarName("Params"));

    // If all output vars are set to stop_gradient, the grad op does not need
    // to be executed.
    if (input_grad_vars.empty() && param_grad_vars.empty()) return;

    auto output_grad_var_names = ctx.InputNames(framework::GradVarName("Out"));
    // NOTE: after PR22939 [Add double grad] was merged, the grad op maker's
    //   SetOutput sets the output to None if the input var has
    //   stop_gradient=True, which causes a NotFound error when
    //   ctx.OutputNames() is called
    std::vector<std::string> input_grad_var_names;
    std::vector<std::string> param_grad_names;
    if (!input_grad_vars.empty()) {
      input_grad_var_names = ctx.OutputNames(framework::GradVarName("X"));
    }
    if (!param_grad_vars.empty()) {
      param_grad_names = ctx.OutputNames(framework::GradVarName("Params"));
    }

    auto *block = ctx.Attr<BlockDesc *>("global_block");
    auto orig_end_op_index = ctx.Attr<int64_t>("end_op_index");
    auto program_id = ctx.Attr<int64_t>("program_id");
    // NOTE: skip the `shape` and `fill_constant` ops created by
    // fluid.backward.gradients; each forward output generates one `shape`
    // and one `fill_constant` op.
    int64_t start_op_index = orig_end_op_index + (output_grad_vars.size() * 2);
    int64_t end_op_index = block->OpSize();

    auto *out_scope_vec = ctx.Input<StepScopeVar>("OutScope");
    PADDLE_ENFORCE_EQ(
        out_scope_vec->size(), 1,
        platform::errors::InvalidArgument(
            "The OutScope of RunProgramGradOp should only hold one scope."));

    framework::Scope *global_inner_scope = out_scope_vec->front();
    auto sub_scope_num = global_inner_scope->kids().size();
    VLOG(2) << "The number of sub scopes before backward: " << sub_scope_num;
    PADDLE_ENFORCE_GT(sub_scope_num, 0,
                      platform::errors::InvalidArgument(
                          "The OutScope of RunProgramGradOp should hold at "
                          "least one sub scope."));

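    // Reuse the sub scope created by the forward pass, so that the Tensor data
    // saved there is visible to the backward ops.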
    auto &scope = *(global_inner_scope->kids().front());
    auto *global_block = ctx.Attr<BlockDesc *>("global_block");

    if (end_op_index > start_op_index) {
      // Step 2. prepare executor and scope
      auto *program = global_block->Program();
      auto cache_info = framework::GetExecutorInfoFromCache(
          *program, ctx.GetPlace(), start_op_index, end_op_index,
          /*is_grad*/ true, program_id, &scope);
      auto &parallel_executor = cache_info.first;

      auto &skip_eager_delete_vars =
          framework::ExecutorInfoCache::Instance().SkipEagerDeleteVars(
              program_id, true);
      if (cache_info.second /*is_new_created*/) {
        parallel_executor->SkipMemoryReuse(/*scope_idx=*/0,
                                           output_grad_var_names);

        skip_eager_delete_vars.insert(skip_eager_delete_vars.end(),
                                      input_grad_var_names.begin(),
                                      input_grad_var_names.end());
        framework::details::AppendSkipDeletionVars(param_grad_names,
                                                   &skip_eager_delete_vars);
      }

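      // Share the incoming gradients of the forward outputs into the sub scope
      // so that the backward ops can read them.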
      details::ShareVarsIntoScope(output_grad_vars, output_grad_var_names,
                                  &scope);
      // Debug info: scope info at the end of the run
      VLOG(3) << framework::GenScopeTreeDebugInfo(out_scope_vec->front());

      // Step 3. run ops
      parallel_executor->RunWithoutFetch(
          /*skip_eager_delete_vars=*/skip_eager_delete_vars);
    }

    // Step 4. get outputs
    details::ShareVarsFromScope(input_grad_vars, input_grad_var_names,
                                *global_block, &scope);
    details::ShareVarsFromScope(param_grad_vars, param_grad_names,
                                *global_block, &scope);

    // Step 5. drop the current scope
    global_inner_scope->DeleteScope(&scope);
    VLOG(2) << "The number of sub scopes after backward: "
            << global_inner_scope->kids().size();
  }
};

}  // namespace operators
}  // namespace paddle