// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include <memory>
#include <queue>
#include <string>
#include <type_traits>
#include <unordered_map>
#include <unordered_set>
#include <utility>
#include <vector>

#include "paddle/fluid/framework/details/computation_op_handle.h"
#include "paddle/fluid/framework/details/eager_deletion_op_handle.h"
#include "paddle/fluid/framework/details/multi_devices_helper.h"
#include "paddle/fluid/framework/details/op_graph_view.h"
#include "paddle/fluid/framework/details/reference_count_pass.h"
#include "paddle/fluid/framework/details/reference_count_pass_helper.h"
#include "paddle/fluid/framework/ir/graph_helper.h"

namespace paddle {
namespace framework {
namespace details {

// A functor that shrinks an op set by removing the ops that other ops in the
// same set (transitively) depend on, keeping only the last ops
class ShrinkDepsOpFunctor {
 private:
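  // Pairwise relationship between two ops in the candidate set:
  //   kSame   - the same op (diagonal entries)
  //   kNoDeps - neither op can reach the other in the graph
  //   kBefore - the row op is a (transitive) predecessor of the column op
  //   kAfter  - the row op is a (transitive) successor of the column op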
  enum RelationShip { kSame = 0, kNoDeps = 1, kBefore = 2, kAfter = 3 };

 public:
  explicit ShrinkDepsOpFunctor(const std::vector<OpHandleBase *> &all_ops)
      : graph_(all_ops) {}

  template <typename OpSet>
  OpSet operator()(const OpSet &op_set) const {
    using KeyType = typename OpSet::key_type;
    static_assert(
        std::is_base_of<OpHandleBase,
                        typename std::remove_pointer<KeyType>::type>::value,
        "Key type of OpSet must be OpHandleBase, or derived of OpHandleBase");

    if (op_set.size() <= 1) return op_set;
    std::vector<OpHandleBase *> ops(op_set.begin(), op_set.end());
    OpSet ret;
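    // Keep only the ops that precede no other op in the set: if some other
    // op depends on op A, waiting for that op already implies that A has
    // finished, so A can be dropped from the set.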
    auto rels = GetRelations(ops);
    auto not_before = [](RelationShip r) { return r != kBefore; };
    for (size_t i = 0; i < rels.size(); ++i) {
      if (std::all_of(rels[i].begin(), rels[i].end(), not_before)) {
        ret.emplace(static_cast<KeyType>(ops[i]));
      }
    }
    return ret;
  }

 private:
  std::vector<std::vector<RelationShip>> GetRelations(
      const std::vector<OpHandleBase *> &ops) const {
    std::unordered_map<OpHandleBase *, size_t> op_to_idx;
    for (size_t i = 0; i < ops.size(); ++i) {
      PADDLE_ENFORCE(graph_.HasOp(ops[i]), "Op does not exist in graph");
      op_to_idx[ops[i]] = i;
    }

    PADDLE_ENFORCE(op_to_idx.size() == ops.size(), "Duplicate ops");

    std::vector<std::vector<RelationShip>> ret(ops.size());
    for (auto &e : ret) {
      e.assign(ops.size(), kSame);
    }

    size_t found_num = ops.size();
    size_t total_num = ops.size() * ops.size();
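    // The visitor marks ops[i] as kBefore every candidate op reachable from
    // it. found_num starts at ops.size() since the diagonal (kSame) entries
    // are already resolved; once all entries are known, stop early.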
    auto visitor = [&](OpHandleBase *op, size_t i) {
      auto it = op_to_idx.find(op);
      if (it != op_to_idx.end()) {
        size_t j = it->second;
        if (i != j && ret[i][j] == kSame) {
          ret[i][j] = kBefore;
          ret[j][i] = kAfter;
          found_num += 2;
          if (found_num == total_num) {
            return false;
          }
        }
      }
      return true;
    };

    for (size_t i = 0; i < ops.size(); ++i) {
      auto sub_visitor = [&, i](OpHandleBase *op) { return visitor(op, i); };
      if (!graph_.VisitAllPendingOps(ops[i], sub_visitor)) {
        break;
      }
    }

    for (size_t i = 0; i < ops.size(); ++i) {
      for (size_t j = i + 1; j < ops.size(); ++j) {
        if (ret[i][j] != kSame) continue;
        ret[i][j] = kNoDeps;
        ret[j][i] = kNoDeps;
      }
    }

    return ret;
  }

  const OpGraphView graph_;
};

/**
 * Find the nearest downstream computation op handle. If the op is a
 * computation op, just return itself.
 */
static ComputationOpHandle *FindNextComputationOpHandleOrReturnItself(
    OpHandleBase *op, size_t scope_idx) {
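  // BFS from `op` along output variables until the first computation op on
  // scope `scope_idx` is found; `visited` prevents enqueueing an op twice.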
  std::queue<OpHandleBase *> q;
  std::unordered_set<OpHandleBase *> visited;
  q.push(op);
  do {
    auto *cur_op = q.front();
    q.pop();
    auto *compute_op = dynamic_cast<ComputationOpHandle *>(cur_op);
    if (compute_op != nullptr && compute_op->GetScopeIdx() == scope_idx) {
      return compute_op;
    }
    for (auto *out_var : cur_op->Outputs()) {
      for (auto *pending_op : out_var->PendingOps()) {
        if (visited.count(pending_op)) continue;
        visited.insert(pending_op);
        q.push(pending_op);
      }
    }
  } while (!q.empty());
  return nullptr;
}

static std::unordered_set<ComputationOpHandle *>
ExtractComputationOpFromLastLivedVar(VarHandle *var, size_t scope_idx,
                                     const ShrinkDepsOpFunctor &shrink_func,
                                     bool *ok) {
  // Stage one: get the last op(s) of the variable.
  std::unordered_set<OpHandleBase *> candidates;
  {
    if (var->PendingOps().empty() && var->GeneratedOp()) {
      // No operator depends on this variable. So the last operator is the op
      // that generates this variable.
      candidates.emplace(var->GeneratedOp());
    } else {
      candidates = var->PendingOps();
    }

    // No pending ops, or the generated op is nullptr
    if (candidates.empty()) {
      *ok = false;
      return {};
    }
  }

  // Stage two: try to cast the candidate ops to computation ops.
  // Return (*ok = false) when this fails.
  //
  // The reason why not every type of op handle can be the last lived
  // op is:
  //    some op handles may operate on many DeviceContexts, while our garbage
  //    collector can only wait on one DeviceContext for now. So currently, we
  //    wait on the nearest compute op.
  std::unordered_set<ComputationOpHandle *> computation_op;
  {
    for (auto *op : candidates) {
      auto *compute_op =
          FindNextComputationOpHandleOrReturnItself(op, scope_idx);
      if (compute_op == nullptr) {
        *ok = false;
        return {};
      }
      computation_op.emplace(compute_op);
    }
  }

  // Stage three: shrink the computation ops if they depend on each other.
  // Keep the smallest set of ops whose completion implies the completion
  // of all the candidates.
  *ok = true;
  return shrink_func(computation_op);
}

/**
 * Shrink op dependencies according to no-need-buffer vars.
 *
 * If an op does not need the Tensor buffer of some input,
 * just remove the dependency of this op, i.e., decrease the reference count.
 *
 * For example, input Y of the elementwise_add_grad op is only used to infer
 * the shape and lod of Y@GRAD, so we do not need the buffer of input Y, and
 * the data buffer of input Y can be collected before elementwise_add_grad
 * runs.
 *
 * This method shrinks the op dependencies if possible, and returns whether
 * the dependency count decreases to 0.
 */
static bool ShrinkNoNeedBufferVarOpDependency(
    const std::string &var_name,
    std::unordered_set<ComputationOpHandle *> *op_handles) {
  std::vector<ComputationOpHandle *> skip_ops;
  for (auto *op_handle : *op_handles) {
    auto *op_base = op_handle->GetOp();
    auto &inferer = op_base->Info().NoNeedBufferVarsInferer();
    if (!inferer) {
      continue;
    }

    std::unordered_set<std::string> no_need_buffer_vars =
        inferer(op_base->Inputs(), op_base->Outputs(), op_base->Attrs());

    // Check whether var_name occurs in other inputs or outputs of the op
    // If it occurs, we cannot decrease the dependency number.
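    // Note: no_need_buffer_vars contains input *slot* names (e.g. "Y"), not
    // variable names, so var_name is matched against the arguments of each
    // remaining slot below.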
    bool occurred_in_other_vars = false;
    for (auto &in_pair : op_base->Inputs()) {
      if (no_need_buffer_vars.count(in_pair.first) > 0) {
        continue;
      }

      auto &args = in_pair.second;
      auto iter = std::find(args.begin(), args.end(), var_name);
      if (iter != args.end()) {
        occurred_in_other_vars = true;
        break;
      }
    }

    if (occurred_in_other_vars) {
      continue;
    }

    for (auto &out_pair : op_base->Outputs()) {
      auto &args = out_pair.second;
      auto iter = std::find(args.begin(), args.end(), var_name);
      if (iter != args.end()) {
        occurred_in_other_vars = true;
        break;
      }
    }

    if (!occurred_in_other_vars) {
      VLOG(2) << "Shrink var " << var_name << " in op " << op_handle->Name();
      skip_ops.emplace_back(op_handle);
    }
  }

  if (skip_ops.size() == op_handles->size()) {
    op_handles->clear();
    return true;
  } else {
    for (auto *skip_op : skip_ops) {
      op_handles->erase(skip_op);
    }
    return false;
  }
}

void ReferenceCountPass::ApplyImpl(ir::Graph *graph) const {
  auto &ref_cnts = Get<std::vector<ReferenceCountMap>>(kGlobalReferenceCount);
  auto &last_live_ops_of_vars =
      Get<std::vector<LastLiveOpsOfVars>>(kLastLiveOpsOfVars);

  PADDLE_ENFORCE(last_live_ops_of_vars.empty() && ref_cnts.empty(),
                 "Last live ops and reference counts of vars should be "
                 "initialized here.");

  const auto &vars = graph->Get<GraphVars>(kGraphVars);

  last_live_ops_of_vars.resize(vars.size());
  ref_cnts.resize(vars.size());

  ShrinkDepsOpFunctor shrink_func(
      ir::FilterByNodeWrapper<OpHandleBase>(*graph));

  for (size_t i = 0; i < vars.size(); ++i) {
    for (auto &name_var_pair : vars[i]) {
      // Can this variable be reused or deleted? If not, we do not compute
      // its reference count and dependencies.
      VarDesc *var_desc = TryGetLatestVarDesc(name_var_pair.second);

      if (var_desc == nullptr || var_desc->Persistable()) {
        continue;
      }

      auto var_type = var_desc->Proto()->type().type();
      if (var_type != proto::VarType::LOD_TENSOR &&
          var_type != proto::VarType::SELECTED_ROWS &&
          var_type != proto::VarType::LOD_TENSOR_ARRAY) {
        // Vars of this type cannot be deleted
        continue;
      }

      auto &var_name = name_var_pair.first;
      auto &var_handles = name_var_pair.second;

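      // Iterate over the versions of this variable from the newest to the
      // oldest, and stop at the first version whose last live ops can be
      // determined.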
      for (auto iter = var_handles.rbegin(); iter != var_handles.rend();
           ++iter) {
        bool ok;
        auto result =
            ExtractComputationOpFromLastLivedVar(*iter, i, shrink_func, &ok);

        // In rare cases, some vars may have no pending or preceding
        // computation ops. Just break.
        if (!ok) break;
        VLOG(10) << "Extract " << result.size() << " ops of var " << var_name;

        size_t original_op_deps = result.size();
        // If no op needs the buffer of var_name, calculate the reference
        // count of the previous version of var_name.
        if (ShrinkNoNeedBufferVarOpDependency(var_name, &result)) {
          VLOG(10) << "Try to precede reference count computing at var "
                   << var_name;
          continue;
        }

        size_t final_op_deps = result.size();
        if (final_op_deps < original_op_deps) {
          VLOG(5) << "Shrink op deps from " << original_op_deps << " to "
                  << final_op_deps;
        }

        PADDLE_ENFORCE(!result.empty(), "Last living ops of %s cannot be empty",
                       var_name);
        ref_cnts[i].emplace(var_name, result.size());
        last_live_ops_of_vars[i].emplace(var_name, std::move(result));
        break;
      }

      // In rare cases, all the preceding attempts fail.
      // Just skip this corner case.
    }
  }
}

}  // namespace details
}  // namespace framework
}  // namespace paddle

REGISTER_PASS(reference_count_pass,
              paddle::framework::details::ReferenceCountPass)
    .RequirePassAttr(paddle::framework::details::kGlobalReferenceCount)
    .RequirePassAttr(paddle::framework::details::kLastLiveOpsOfVars);
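
// A minimal usage sketch (hypothetical driver code, not part of this file),
// assuming the usual ir::PassRegistry / Pass::SetNotOwned API: the two pass
// attributes required above must be attached before the pass is applied.
//
//   std::vector<ReferenceCountMap> ref_cnts;
//   std::vector<LastLiveOpsOfVars> last_live_ops_of_vars;
//   auto pass = ir::PassRegistry::Instance().Get("reference_count_pass");
//   pass->SetNotOwned(kGlobalReferenceCount, &ref_cnts);
//   pass->SetNotOwned(kLastLiveOpsOfVars, &last_live_ops_of_vars);
//   pass->Apply(graph);  // fills ref_cnts and last_live_ops_of_vars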