// Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "paddle/fluid/framework/executor_cache.h"

#include "paddle/fluid/framework/op_info.h"

namespace paddle {
namespace framework {
class ProgramDesc;
}  // namespace framework
}  // namespace paddle

namespace paddle {
namespace framework {

namespace details {

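// Picks an ExecutionStrategy for the given place: CPU uses two threads, all
// other supported device types run with a single thread.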
static ExecutionStrategy GetExecutionStrategy(const platform::Place &place) {
  framework::ExecutionStrategy execution_strategy;

  auto device_type = platform::Place2DeviceType(place);
  switch (device_type) {
    case platform::DeviceType::CPU: {
      execution_strategy.num_threads_ = 2;
      break;
    }
    case platform::DeviceType::CUDA: {
      // NOTE: According to experiments, one thread is faster for
      // most model training.
      execution_strategy.num_threads_ = 1;
      break;
    }
    case platform::DeviceType::XPU: {
      execution_strategy.num_threads_ = 1;
      break;
    }
    case platform::DeviceType::IPU: {
      execution_strategy.num_threads_ = 1;
      break;
    }
    case platform::DeviceType::NPU: {
      execution_strategy.num_threads_ = 1;
      break;
    }
    case platform::DeviceType::CUSTOM_DEVICE: {
      execution_strategy.num_threads_ = 1;
      break;
    }
    default:
      PADDLE_THROW(platform::errors::Unavailable("Unsupported Device type %d.",
                                                 device_type));
  }
  execution_strategy.use_device_ = device_type;

  return execution_strategy;
}

void AppendSkipDeletionVars(const std::vector<std::string> &append_vars,
                            std::vector<std::string> *all_vars) {
  for (auto &var : append_vars) {
    all_vars->emplace_back(var);
  }
}

/*
 * NOTE(Aurelius84): In ParallelExecutor, the memory optimization pass will be
 * applied. To avoid eagerly deleting the last alive variables that are still
 * needed by the backward program, we first parse these variable names as
 * skip_eager_vars. While executing pe.run, skip_eager_vars are used to
 * skip memory optimization.
 *
 * A variable is considered a skip_eager_var if it satisfies either rule:
 *
 *   1. it is an output var of run_program_op
 *   2. it is an input var used by a backward op
 */
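// Illustrative example (not exhaustive): if `out` is an output of
// run_program_op and is also read by a grad op in the backward block, `out`
// is collected into skip_eager_delete_vars so the memory-optimization pass
// does not free it between the forward and backward runs.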
void ParseSafeEagerDeletionSkipVars(
    const ProgramDesc &program,
    int64_t forward_op_nums,
    const std::vector<std::string> &output_var_names,
    std::vector<std::string> *skip_eager_delete_vars) {
  auto all_ops = program.Block(0).AllOps();
  auto &op_info_map = OpInfoMap::Instance();
  // NOTE: skip the `shape` and `fill_constant` ops created by
  // fluid.backward.gradients; each forward output generates one `shape`
  // and one `fill_constant` op.
  size_t backward_op_start_index =
      forward_op_nums + (output_var_names.size() * 2);
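  // e.g. (illustrative numbers): with forward_op_nums = 10 and two output
  // vars, the backward ops are assumed to start at index 10 + 2 * 2 = 14.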

  // step 2: parse the variables needed by the backward ops
  std::unordered_set<std::string> op_outputs;
  std::unordered_set<std::string> op_inputs;
  std::unordered_set<std::string> no_need_buffer_ins;

  for (auto i = backward_op_start_index; i < all_ops.size(); ++i) {
    framework::OpDesc *op = all_ops[i];
    // NOTE: skip the NoNeedBufferVars of grad ops and GC their memory in
    // advance.
    auto &op_info = op_info_map.Get(op->Type());
    auto &inferer = op_info.NoNeedBufferVarsInferer();
    no_need_buffer_ins.clear();
    if (inferer != nullptr) {
      no_need_buffer_ins =
          inferer(op->Inputs(), op->Outputs(), op->GetAttrMap());
    }
    for (auto &in_names : op->Inputs()) {
      if (no_need_buffer_ins.count(in_names.first) == 0) {
        for (auto &in_name : in_names.second) {
          op_inputs.emplace(in_name);
        }
      } else {
        VLOG(2) << op->Type() << " has no_need_buffer_in: " << in_names.first
                << " , skip it.";
      }
    }

    for (const std::string &out_arg_name : op->OutputArgumentNames()) {
      op_outputs.emplace(out_arg_name);
    }
  }
  // For the grad op's input variables: if a variable is not an output of the
  // grad op, it may be an output of a forward op, so we mark it as a skip var
  // to prevent it from being deleted when the grad op is called multiple times.
  for (const std::string &var_name : op_inputs) {
    if (op_outputs.find(var_name) == op_outputs.end()) {
      VLOG(2) << "skip eager var: " << var_name;
      skip_eager_delete_vars->emplace_back(var_name);
    }
  }
  VLOG(3) << "Found skip_eager_delete_vars: " << skip_eager_delete_vars->size();
}

void AppendSkipDeletionVars(const std::vector<std::string> &append_vars,
                            std::set<std::string> *all_vars) {
  for (auto &var : append_vars) {
    all_vars->insert(var);
  }
}

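// Collects the skip-deletion variable set for a standalone backward program:
// every input of a backward op (except no-need-buffer inputs) that is not
// produced by another backward op must stay alive.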
std::set<std::string> ParseSafeEagerDeletionSkipVarsSet(
    const ProgramDesc &backward_program) {
  std::set<std::string> skip_eager_delete_vars;
  auto backward_ops = backward_program.Block(0).AllOps();
  auto &op_info_map = OpInfoMap::Instance();
  std::unordered_set<std::string> op_outputs;
  std::unordered_set<std::string> op_inputs;
  std::unordered_set<std::string> no_need_buffer_ins;
  for (size_t i = 0; i < backward_ops.size(); ++i) {
    framework::OpDesc *op = backward_ops[i];
    if (op->Type() == "share_buffer") {
      VLOG(1) << "skip share_buffer op";
      continue;
    }
    // NOTE: skip the NoNeedBufferVars of grad ops and GC their memory in
    // advance.
    auto &op_info = op_info_map.Get(op->Type());
    auto &inferer = op_info.NoNeedBufferVarsInferer();
    no_need_buffer_ins.clear();
    if (inferer != nullptr) {
      no_need_buffer_ins =
          inferer(op->Inputs(), op->Outputs(), op->GetAttrMap());
    }
    for (auto &in_names : op->Inputs()) {
      if (no_need_buffer_ins.count(in_names.first) == 0) {
        for (auto &in_name : in_names.second) {
          op_inputs.emplace(in_name);
        }
      } else {
        VLOG(2) << op->Type() << " has no_need_buffer_in: " << in_names.first
                << " , skip it.";
      }
    }
    for (const std::string &out_arg_name : op->OutputArgumentNames()) {
      op_outputs.emplace(out_arg_name);
    }
  }
  for (const std::string &var_name : op_inputs) {
    if (op_outputs.find(var_name) == op_outputs.end()) {
      VLOG(1) << "skip eager var: " << var_name;
      skip_eager_delete_vars.insert(var_name);
    }
  }
  VLOG(1) << "Found skip_eager_delete_vars: " << skip_eager_delete_vars.size();
  return skip_eager_delete_vars;
}
}  // namespace details

// C++11 removes the need for manual locking: concurrent callers will wait
// while the static local variable is being initialized.
// https://stackoverflow.com/questions/11711920/how-to-implement-multithread-safe-singleton-in-c11-without-using-mutex
ExecutorInfoCache &ExecutorInfoCache::Instance() {
  static ExecutorInfoCache g_exe_cache_info_map;
  return g_exe_cache_info_map;
}

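// Builds an ir::Graph over the requested op range of `program_desc`, wraps it
// in a ParallelExecutor bound to `scope`, and prepares the scope's variables.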
static PEAndGraphPair CreateExecutorInfo(
    const ProgramDesc &program_desc,
    const platform::Place &place,
    int64_t start_op_index,
    int64_t end_op_index,
    framework::Scope *scope,
    const details::BuildStrategy &build_strategy) {
  auto execution_strategy = details::GetExecutionStrategy(place);
  auto graph = std::make_shared<framework::ir::Graph>(
      program_desc, start_op_index, end_op_index);
  auto parallel_executor = std::make_shared<framework::ParallelExecutor>(
      place, scope, execution_strategy, build_strategy, graph.get());
  parallel_executor->PrepareVariables(scope);
  return std::make_pair(parallel_executor, graph);
}

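// Same as CreateExecutorInfo, but with a BuildStrategy that fixes the op run
// order.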
PEAndGraphPair CreateFixOrderExecutorInfo(const ProgramDesc &program_desc,
                                          const platform::Place &place,
                                          int64_t start_op_index,
                                          int64_t end_op_index,
                                          framework::Scope *scope) {
  details::BuildStrategy build_strategy;
  build_strategy.fix_op_run_order_ = true;
  auto pe_and_graph = CreateExecutorInfo(
      program_desc, place, start_op_index, end_op_index, scope, build_strategy);
  return pe_and_graph;
}

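// Looks up the ParallelExecutor cached under (program_id, is_grad). On a miss
// it creates one via CreateExecutorInfo and caches it; the bool in the
// returned pair reports whether the executor was newly created.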
CacheInfo GetExecutorInfoFromCache(const ProgramDesc &program_desc,
                                   const platform::Place &place,
                                   int64_t start_op_index,
                                   int64_t end_op_index,
                                   bool is_grad,
                                   int64_t program_id,
                                   framework::Scope *scope) {
  auto &cached_exe_info = framework::ExecutorInfoCache::Instance();

  if (!cached_exe_info.Has(program_id, is_grad)) {
    // TODO(Aurelius84): Consider using an LRU algorithm to replace this.
    if (cached_exe_info.Size() > 4u /* max_cached_size */) {
      VLOG(2) << "The cached info size has exceeded max_cached_size: 4, clear "
                 "all caches!";
      cached_exe_info.Finalize();
    }

    VLOG(1) << "create exe_info for " << program_id << " is_grad: " << is_grad;
    auto &build_strategy = cached_exe_info.GetBuildStrategy(program_id);

    // 2. Construct Graph and ParallelExecutor.
    auto pe_and_graph = CreateExecutorInfo(program_desc,
                                           place,
                                           start_op_index,
                                           end_op_index,
                                           scope,
                                           build_strategy);

    // 3. Insert value into cached map.
    auto &cached_value = cached_exe_info.GetMutable(program_id, is_grad);
    cached_value.executor_ = pe_and_graph.first;
    cached_value.graph_ = pe_and_graph.second;
    return std::make_pair(pe_and_graph.first, /*is_new_created=*/true);
  } else {
    VLOG(1) << "get exe_info from cache by: " << program_id
            << " is_grad: " << is_grad;
    auto &cached_value = cached_exe_info.GetMutable(program_id, is_grad);

    auto &parallel_executor = cached_value.executor_;
    // Update the op_handle scope map in the cached executor's graph.
    std::unordered_map<Scope *, Scope *> scope_map = {
        {parallel_executor->GetLocalScopes().front(), scope}};
    parallel_executor->ResetOpHandleScopeMapOfGraphs(scope_map);
    // Temporary variables need to be recreated in the new scope.
    parallel_executor->PrepareVariables(scope);

    return std::make_pair(parallel_executor, /*is_new_created=*/false);
  }
}
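
// Usage sketch (hypothetical caller, not part of this file):
//   auto cache_info = GetExecutorInfoFromCache(program_desc,
//                                              place,
//                                              /*start_op_index=*/0,
//                                              /*end_op_index=*/fwd_op_num,
//                                              /*is_grad=*/false,
//                                              program_id,
//                                              scope);
//   auto &parallel_executor = cache_info.first;  // then run it as usual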

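// Thread-safe for the same reason as ExecutorInfoCache::Instance above.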
InterpreterCoreInfoCache &InterpreterCoreInfoCache::Instance() {
  static InterpreterCoreInfoCache g_info_cache;
  return g_info_cache;
}

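// Creates an InterpreterCore for block 0 of `program_desc`, registers it in
// the InterpreterCoreInfoCache under (program_id, is_grad), and returns it.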
std::shared_ptr<InterpreterCore> CreateInterpreterCoreInfoToCache(
    const ProgramDesc &program_desc,
    const platform::Place &place,
    bool is_grad,
    int64_t program_id,
    framework::Scope *scope) {
  auto &interpretercore_info_cache =
      framework::InterpreterCoreInfoCache::Instance();
  if (interpretercore_info_cache.Size() > 4u /* max_cached_size */) {
    interpretercore_info_cache.Finalize();
  }
  auto core = std::make_shared<InterpreterCore>(
      place,
      program_desc.Block(0),
      /*skip_gc_vars=*/std::set<std::string>(),
      scope,
      /*used_for_jit=*/true);
  auto &cached_value =
      interpretercore_info_cache.GetMutable(program_id, is_grad);
  cached_value.core_ = core;
  return core;
}

}  // namespace framework
}  // namespace paddle