// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/fluid/framework/details/fast_threaded_ssa_graph_executor.h"
#include <atomic>
#include <memory>
#include <string>
#include <unordered_map>
#include <vector>
#include "paddle/fluid/framework/details/fetch_op_handle.h"
#include "paddle/fluid/framework/details/multi_devices_helper.h"

namespace paddle {
namespace framework {
namespace details {

FastThreadedSSAGraphExecutor::FastThreadedSSAGraphExecutor(
    const ExecutionStrategy &strategy, const std::vector<Scope *> &local_scopes,
    const std::vector<platform::Place> &places,
    std::unique_ptr<ir::Graph> &&graph)
    : strategy_(strategy),
      local_scopes_(local_scopes),
      places_(places),
      graph_(std::move(graph)),
      pool_(strategy.num_threads_ +
            1),  // add one more thread for generating op_deps
      fetch_ctxs_(places) {
  auto &ops = graph_->Get<details::GraphOps>("ops");

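  // Count each op's not-ready inputs. Ops with no pending inputs can start
  // immediately and are used to bootstrap every Run.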
  for (auto &op : ops) {
    int dep = static_cast<int>(op->NotReadyInputSize());
    op_deps_.emplace(op.get(), dep);
    if (dep == 0) {
      bootstrap_ops_.emplace_back(op.get());
    }
  }

  PrepareAtomicOpDeps();
}

FeedFetchList FastThreadedSSAGraphExecutor::Run(
    const std::vector<std::string> &fetch_tensors) {
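  // Take the dependency-count snapshot prepared ahead of time (by the
  // constructor or the previous Run), then immediately start building the
  // next snapshot on the spare pool thread.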
  std::unique_ptr<std::unordered_map<OpHandleBase *, std::atomic<int>>>
      op_deps = atomic_op_deps_.get();
  PrepareAtomicOpDeps();

  paddle::framework::FeedFetchList fetches;
  fetches.resize(fetch_tensors.size());
  std::unordered_map<std::string, std::vector<VarHandleBase *>> fetched_vars;
  std::vector<std::unique_ptr<FetchOpHandle>> fetch_ops;

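  // For each fetched name, collect the newest version of that variable from
  // every device's variable map.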
  for (auto &fetch_var_name : fetch_tensors) {
    for (auto &var_map : graph_->Get<details::GraphVars>("vars")) {
      auto it = var_map.find(fetch_var_name);
      if (it != var_map.end()) {
        fetched_vars[fetch_var_name].push_back(it->second.rbegin()->get());
      }
    }
  }

  for (size_t i = 0; i < fetch_tensors.size(); ++i) {
    auto &var_name = fetch_tensors[i];
    auto fetched_var_it = fetched_vars.find(var_name);
    PADDLE_ENFORCE(fetched_var_it != fetched_vars.end(),
                   "Cannot find fetched variable (perhaps the main_program "
                   "is not set to ParallelExecutor).");

    auto &vars = fetched_var_it->second;

    ir::Node *fetch_node =
        graph_->CreateEmptyNode("fetch", ir::Node::Type::kOperation);
    auto *op = new FetchOpHandle(fetch_node, &fetches, i, &local_scopes_);
    fetch_ops.emplace_back(op);

    for (auto &p : places_) {
      op->SetDeviceContext(p, fetch_ctxs_.Get(p));
    }

    for (auto *var : vars) {
      op->AddInput(var);
    }

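    // Register the fetch op in this Run's dependency map so it is scheduled
    // once all of its inputs are ready.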
    (*op_deps)[op] = static_cast<int>(op->NotReadyInputSize());
  }

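  // Launch every initially-ready op; workers report how many ops they
  // completed through complete_q, or -1UL if an op threw an exception.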
  size_t num_complete = 0;
  remaining_ = 0;
  BlockingQueue<size_t> complete_q;
  for (auto op : bootstrap_ops_) {
    RunOpAsync(op_deps.get(), op, &complete_q);
  }

  while (num_complete != op_deps->size()) {
    size_t num_comp = complete_q.Pop();
    if (num_comp == -1UL) {
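      // An op threw an exception. Drain the queue until every in-flight op
      // has reported back, then rethrow on the calling thread.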
      int remaining = 0;
      while (true) {
        remaining = remaining_;
        if (remaining == 0) {
          break;
        }
        for (int i = 0; i < remaining; ++i) {
          complete_q.Pop();
        }
      }
      exception_.ReThrow();
    }
    num_complete += num_comp;
  }
  // Wait for FetchOps to complete.
  ClearFetchOp(graph_.get(), &fetch_ops);
  return fetches;
}
void FastThreadedSSAGraphExecutor::RunOpAsync(
    std::unordered_map<OpHandleBase *, std::atomic<int>> *op_deps,
    OpHandleBase *op, BlockingQueue<size_t> *complete_q) {
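  // remaining_ counts in-flight tasks so the exception path in Run() knows
  // how many completion messages it still has to drain.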
  ++remaining_;
  this->pool_.enqueue([=] {
    OpHandleBase *op_to_run = op;
    size_t complete = 0;
    while (op_to_run != nullptr) {
      try {
        op_to_run->Run(strategy_.use_cuda_);
        ++complete;
      } catch (...) {
        exception_.Catch(std::current_exception());
        --remaining_;
        complete_q->Push(-1UL);
        return;
      }
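      // Decrement the dependency count of each consumer. The first op that
      // becomes ready continues on this thread; any others are dispatched to
      // the pool.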
      auto &outputs = op_to_run->Outputs();
      op_to_run = nullptr;
      for (auto &output : outputs) {
        for (auto &pending_op : output->PendingOps()) {
          std::atomic<int> &deps = op_deps->at(pending_op);
          if (deps.fetch_sub(1) == 1) {  // pending_op ready
            if (op_to_run == nullptr) {
              op_to_run = pending_op;
            } else {
              this->RunOpAsync(op_deps, pending_op, complete_q);
            }
          }
        }
      }
    }
    --remaining_;
    complete_q->Push(complete);
  });
}
void FastThreadedSSAGraphExecutor::PrepareAtomicOpDeps() {
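  // Asynchronously copy the static dependency counts into a fresh atomic map
  // on the spare pool thread, overlapping this work with graph execution.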
  atomic_op_deps_ = pool_.enqueue([&] {
    std::unordered_map<OpHandleBase *, std::atomic<int>> *op_deps =
        new std::unordered_map<OpHandleBase *, std::atomic<int>>;
    for (auto &pair : op_deps_) {
      (*op_deps)[pair.first] = pair.second;
    }
    return std::unique_ptr<
        std::unordered_map<OpHandleBase *, std::atomic<int>>>(op_deps);
  });
}

const ir::Graph &FastThreadedSSAGraphExecutor::Graph() const { return *graph_; }
}  // namespace details
}  // namespace framework
}  // namespace paddle