// Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#pragma once

#include <condition_variable>
#include <memory>
#include <mutex>
#include <set>
#include <string>
#include <unordered_map>
#include <vector>

#include "paddle/fluid/distributed/fleet_executor/interceptor.h"
#include "paddle/fluid/distributed/fleet_executor/interceptor_message.pb.h"
#include "paddle/fluid/distributed/fleet_executor/task_loop_thread_pool.h"
#include "paddle/fluid/platform/device_context.h"
#include "paddle/fluid/platform/enforce.h"
#include "paddle/fluid/platform/errors.h"
#include "paddle/fluid/platform/macros.h"
#include "paddle/fluid/platform/place.h"

namespace paddle {
namespace framework {
class Scope;
}

namespace distributed {

class TaskNode;
class InterceptorMessageServiceImpl;
class RuntimeGraph;
class MessageBus;

class Carrier final {
 public:
  Carrier() = default;
  Carrier(int64_t rank,
          const std::unordered_map<int64_t, int64_t>& interceptor_id_to_rank)
      : rank_(rank), interceptor_id_to_rank_(interceptor_id_to_rank) {
    thread_num_ = 1;
    thread_pool_.SetThreadNum(thread_num_);
    thread_pool_.Start();
  }
  ~Carrier();
  void Init(int64_t rank, std::shared_ptr<RuntimeGraph> runtime_graph,
            framework::Scope* root_scope, framework::Scope* minibatch_scope,
            const std::vector<framework::Scope*>& microbatch_scopes,
            const platform::Place& place);

  void Release();
  void Wait();
  void WakeUp();

  // Enqueue a message to the corresponding interceptor
  bool EnqueueInterceptorMessage(const InterceptorMessage& interceptor_message);

  // Get the interceptor registered under the given interceptor id
  Interceptor* GetInterceptor(int64_t interceptor_id);

  // Register an interceptor under the given interceptor id
  Interceptor* SetInterceptor(int64_t interceptor_id,
                              std::unique_ptr<Interceptor>);

  void SetCreatingFlag(bool flag) {}  // no-op
  void SetMsgBus(const std::shared_ptr<MessageBus>& msg_bus) {
    msg_bus_ = msg_bus;
  }

  void Start();

  bool IsInit() const;

  bool Send(const InterceptorMessage& msg);

  void Barrier();

 private:
  DISABLE_COPY_AND_ASSIGN(Carrier);

  // Create each Interceptor
  void CreateInterceptors();

  int64_t GetRank(int64_t interceptor_id) const;

  // Map from an interceptor's logical id to the actual interceptor object
  std::unordered_map<int64_t, std::unique_ptr<Interceptor>>
      interceptor_idx_to_interceptor_;

  std::vector<int64_t> source_interceptor_ids_;

  bool is_init_{false};

  std::mutex running_mutex_;
  std::condition_variable cond_var_;
  std::vector<framework::Scope*> microbatch_scopes_;
  framework::Scope* root_scope_;
  framework::Scope* minibatch_scope_;
  paddle::platform::Place place_;
  paddle::platform::DeviceContext* dev_ctx_{nullptr};
  std::shared_ptr<RuntimeGraph> runtime_graph_;
  std::shared_ptr<MessageBus> msg_bus_;
  int64_t rank_;
  std::unordered_map<int64_t, int64_t> interceptor_id_to_rank_;

  int thread_num_;
  TaskLoopThreadPool thread_pool_;
};
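
// Illustrative usage sketch (not part of the original header): one plausible
// call sequence inferred from the public interface declared above. The
// construction of msg_bus, runtime_graph, the scopes, and place is assumed to
// happen elsewhere in the fleet executor, and the comments on blocking and
// interceptor creation are assumptions based on the members above, not
// guarantees of this API.
//
//   Carrier carrier(rank, interceptor_id_to_rank);
//   carrier.SetMsgBus(msg_bus);             // share the message bus for Send()
//   carrier.Init(rank, runtime_graph, root_scope, minibatch_scope,
//                microbatch_scopes, place); // presumably builds interceptors
//   carrier.Start();                        // kick off execution
//   carrier.Wait();                         // presumably blocks until finished
//   carrier.Release();                      // release resources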

}  // namespace distributed
}  // namespace paddle