// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#pragma once

#include <future>
#include <mutex>

#include "paddle/fluid/distributed/collective/ProcessGroup.h"

#ifdef PADDLE_WITH_GLOO
#include "paddle/fluid/framework/fleet/gloo_wrapper.h"
#endif

#include "paddle/fluid/distributed/store/store.h"
#include "paddle/fluid/distributed/store/tcp_store.h"

namespace paddle {
namespace distributed {

class ProcessGroupGloo : public ProcessGroup {
 public:
  class GlooTask : public ProcessGroup::Task,
                   public std::enable_shared_from_this<GlooTask> {
   public:
37 38
    explicit GlooTask(int rank,
                      const std::vector<phi::DenseTensor>& input_tensors,
39 40 41 42 43 44 45 46 47 48 49 50 51 52 53
                      CommType comm_type);

    ~GlooTask() = default;

    virtual void Run() = 0;
    bool Wait(std::chrono::milliseconds timeout) override { return true; }
    bool IsCompleted() override { return true; }
    void Synchronize() override {}

   protected:
    friend class ProcessGroupGloo;
  };

  class GlooStore : public ::gloo::rendezvous::Store {
   public:
54
    explicit GlooStore(const std::shared_ptr<paddle::distributed::Store>& store)
55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87
        : _store(store) {}

    ~GlooStore() = default;

    std::vector<char> get(const std::string& key) override {
      VLOG(3) << "GlooStore::get";
      auto value = _store->get(key);
      return std::vector<char>(value.begin(), value.end());
    }

    void wait(const std::vector<std::string>& keys) override {
      VLOG(3) << "GlooStore::wait";
      for (auto& key : keys) {
        _store->wait(key);
      }
    }

    void set(const std::string& key, const std::vector<char>& value) override {
      VLOG(3) << "GlooStore::set";
      std::vector<uint8_t> tmp(value.begin(), value.end());
      _store->set(key, tmp);
    }

    void wait(const std::vector<std::string>& keys,
              const std::chrono::milliseconds& timeout) override {
      VLOG(3) << "GlooStore::wait";
      for (auto& key : keys) {
        _store->wait(key);
      }
      // wait(keys);
    }

   protected:
88
    std::shared_ptr<paddle::distributed::Store> _store;
89 90 91 92 93 94 95 96 97 98 99 100
  };

  class GlooOptions {
   public:
    GlooOptions() = default;
    ~GlooOptions() = default;
    static std::shared_ptr<GlooOptions> create() {
      return std::make_shared<GlooOptions>();
    }
    std::shared_ptr<::gloo::transport::Device> device;
  };

101
  explicit ProcessGroupGloo(
102 103 104 105
      const std::shared_ptr<paddle::distributed::Store>& store,
      int rank,
      int world_size,
      int gid,
106
      std::shared_ptr<GlooOptions> options);
107 108 109

  ~ProcessGroupGloo() = default;

L
LiYuRio 已提交
110 111 112
  std::shared_ptr<ProcessGroup::Task> AllGather(
      phi::DenseTensor* out_tensor,
      const phi::DenseTensor& in_tensor,
113 114
      int64_t offset,  // for compatibility, no use now
      int64_t numel,   // for compatibility, no use now
L
LiYuRio 已提交
115 116
      bool sync_op) override;

117 118 119 120 121 122 123
  std::shared_ptr<ProcessGroup::Task> Broadcast(
      phi::DenseTensor* out_tensor,
      const phi::DenseTensor& in_tensor,
      const BroadcastOptions& opts,
      bool sync_op) override;

  // TODO(sunyilun): methods below will be removed later
124
  std::shared_ptr<ProcessGroup::Task> Broadcast(
125 126
      std::vector<phi::DenseTensor>& inputs,
      std::vector<phi::DenseTensor>& outputs,
127 128
      const BroadcastOptions& = BroadcastOptions()) override;

129 130 131 132 133 134
  std::shared_ptr<ProcessGroup::Task> Broadcast(
      std::vector<phi::DenseTensor>& inputs,
      std::vector<phi::DenseTensor>& outputs,
      const BroadcastOptions& opts,
      bool sync_op) override;

135
  std::shared_ptr<ProcessGroup::Task> AllReduce(
136 137
      std::vector<phi::DenseTensor>& inputs,
      std::vector<phi::DenseTensor>& outputs,
138 139
      const AllreduceOptions& opts = AllreduceOptions()) override;

140 141 142 143 144 145
  std::shared_ptr<ProcessGroup::Task> AllReduce(
      std::vector<phi::DenseTensor>& inputs,
      std::vector<phi::DenseTensor>& outputs,
      const AllreduceOptions& opts,
      bool sync_op) override;

146 147 148 149
  std::shared_ptr<ProcessGroup::Task> Barrier(
      const BarrierOptions& = BarrierOptions()) override;

  std::shared_ptr<ProcessGroup::Task> AllGather(
150 151
      std::vector<phi::DenseTensor>& in_tensors,
      std::vector<phi::DenseTensor>& out_tensors) override;
152

L
LiYuRio 已提交
153 154 155 156 157
  std::shared_ptr<ProcessGroup::Task> AllGather(
      std::vector<phi::DenseTensor>& in_tensors,
      std::vector<phi::DenseTensor>& out_tensors,
      bool sync_op) override;

158 159 160 161 162 163
  std::shared_ptr<ProcessGroup::Task> Reduce(
      std::vector<phi::DenseTensor>& in_tensors,
      std::vector<phi::DenseTensor>& out_tensors,
      const ReduceOptions& opts,
      bool sync_op) override;

164
  std::shared_ptr<ProcessGroup::Task> Reduce(
165 166 167 168
      std::vector<phi::DenseTensor>& in_tensors,
      std::vector<phi::DenseTensor>& out_tensors,
      const ReduceOptions& opts) override;

169 170 171 172 173 174
  std::shared_ptr<ProcessGroup::Task> Scatter(
      std::vector<phi::DenseTensor>& in_tensors,
      std::vector<phi::DenseTensor>& out_tensors,
      const ScatterOptions&,
      bool sync_op) override;

175 176 177 178
  std::shared_ptr<ProcessGroup::Task> Scatter(
      std::vector<phi::DenseTensor>& in_tensors,
      std::vector<phi::DenseTensor>& out_tensors,
      const ScatterOptions&) override;
179

180 181 182
  std::shared_ptr<::gloo::Context> get_context() { return _context; }
  uint64_t next_tag() { return _tag++; }

L
LiYuRio 已提交
183
  std::string GetBackendName() const override { return "GLOO"; }
184

185 186 187 188 189
  const phi::DeviceContext& GetDeviceContext(
      const Place& place) const override {
    return *platform::DeviceContextPool::Instance().Get(place);
  }

190 191 192 193 194 195 196 197 198 199
  // Helper functions for Gloo.
  static std::shared_ptr<::gloo::transport::Device> createDeviceForHostname(
      const std::string& hostname);
  static std::shared_ptr<::gloo::transport::Device> createDeviceForInterface(
      const std::string& ifname);
  static std::shared_ptr<::gloo::transport::Device> createDefaultDevice();

 protected:
  uint32_t _tag;
  std::shared_ptr<gloo::rendezvous::Context> _context;
200
  std::shared_ptr<::gloo::rendezvous::Store> _store;
201 202 203 204
};

}  // namespace distributed
}  // namespace paddle