// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#pragma once

#include <future>
#include <mutex>

#include "paddle/fluid/distributed/collective/ProcessGroup.h"

#ifdef PADDLE_WITH_GLOO
#include "paddle/fluid/framework/fleet/gloo_wrapper.h"
#endif

#include "paddle/fluid/distributed/store/store.h"
#include "paddle/fluid/distributed/store/tcp_store.h"

constexpr const char* GLOO_BACKEND_NAME = "GLOO";

namespace paddle {
namespace distributed {

class ProcessGroupGloo : public ProcessGroup {
 public:
  class GlooTask : public ProcessGroup::Task,
                   public std::enable_shared_from_this<GlooTask> {
   public:
L
lilong12 已提交
39 40
    explicit GlooTask(int rank,
                      const std::vector<phi::DenseTensor>& input_tensors,
41 42 43 44 45 46 47 48 49 50 51 52 53 54 55
                      CommType comm_type);

    ~GlooTask() = default;

    virtual void Run() = 0;
    bool Wait(std::chrono::milliseconds timeout) override { return true; }
    bool IsCompleted() override { return true; }
    void Synchronize() override {}

   protected:
    friend class ProcessGroupGloo;
  };

  class GlooStore : public ::gloo::rendezvous::Store {
   public:
56
    explicit GlooStore(const std::shared_ptr<paddle::distributed::Store>& store)
57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89
        : _store(store) {}

    ~GlooStore() = default;

    std::vector<char> get(const std::string& key) override {
      VLOG(3) << "GlooStore::get";
      auto value = _store->get(key);
      return std::vector<char>(value.begin(), value.end());
    }

    void wait(const std::vector<std::string>& keys) override {
      VLOG(3) << "GlooStore::wait";
      for (auto& key : keys) {
        _store->wait(key);
      }
    }

    void set(const std::string& key, const std::vector<char>& value) override {
      VLOG(3) << "GlooStore::set";
      std::vector<uint8_t> tmp(value.begin(), value.end());
      _store->set(key, tmp);
    }

    void wait(const std::vector<std::string>& keys,
              const std::chrono::milliseconds& timeout) override {
      VLOG(3) << "GlooStore::wait";
      for (auto& key : keys) {
        _store->wait(key);
      }
      // wait(keys);
    }

   protected:
90
    std::shared_ptr<paddle::distributed::Store> _store;
91 92 93 94 95 96 97 98 99 100 101 102
  };

  class GlooOptions {
   public:
    GlooOptions() = default;
    ~GlooOptions() = default;
    static std::shared_ptr<GlooOptions> create() {
      return std::make_shared<GlooOptions>();
    }
    std::shared_ptr<::gloo::transport::Device> device;
  };

103 104
  explicit ProcessGroupGloo(
      const std::shared_ptr<paddle::distributed::Store>& store, int rank,
L
lilong12 已提交
105
      int world_size, int gid, std::shared_ptr<GlooOptions> options);
106 107 108 109

  ~ProcessGroupGloo() = default;

  std::shared_ptr<ProcessGroup::Task> Broadcast(
L
lilong12 已提交
110 111
      std::vector<phi::DenseTensor>& inputs,
      std::vector<phi::DenseTensor>& outputs,
112 113 114
      const BroadcastOptions& = BroadcastOptions()) override;

  std::shared_ptr<ProcessGroup::Task> AllReduce(
L
lilong12 已提交
115 116
      std::vector<phi::DenseTensor>& inputs,
      std::vector<phi::DenseTensor>& outputs,
117 118
      const AllreduceOptions& opts = AllreduceOptions()) override;

119 120 121 122
  std::shared_ptr<ProcessGroup::Task> Barrier(
      const BarrierOptions& = BarrierOptions()) override;

  std::shared_ptr<ProcessGroup::Task> AllGather(
L
lilong12 已提交
123 124
      std::vector<phi::DenseTensor>& in_tensors,
      std::vector<phi::DenseTensor>& out_tensors) override;
125 126

  std::shared_ptr<ProcessGroup::Task> Reduce(
L
lilong12 已提交
127 128 129 130 131 132 133 134
      std::vector<phi::DenseTensor>& in_tensors,
      std::vector<phi::DenseTensor>& out_tensors,
      const ReduceOptions& opts) override;

  std::shared_ptr<ProcessGroup::Task> Scatter(
      std::vector<phi::DenseTensor>& in_tensors,
      std::vector<phi::DenseTensor>& out_tensors,
      const ScatterOptions&) override;
135

136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152
  std::shared_ptr<::gloo::Context> get_context() { return _context; }
  uint64_t next_tag() { return _tag++; }

  const std::string GetBackendName() const override {
    return GLOO_BACKEND_NAME;
  }

  // Helper functions for Gloo.
  static std::shared_ptr<::gloo::transport::Device> createDeviceForHostname(
      const std::string& hostname);
  static std::shared_ptr<::gloo::transport::Device> createDeviceForInterface(
      const std::string& ifname);
  static std::shared_ptr<::gloo::transport::Device> createDefaultDevice();

 protected:
  uint32_t _tag;
  std::shared_ptr<gloo::rendezvous::Context> _context;
153
  std::shared_ptr<::gloo::rendezvous::Store> _store;
154 155 156 157
};

}  // namespace distributed
}  // namespace paddle