//   Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once

// network header files
#if defined(PADDLE_WITH_CUDA) && !defined(_WIN32)
#include <arpa/inet.h>
#include <netdb.h>
#include <netinet/in.h>
#include <stdlib.h>
#include <sys/socket.h>
#endif

#include <string>
#include <utility>
#include <vector>

#include "paddle/fluid/framework/scope.h"
#include "paddle/fluid/framework/variable.h"
#include "paddle/fluid/platform/device_context.h"

#if defined(PADDLE_WITH_NCCL)
#include "paddle/fluid/imperative/all_reduce.h"
#include "paddle/fluid/platform/dynload/nccl.h"
#include "paddle/fluid/platform/nccl_helper.h"
#endif

#include "paddle/fluid/framework/lod_tensor.h"
#include "paddle/fluid/framework/selected_rows.h"
#include "paddle/fluid/platform/collective_helper.h"
#include "paddle/fluid/platform/place.h"
#include "paddle/fluid/string/split.h"
#include "paddle/fluid/string/string_helper.h"

namespace paddle {
namespace imperative {

// Describes the data parallel configuration of the current trainer: how many
// trainers participate, which rank this process holds, and the endpoints used
// to exchange communication ids between trainers.
struct ParallelStrategy {
  int nranks_{1};      // total number of trainers in the job
  int local_rank_{0};  // rank of this trainer, in [0, nranks_)
  std::vector<std::string> trainer_endpoints_{};  // ip:port of every trainer
  std::string current_endpoint_{""};              // ip:port of this trainer
  // TODO(shenliang03): support multi stream communication
  int nrings_{1};
};

// Abstract base class that prepares the collective communication environment
// for imperative (dygraph) data parallel training and exposes the all-reduce
// primitive used to synchronize tensors (typically gradients) across trainers.
class ParallelContext {
 public:
  explicit ParallelContext(const ParallelStrategy& strategy,
                           const platform::Place& place)
      : strategy_(strategy), place_(place) {}

  virtual ~ParallelContext() {}

  // Builds the communication channels (e.g. NCCL communicators) described by
  // strategy_ before any collective operation is issued.
  virtual void Init() = 0;

  // All-reduces `src` into `dst` on the communication ring `ring_id`.  If
  // `use_calc_stream` is true, the reduction is issued on the calculation
  // stream instead of the dedicated communication stream.
  virtual void AllReduceByStream(const framework::Variable& src,
                                 framework::Variable* dst, int ring_id = 0,
                                 bool use_calc_stream = false) = 0;
#if defined(PADDLE_WITH_NCCL)
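  // Returns the CUDA device context bound to the given communication ring.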
  virtual paddle::platform::CUDADeviceContext* GetDeviceContext(
      int ring_id) = 0;
#endif

  // Number of communication rings configured in the strategy.
  inline int GetNRings() { return strategy_.nrings_; }

 protected:
  ParallelStrategy strategy_;
  platform::Place place_;
};

#if defined(PADDLE_WITH_NCCL)
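// NCCL-backed implementation of ParallelContext for multi-GPU training.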
class NCCLParallelContext : public ParallelContext {
 public:
  explicit NCCLParallelContext(const ParallelStrategy& strategy,
                               const platform::Place& place)
      : ParallelContext(strategy, place) {}

  ~NCCLParallelContext() {}

  // Broadcasts the NCCL unique ids from the root trainer to every other
  // trainer so that all processes can build the same communicators.
  void BcastNCCLId(std::vector<ncclUniqueId>& nccl_ids, int root);  // NOLINT

  void Init() override;

  void AllReduceByStream(const framework::Variable& src,
                         framework::Variable* dst, int ring_id,
                         bool use_calc_stream) override;

  paddle::platform::CUDADeviceContext* GetDeviceContext(int ring_id) override;

 protected:
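  // RecvNCCLID/SendNCCLID exchange the ncclUniqueIds with a peer trainer over
  // a socket connection to `endpoint` (ip:port).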
  void RecvNCCLID(const std::string& endpoint,
                  std::vector<ncclUniqueId>& nccl_ids);  // NOLINT

  void SendNCCLID(const std::string& endpoint,
                  const std::vector<ncclUniqueId>& nccl_ids);
};
#endif
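// Illustrative usage sketch (the endpoints, device id, and variables below are
// placeholders, not part of this header):
//
//   imperative::ParallelStrategy strategy;
//   strategy.nranks_ = 2;
//   strategy.local_rank_ = 0;
//   strategy.trainer_endpoints_ = {"127.0.0.1:6170", "127.0.0.1:6171"};
//   strategy.current_endpoint_ = "127.0.0.1:6170";
//
//   imperative::NCCLParallelContext ctx(strategy, platform::CUDAPlace(0));
//   ctx.Init();  // exchange ncclUniqueIds and create the communicators
//   ctx.AllReduceByStream(grad_var, &reduced_var, /*ring_id=*/0,
//                         /*use_calc_stream=*/false);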

}  // namespace imperative
}  // namespace paddle