broadcast_op_handle.cc
//   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "paddle/fluid/framework/details/broadcast_op_handle.h"
#include "paddle/fluid/framework/details/container_cast.h"
#include "paddle/fluid/framework/details/variable_visitor.h"
#include "paddle/fluid/platform/profiler.h"

namespace paddle {
namespace framework {
namespace details {

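// Broadcast the single input variable to every place in places_: on CPU the
// tensor is copied with TensorCopy; with CUDA enabled, a grouped ncclBcast is
// issued from the device that already holds the data.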
void BroadcastOpHandle::RunImpl() {
  platform::RecordEvent record_event(Name(), dev_ctxes_.begin()->second);

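  // Nothing to broadcast when there is only one place.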
  if (places_.size() == 1) return;

  // The input and output may have dummy vars.
  VarHandle *in_var_handle;
  {
    auto in_var_handles = DynamicCast<VarHandle>(inputs_);
    PADDLE_ENFORCE_EQ(in_var_handles.size(), 1,
                      "The number of input should be one.");
    in_var_handle = in_var_handles[0];
  }

  auto out_var_handles = DynamicCast<VarHandle>(outputs_);

  PADDLE_ENFORCE_EQ(
      out_var_handles.size(), places_.size(),
      "The number of outputs should equal the number of places.");

  WaitInputVarGenerated();

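  // Collect the per-place local execution scopes so that variables can be
  // looked up by scope index.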
  std::vector<const Scope *> var_scopes;
  for (auto *s : local_scopes_) {
    var_scopes.emplace_back(s->FindVar(kLocalExecScopeName)->Get<Scope *>());
  }

  auto *in_var =
      var_scopes.at(in_var_handle->scope_idx_)->FindVar(in_var_handle->name_);
  PADDLE_ENFORCE_NOT_NULL(in_var);
  Tensor &in_tensor = VariableVisitor::GetMutableTensor(in_var);

  InitOutputValue(*in_var_handle, out_var_handles);

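  // CPU broadcast: copy the input tensor into every output variable that is
  // not already the same variable as the input.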
  if (platform::is_cpu_place(in_tensor.place())) {
    for (auto *out_var_handle : out_var_handles) {
      if (out_var_handle->IsTheSameVar(*in_var_handle)) {
        continue;
      }
      auto &out_p = out_var_handle->place_;
      auto *out_var = var_scopes.at(out_var_handle->scope_idx_)
                          ->FindVar(out_var_handle->name_);

      RunAndRecordEvent(out_p, [in_tensor, out_var] {
        paddle::framework::TensorCopy(
            in_tensor, platform::CPUPlace(),
            &VariableVisitor::GetMutableTensor(out_var));
      });
    }
  } else {
#ifdef PADDLE_WITH_CUDA
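    // GPU broadcast: the device that holds the input tensor acts as the NCCL
    // root, and every place gets one ncclBcast call, launched together below.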
    VarHandle *out_handle = nullptr;
    int root_id = boost::get<platform::CUDAPlace>(in_tensor.place()).device;
    std::vector<std::function<void()>> broadcast_calls;

    int type = platform::ToNCCLDataType(in_tensor.type());
    size_t numel = static_cast<size_t>(in_tensor.numel());

    for (auto out_var_handle : out_var_handles) {
      Variable *out_var = var_scopes.at(out_var_handle->scope_idx_)
                              ->FindVar(out_var_handle->name_);

      int dst_id =
          boost::get<platform::CUDAPlace>(out_var_handle->place_).device;

      auto &nccl_ctx = nccl_ctxs_->at(dst_id);

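      // The root device broadcasts from the input tensor's own buffer; every
      // other device receives into its freshly allocated output tensor.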
      void *send_recv_buffer = nullptr;
      if (root_id == dst_id) {
        send_recv_buffer = const_cast<void *>(in_tensor.data<void>());
        out_handle = out_var_handle;
      } else {
        send_recv_buffer = VariableVisitor::GetMutableTensor(out_var)
                               .Resize(in_tensor.dims())
                               .mutable_data(out_var_handle->place_);
      }

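      // Defer the ncclBcast so that all calls can be issued inside a single
      // NCCL group further below.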
      broadcast_calls.emplace_back(
          [send_recv_buffer, numel, type, root_id, &nccl_ctx] {
            PADDLE_ENFORCE(platform::dynload::ncclBcast(
                send_recv_buffer, numel, static_cast<ncclDataType_t>(type),
                root_id, nccl_ctx.comm_, nccl_ctx.stream()));
          });
    }

    this->RunAndRecordEvent([&] {
      {
        platform::NCCLGroupGuard guard;
        for (auto &call : broadcast_calls) {
          call();
        }
      }

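      // If the output on the root place is a different variable from the
      // input, copy the input tensor into it as well.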
      if (!out_handle->IsTheSameVar(*in_var_handle)) {
        auto out_var = var_scopes.at(in_var_handle->scope_idx_)
                           ->FindVar(out_var_handles[0]->name_);
        paddle::framework::TensorCopy(
            in_tensor, in_var_handle->place_,
            *(dev_ctxes_.at(in_var_handle->place_)),
            &VariableVisitor::GetMutableTensor(out_var));
      }
    });
#else
    PADDLE_THROW("CUDA is not enabled.");
#endif
  }
}

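// Prepare every output before the broadcast: share dims and LoD with the
// input, check that input and output places are consistently CPU or GPU, and
// allocate the output memory.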
void BroadcastOpHandle::InitOutputValue(
    const VarHandle &in_var_handle,
    const std::vector<VarHandle *> &out_var_handles) const {
  std::vector<const Scope *> var_scopes;
  for (auto *s : local_scopes_) {
    var_scopes.emplace_back(s->FindVar(kLocalExecScopeName)->Get<Scope *>());
  }
  auto *in_var =
      var_scopes.at(in_var_handle.scope_idx_)->FindVar(in_var_handle.name_);

  Tensor &in_tensor = VariableVisitor::GetMutableTensor(in_var);

  // NOTE: The input and output tensors must be placed either all on GPU or
  // all on CPU.
  for (auto *out_var_handle : out_var_handles) {
    if (out_var_handle->IsTheSameVar(in_var_handle)) {
      continue;
    }
    auto t_out_p = out_var_handle->place_;
    auto *out_var = var_scopes.at(out_var_handle->scope_idx_)
                        ->FindVar(out_var_handle->name_);
    PADDLE_ENFORCE_NOT_NULL(out_var);
    if (is_gpu_place(in_tensor.place())) {
      PADDLE_ENFORCE(platform::is_gpu_place(t_out_p),
                     "Places of input and output must be all on GPU.");
    } else {
      t_out_p = platform::CPUPlace();
    }
    VariableVisitor::ShareDimsAndLoD(*in_var, out_var);
    VariableVisitor::GetMutableTensor(out_var).mutable_data(t_out_p,
                                                            in_tensor.type());
  }
}

std::string BroadcastOpHandle::Name() const { return "broadcast"; }
}  // namespace details
}  // namespace framework
}  // namespace paddle