//   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "paddle/fluid/framework/details/broadcast_op_handle.h"
#include "paddle/fluid/framework/details/container_cast.h"
#include "paddle/fluid/framework/details/variable_visitor.h"

namespace paddle {
namespace framework {
namespace details {

void BroadcastOpHandle::RunImpl() {
  if (places_.size() == 1) return;

  // The input and output may have dummy vars.
  VarHandle *in_var_handle;
  {
    auto in_var_handles = DynamicCast<VarHandle>(inputs_);
    PADDLE_ENFORCE_EQ(in_var_handles.size(), 1,
                      "The number of inputs should be one.");
    in_var_handle = in_var_handles[0];
  }

  auto out_var_handles = DynamicCast<VarHandle>(outputs_);

  PADDLE_ENFORCE_EQ(
      out_var_handles.size(), places_.size(),
      "The number of outputs should equal the number of places.");

  // Make sure the input variable has been generated before it is read; the
  // wait itself is an asynchronous operation.
  WaitInputVarGenerated(*in_var_handle);

  std::vector<const Scope *> var_scopes;
  for (auto *s : local_scopes_) {
    var_scopes.emplace_back(s->FindVar(kLocalExecScopeName)->Get<Scope *>());
  }
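  // var_scopes now holds each place's local execution scope; all variable
  // lookups below go through these scopes rather than the outer local_scopes_.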

  auto *in_var =
      var_scopes.at(in_var_handle->scope_idx_)->FindVar(in_var_handle->name_);
  PADDLE_ENFORCE_NOT_NULL(in_var);

  Tensor &in_tensor = VariableVisitor::GetMutableTensor(in_var);

  // NOTE(zcd): The input's Place can be obtained from both in_tensor and
  // in_var_handle, and the two may differ: the Place taken from in_tensor is
  // determined at runtime, while the one from in_var_handle is fixed when the
  // SSA graph is built. If they differ, a DataTransform should be applied;
  // this has not been done yet.
  for (auto *out_var_handle : out_var_handles) {
    if (*out_var_handle == *in_var_handle) {
      continue;
    }
    auto &out_p = out_var_handle->place_;
    auto *out_var = var_scopes.at(out_var_handle->scope_idx_)
                        ->FindVar(out_var_handle->name_);
    PADDLE_ENFORCE_NOT_NULL(out_var);
    PADDLE_ENFORCE_EQ(
        out_p.which(), in_tensor.place().which(),
        "Currently, Places of input and output must be all on CPU "
        "or all on GPU.");
    VariableVisitor::ShareDimsAndLoD(*in_var, out_var);
    VariableVisitor::GetMutableTensor(out_var).mutable_data(out_p,
                                                            in_tensor.type());
  }

  if (platform::is_cpu_place(in_tensor.place())) {
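    // CPU source: broadcast by copying the input tensor to every other place.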
    for (auto *out_var_handle : out_var_handles) {
      if (*out_var_handle == *in_var_handle) {
        continue;
      }

      auto &out_p = out_var_handle->place_;
      auto dev_ctx = dev_ctxes_.at(out_p);
      auto *out_var = var_scopes.at(out_var_handle->scope_idx_)
                          ->FindVar(out_var_handle->name_);

      RunAndRecordEvent(out_p, [in_tensor, out_var, dev_ctx, out_p] {
        paddle::framework::TensorCopy(
            in_tensor, out_p, *dev_ctx,
            &VariableVisitor::GetMutableTensor(out_var));
      });
    }
  } else {
#ifdef PADDLE_WITH_CUDA
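    // GPU source: issue one ncclBcast per destination device, all launched
    // inside a single NCCL group. The device holding the input tensor acts as
    // the broadcast root.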
    VarHandle *out_handle = nullptr;
    int root_id = boost::get<platform::CUDAPlace>(in_tensor.place()).device;
    std::vector<std::function<void()>> broadcast_calls;

    for (auto *out_var_handle : out_var_handles) {
      Variable *out_var = var_scopes.at(out_var_handle->scope_idx_)
                              ->FindVar(out_var_handle->name_);

      int dst_id =
          boost::get<platform::CUDAPlace>(out_var_handle->place_).device;

      auto &nccl_ctx = nccl_ctxs_->at(dst_id);

      void *send_recv_buffer = nullptr;
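      // The root device sends from the input tensor's own buffer; every other
      // device receives directly into its output tensor.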
      if (root_id == dst_id) {
        send_recv_buffer = const_cast<void *>(in_tensor.data<void>());
        out_handle = out_var_handle;
      } else {
        send_recv_buffer =
            VariableVisitor::GetMutableTensor(out_var).mutable_data(
                out_var_handle->place_);
      }

      int type = platform::ToNCCLDataType(in_tensor.type());
      size_t numel = static_cast<size_t>(in_tensor.numel());
      broadcast_calls.emplace_back(
          [send_recv_buffer, numel, type, root_id, &nccl_ctx] {
            PADDLE_ENFORCE(platform::dynload::ncclBcast(
                send_recv_buffer, numel, static_cast<ncclDataType_t>(type),
                root_id, nccl_ctx.comm_, nccl_ctx.stream()));
          });
    }

    this->RunAndRecordEvent([&] {
      {
        platform::NCCLGroupGuard guard;
        for (auto &call : broadcast_calls) {
          call();
        }
      }
      // TODO(zcd): Maybe the unequal operator is not appropriate here.
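      // On the root device the input tensor was used as the send buffer, so
      // the corresponding output variable still needs an explicit local copy
      // when it is not the same handle as the input.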
      if (*out_handle != *in_var_handle) {
        auto out_var = var_scopes.at(in_var_handle->scope_idx_)
                           ->FindVar(out_var_handles[0]->name_);
        paddle::framework::TensorCopy(
            in_tensor, in_var_handle->place_,
            *(dev_ctxes_.at(in_var_handle->place_)),
            &VariableVisitor::GetMutableTensor(out_var));
      }
    });
#else
    PADDLE_THROW("CUDA is not enabled.");
#endif
  }
}

void BroadcastOpHandle::WaitInputVarGenerated(const VarHandle &in_var) {
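  // Make every device context wait for the op that generated the input, so
  // the broadcast never reads a partially written tensor.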
  if (in_var.generated_op_) {
    for (auto &pair : dev_ctxes_) {
      in_var.generated_op_->Wait(pair.second);
    }
  }
}

std::string BroadcastOpHandle::Name() const { return "broadcast"; }
}  // namespace details
}  // namespace framework
}  // namespace paddle