//   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "paddle/fluid/framework/details/broadcast_op_handle.h"
#include "paddle/fluid/framework/details/container_cast.h"
#include "paddle/fluid/framework/details/variable_visitor.h"

namespace paddle {
namespace framework {
namespace details {
BroadcastOpHandle::BroadcastOpHandle(const std::vector<Scope *> &local_scopes,
                                     const std::vector<platform::Place> &places)
    : local_scopes_(local_scopes), places_(places) {}

void BroadcastOpHandle::RunImpl() {
  // The input and output may have dummy vars.
  VarHandle *in_var_handle;

  {
    auto in_var_handles = DynamicCast<VarHandle>(inputs_);
    PADDLE_ENFORCE_EQ(in_var_handles.size(), 1,
                      "The number of input should be one.");
    in_var_handle = in_var_handles[0];
  }

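  // Collect the real (non-dummy) output handles; there should be one per
  // place.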
  auto out_var_handles = DynamicCast<VarHandle>(outputs_);

  PADDLE_ENFORCE_EQ(
      out_var_handles.size(), places_.size(),
      "The number of output should equal to the number of places.");

  // Wait until the input variable has been generated; this Wait is an
  // asynchronous operation.
  WaitInputVarGenerated(*in_var_handle);

  auto *in_var = local_scopes_.at(in_var_handle->scope_idx_)
                     ->FindVar(in_var_handle->name_);
  PADDLE_ENFORCE_NOT_NULL(in_var);
  Tensor &in_tensor = VariableVisitor::GetMutableTensor(in_var);

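  // Broadcast the input tensor to every output place.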
  for (auto *out : out_var_handles) {
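    // The output is the input itself; nothing to copy.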
    if (*out == *in_var_handle) {
      continue;
    }

    auto &out_p = out->place_;
    auto *out_var = local_scopes_.at(out->scope_idx_)->FindVar(out->name_);

    PADDLE_ENFORCE_EQ(out_p.which(), in_var_handle->place_.which(),
                      "Places must be all on CPU or all on CUDA.");

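    // Match the input's dims and LoD, then allocate output memory of the
    // input's dtype on the destination place.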
    VariableVisitor::ShareDimsAndLoD(*in_var, out_var);
    VariableVisitor::GetMutableTensor(out_var).mutable_data(out_p,
                                                            in_tensor.type());

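    // Enqueue the copy on the destination place's device context and record
    // an event so downstream ops can wait on its completion.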
    auto dev_ctx = dev_ctxes_[out_p];
    RunAndRecordEvent(out_p, [in_tensor, out_var, dev_ctx, out_p] {
      paddle::framework::TensorCopy(
          in_tensor, out_p, *(dev_ctx),
          &VariableVisitor::GetMutableTensor(out_var));
    });
  }
}

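// Make every known device context wait for the op that generated the input
// variable before the broadcast reads it.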
void BroadcastOpHandle::WaitInputVarGenerated(const VarHandle &in_var) {
  for (auto &pair : dev_ctxes_) {
    in_var.generated_op_->Wait(pair.second);
  }
}

std::string BroadcastOpHandle::Name() const { return "broadcast"; }
}  // namespace details
}  // namespace framework
}  // namespace paddle