/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#ifdef PADDLE_WITH_CUDA
#include <nccl.h>
#endif
#include <sys/time.h>
#include <thread>  // NOLINT

#include "paddle/fluid/framework/data_type.h"
#include "paddle/fluid/operators/distributed/sendrecvop_utils.h"
#include "paddle/fluid/operators/distributed/variable_response.h"

namespace paddle {
namespace operators {
namespace distributed {

using VarMsg = sendrecv::VariableMessage;

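// Returns a TensorPayload whose bytes can be handed to the RPC layer: GPU
// tensors are first copied into pinned (CUDAPinnedPlace) host memory, while
// CPU tensors are wrapped without an extra copy.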
static TensorPayload GetCommunicationAllocationFromTensor(
    const platform::DeviceContext& ctx, const framework::Tensor& tensor) {
  if (is_gpu_place(ctx.GetPlace())) {
#ifdef PADDLE_WITH_CUDA
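    // Stage the device tensor in page-locked host memory so its bytes can be
    // serialized on the CPU side.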
    PADDLE_ENFORCE(is_gpu_place(tensor.place()));
    auto& gpu_dev_ctx =
        reinterpret_cast<const platform::CUDADeviceContext&>(ctx);
    auto copy_size = tensor.numel() * framework::SizeOfType(tensor.type());
    platform::CUDAPinnedPlace cuda_pinned;
    auto result = memory::AllocShared(
        cuda_pinned, copy_size, memory::allocation::Allocator::kCrossDevice);

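    // The copy runs asynchronously on the context's CUDA stream; ctx.Wait()
    // below guarantees it has completed before the pinned buffer is used.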
    memory::Copy(cuda_pinned, result->ptr(),
                 boost::get<platform::CUDAPlace>(tensor.place()),
                 tensor.data<void>(), copy_size, gpu_dev_ctx.stream());

    ctx.Wait();
    return TensorPayload(result);
#else
    PADDLE_THROW("This situation should not happen");
#endif
  } else {
    return TensorPayload(tensor);
  }
}
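
// Fills `request` with the LoDTensor's data type, dims and LoD, then returns
// a payload for the tensor's bytes.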
TensorPayload GetTensorPayload(framework::Variable* var,
                               const platform::DeviceContext& ctx,
                               VarMsg* request) {
  auto tensor = var->Get<framework::LoDTensor>();
  // FIXME(wuyi): data types in send_recv.proto are copied from
  // framework.proto
  request->set_data_type(
      static_cast<VarMsg::Type>(framework::ToDataType(tensor.type())));
  for (auto& dim : framework::vectorize(tensor.dims())) {
    request->add_dims(dim);
  }
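  // Serialize the tensor's LoD so the receiver can rebuild it.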
  const framework::LoD lod = tensor.lod();
  if (lod.size() > 0) {
    request->set_lod_level(lod.size());
    for (auto& each : lod) {
      VarMsg::LodData* lod_inner = request->add_lod();
      for (auto& d : each) {
        lod_inner->add_lod_data(d);
      }
    }
  }
  return GetCommunicationAllocationFromTensor(ctx, tensor);
}

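// Fills `request` with the SelectedRows' value data type, height and dims,
// then returns a payload for the value tensor's bytes.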
TensorPayload GetSelectedRowsPayload(framework::Variable* var,
                                     const platform::DeviceContext& ctx,
                                     VarMsg* request) {
  auto* slr = var->GetMutable<framework::SelectedRows>();
  request->set_data_type(
      static_cast<VarMsg::Type>(framework::ToDataType(slr->value().type())));
  request->set_lod_level(0);
  request->set_slr_height(slr->height());

  for (auto& dim : framework::vectorize(slr->value().dims())) {
    request->add_dims(dim);
  }

  auto* tensor = slr->mutable_value();
  return GetCommunicationAllocationFromTensor(ctx, *tensor);
}

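// A TensorPayload either owns a freshly allocated communication buffer or
// aliases an existing tensor's allocation; ptr() and memory_size() describe
// the byte range to send.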
TensorPayload::TensorPayload(std::shared_ptr<memory::Allocation> allocation)
    : allocation_(allocation), offset_(0), memory_size_(allocation->size()) {}
TensorPayload::TensorPayload(const framework::Tensor& tensor)
    : allocation_(tensor.Holder()),
      offset_(tensor.offset()),
      memory_size_(tensor.numel() * framework::SizeOfType(tensor.type())) {}
void* TensorPayload::ptr() const {
  return reinterpret_cast<void*>(
      reinterpret_cast<uintptr_t>(allocation_->ptr()) + offset_);
}
size_t TensorPayload::memory_size() const { return memory_size_; }
}  // namespace distributed
}  // namespace operators
}  // namespace paddle