/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/fluid/operators/detail/sendrecvop_utils.h"

#ifdef PADDLE_WITH_CUDA
#include <nccl.h>
#endif
#include <sys/time.h>
#include <thread>  // NOLINT

#include "google/protobuf/io/coded_stream.h"
#include "google/protobuf/io/zero_copy_stream.h"
#include "paddle/fluid/framework/data_type.h"
#include "paddle/fluid/operators/detail/bytebuffer_stream.h"
#include "paddle/fluid/operators/detail/proto_encoder_helper.h"
#include "paddle/fluid/operators/detail/variable_response.h"

namespace paddle {
namespace operators {
namespace detail {

void SerializeToByteBuffer(const std::string& name, framework::Variable* var,
                           const platform::DeviceContext& ctx,
                           ::grpc::ByteBuffer* msg,
                           const std::string& out_name) {
  using VarMsg = sendrecv::VariableMessage;
  // When using GPU, the copied CPU buffer must be freed
  // when the ByteBuffer is destroyed.
  // TODO(typhoonzero): add unref here; if we have dependent
  // parallelism execution, we need to know when to free the tensor.
  DestroyCallback destroy_callback = [](void* backing) {};

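  // The 1024-byte buffer holds only the encoded metadata (name, type,
  // dims, LoD); the tensor payload itself is sent as separate
  // zero-copy slices below.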
  auto buffer = std::unique_ptr<char[]>(new char[1024]);
  void* buf = buffer.get();

  void* payload = nullptr;
  size_t payload_size = 0;
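  // ProtoEncodeHelper writes VariableMessage fields in protobuf wire
  // format directly into buf, without building a protobuf object.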
  ProtoEncodeHelper e(static_cast<char*>(buf), 1024);
  e.WriteString(VarMsg::kVarnameFieldNumber, name);
  if (var->IsType<framework::LoDTensor>()) {
    e.WriteUint64(VarMsg::kTypeFieldNumber, 0);
  } else if (var->IsType<framework::SelectedRows>()) {
    e.WriteUint64(VarMsg::kTypeFieldNumber, 1);
#ifdef PADDLE_WITH_CUDA
  } else if (var->IsType<ncclUniqueId>()) {
    // NOTE: sendrecv only supports the RAW type for NCCL_ID
    VLOG(3) << "serializing: setting var type nccl id";
    e.WriteUint64(VarMsg::kTypeFieldNumber, 2);
#endif
  }

  if (!out_name.empty()) {
    e.WriteString(VarMsg::kOutVarnameFieldNumber, out_name);
  }
  if (var->IsType<framework::LoDTensor>()) {
    // ===========================Tensor==================================
    auto tensor = var->Get<framework::LoDTensor>();
    e.WriteUint64(VarMsg::kDataTypeFieldNumber,
                  framework::ToDataType(tensor.type()));
    for (auto& dim : framework::vectorize(tensor.dims())) {
      e.WriteUint64(VarMsg::kDimsFieldNumber, dim);
    }
    auto lod = tensor.lod();  // std::vector<Vector<size_t>>
    if (lod.size() > 0) {
      e.WriteUint64(VarMsg::kLodLevelFieldNumber, lod.size());

      for (auto& each : lod) {
        e.WriteVarlengthBeginning(VarMsg::kLodFieldNumber,
                                  2 +      // tag + varintlength of submessage
                                      1 +  // kLodDataFieldNumber
                                      each.size());
        // auto copied from GPU
        for (auto& d : each) {
          e.WriteUint64(VarMsg::LodData::kLodDataFieldNumber, d);
        }
      }
    }
    if (platform::is_gpu_place(ctx.GetPlace())) {
#ifdef PADDLE_WITH_CUDA
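      // gRPC reads from host memory, so stage a copy of the GPU tensor
      // in a CPU buffer; destroy_callback frees it once gRPC releases
      // the payload slice.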
      PADDLE_ENFORCE(platform::is_gpu_place(tensor.place()));
      platform::CPUPlace cpu;
      auto& gpu_dev_ctx = static_cast<const platform::CUDADeviceContext&>(ctx);
      auto copy_size = tensor.numel() * framework::SizeOfType(tensor.type());
      payload = memory::Alloc(cpu, copy_size);

      memory::Copy(cpu, payload,
                   boost::get<platform::CUDAPlace>(tensor.place()),
                   reinterpret_cast<const void*>(tensor.data<void>()),
                   copy_size, gpu_dev_ctx.stream());
      ctx.Wait();
      destroy_callback = [](void* backing) {
        platform::CPUPlace cpu;
        memory::Free(cpu, backing);
      };

#endif
    } else {
      payload = tensor.data<void>();
    }
    payload_size = tensor.numel() * framework::SizeOfType(tensor.type());
    e.WriteVarlengthBeginning(VarMsg::kSerializedFieldNumber, payload_size);
  } else if (var->IsType<framework::SelectedRows>()) {
    // ========================SelectedRows==============================
    // TODO(typhoonzero): the SelectedRows implementation should not use
    // unique_ptr
    auto* slr = var->GetMutable<framework::SelectedRows>();
    e.WriteUint64(VarMsg::kDataTypeFieldNumber,
                  framework::ToDataType(slr->value().type()));
    for (auto& dim : framework::vectorize(slr->value().dims())) {
      e.WriteUint64(VarMsg::kDimsFieldNumber, dim);
    }
    e.WriteUint64(VarMsg::kLodLevelFieldNumber, 0);
    e.WriteUint64(VarMsg::kSlrHeightFieldNumber, slr->height());
    auto* tensor = slr->mutable_value();
    if (platform::is_gpu_place(ctx.GetPlace())) {
#ifdef PADDLE_WITH_CUDA
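      // Same as the LoDTensor branch above: stage a CPU copy of the
      // GPU tensor and free it via destroy_callback.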
      platform::CPUPlace cpu;
      auto& gpu_dev_ctx = static_cast<const platform::CUDADeviceContext&>(ctx);
      auto copy_size = tensor->numel() * framework::SizeOfType(tensor->type());
      payload = memory::Alloc(cpu, copy_size);
      memory::Copy(cpu, payload,
                   boost::get<platform::CUDAPlace>(tensor->place()),
                   reinterpret_cast<const void*>(tensor->data<void>()),
                   copy_size, gpu_dev_ctx.stream());
      ctx.Wait();
      destroy_callback = [](void* backing) {
        platform::CPUPlace cpu;
        memory::Free(cpu, backing);
      };
#endif
    } else {
      payload = slr->mutable_value()->data<void>();
    }
    payload_size = tensor->numel() * framework::SizeOfType(tensor->type());
    e.WriteVarlengthBeginning(VarMsg::kSerializedFieldNumber, payload_size);
#ifdef PADDLE_WITH_CUDA
  } else if (var->IsType<ncclUniqueId>()) {
    // ===========================NCCL ID==================================
    e.WriteVarlengthBeginning(VarMsg::kSerializedFieldNumber,
                              NCCL_UNIQUE_ID_BYTES);
    ncclUniqueId* uid = var->GetMutable<ncclUniqueId>();
    e.WriteRawBytes(std::string(uid->internal, NCCL_UNIQUE_ID_BYTES));
#endif
  } else {
    PADDLE_THROW("Serialize does not support type: %s",
                 var->Type().name());
  }
#ifdef PADDLE_WITH_CUDA
  if (var->IsType<ncclUniqueId>()) {
    // NCCL_ID was serialized entirely into the metadata buffer above,
    // so send it as a single slice.
    ::grpc::Slice slices(e.size());
    memcpy(const_cast<uint8_t*>(slices.begin()), e.data(), e.size());
    ::grpc::ByteBuffer tmp(&slices, 1);
    msg->Swap(&tmp);
    return;
  }
#endif
  // Steal the reference to the tensor data instead of copying it.
  ::grpc::Slice slices[4];  // metadata, tensor, rows meta, rows
  int num_slices = 2;       // only SelectedRows has a rows buffer
  slices[0] = ::grpc::Slice(e.size());
  memcpy(const_cast<uint8_t*>(slices[0].begin()), e.data(), e.size());
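  // STEAL_REF hands ownership of payload to gRPC; destroy_callback runs
  // when the last reference to the slice is dropped.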
  slices[1] = ::grpc::Slice(
      grpc_slice_new_with_user_data(payload, payload_size, destroy_callback,
                                    static_cast<char*>(payload)),
      ::grpc::Slice::STEAL_REF);

  if (framework::ToVarType(var->Type()) ==
      framework::proto::VarType_Type_SELECTED_ROWS) {
    auto* slr = var->GetMutable<framework::SelectedRows>();

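    // Encode the rows field tag and length into a second small buffer,
    // then attach the rows data itself as a zero-copy slice.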
    ProtoEncodeHelper e2(static_cast<char*>(buf), 128);
    // NOTE: rows is of type int64_t
    size_t rows_memory_size =
        slr->rows().size() * framework::SizeOfType(typeid(int64_t));
    e2.WriteVarlengthBeginning(VarMsg::kRowsFieldNumber, rows_memory_size);
    slices[2] = ::grpc::Slice(e2.size());
    memcpy(const_cast<uint8_t*>(slices[2].begin()), e2.data(), e2.size());

    slices[3] = ::grpc::Slice(
        grpc_slice_new_with_user_data(
            const_cast<void*>(
                reinterpret_cast<const void*>(slr->rows().data())),
            rows_memory_size,
            [](void* backing) {
              // TODO(typhoonzero): add unref here, same as above.
            },
            const_cast<char*>(
                reinterpret_cast<const char*>(slr->rows().data()))),
        ::grpc::Slice::STEAL_REF);
    num_slices = 4;
  }

  ::grpc::ByteBuffer tmp(&slices[0], num_slices);
  msg->Swap(&tmp);
}

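// Parses a VariableMessage out of msg and materializes the result as a
// variable in the given scope.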
void DeserializeFromByteBuffer(const ::grpc::ByteBuffer& msg,
                               const platform::DeviceContext& ctx,
                               const framework::Scope* scope,
                               framework::Variable** var) {
  operators::detail::VariableResponse resp(scope, &ctx);
  PADDLE_ENFORCE(resp.Parse(msg) == 0,
                 "failed to parse ByteBuffer to variable");
  *var = resp.GetVar();
}

}  // namespace detail
}  // namespace operators
}  // namespace paddle