/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "grpc_client.h"
#include <sys/time.h>

#include <future>
#include <limits>
#include <memory>
#include <vector>

#include "paddle/fluid/framework/threadpool.h"

namespace paddle {
namespace operators {
namespace detail {
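
// A sketch of how this client is typically driven (argument values are
// illustrative only):
//
//   RPCClient client;
//   client.AsyncSendVariable(ep, ctx, scope, "w@GRAD", /*time_out=*/600000);
//   client.AsyncGetVariable(ep, ctx, scope, "w", /*time_out=*/600000);
//   if (!client.Wait()) { /* at least one RPC failed */ }
//
// Each Async* call enqueues work on the thread pool and returns immediately;
// Wait() blocks until every outstanding request has completed.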

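// Enqueues an asynchronous Send of `var_name` from `scope` to the server at
// `ep`. Arguments are copied by value so the closure below stays valid after
// the caller returns; completion is observed later via the shared completion
// queue in Proceed().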
bool RPCClient::AsyncSendVariable(const std::string& ep,
                                  const platform::DeviceContext& ctx,
                                  const framework::Scope& scope,
                                  const std::string& var_name,
                                  int64_t time_out) {
  const platform::DeviceContext* p_ctx = &ctx;
  const std::string ep_val = ep;
  const std::string var_name_val = var_name;
  const framework::Scope* p_scope = &scope;
  const auto ch = GetChannel(ep_val);

  framework::Async([var_name_val, p_ctx, ep_val, p_scope, time_out, ch, this] {
    auto* var = p_scope->FindVar(var_name_val);

    ::grpc::ByteBuffer req;
    SerializeToByteBuffer(var_name_val, var, *p_ctx, &req);

    // Record the request's context so completions can be traced back to it.
    VarHandle var_h;
    var_h.ep = ep_val;
    var_h.scope = p_scope;
    var_h.name = var_name_val;
    var_h.ctx = p_ctx;

    // Per-call state; ownership passes to the completion queue (as the tag)
    // and is reclaimed in Proceed().
    SendProcessor* s = new SendProcessor(ch);
    s->Prepare(var_h, time_out);
    s->response_call_back_ = nullptr;  // Send has no reply payload to process.

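    // Issue the call through the generic stub: the request is already a
    // serialized ByteBuffer, so no further protobuf encoding happens here.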
    auto call = s->stub_g_.PrepareUnaryCall(
        s->context_.get(), "/sendrecv.SendRecvService/SendVariable", req, &cq_);
    call->StartCall();
    call->Finish(&s->reply_, &s->status_, static_cast<void*>(s));
  });

  req_count_++;

  return true;
}

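// Completion callback for Get: deserializes the server's reply directly
// into the requesting scope.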
void ProcGetResponse(const VarHandle& var_h,
                     const ::grpc::ByteBuffer& ret_msg) {
  framework::Variable* outvar = nullptr;
  DeserializeFromByteBuffer(ret_msg, *var_h.ctx, var_h.scope, outvar);
}

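// Serializes `proto` into a single-slice ByteBuffer. ByteSizeLong() must run
// first: it computes and caches the sizes that
// SerializeWithCachedSizesToArray() relies on.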
template <typename T>
void RequestToByteBuffer(const T& proto, ::grpc::ByteBuffer* result) {
  ::grpc::Slice slice(proto.ByteSizeLong());
  proto.SerializeWithCachedSizesToArray(
      const_cast<uint8_t*>(reinterpret_cast<const uint8_t*>(slice.begin())));
  ::grpc::ByteBuffer tmp(&slice, 1);
  result->Swap(&tmp);
}

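// Enqueues an asynchronous Get of `var_name` from the server at `ep`; the
// reply is deserialized back into `scope` by ProcGetResponse on completion.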
bool RPCClient::AsyncGetVariable(const std::string& ep,
                                 const platform::DeviceContext& ctx,
                                 const framework::Scope& scope,
                                 const std::string& var_name,
                                 int64_t time_out) {
  const platform::DeviceContext* p_ctx = &ctx;
  const std::string ep_val = ep;
  const std::string var_name_val = var_name;
  const framework::Scope* p_scope = &scope;
  const auto ch = GetChannel(ep_val);

  framework::Async([var_name_val, ep_val, p_scope, p_ctx, time_out, ch, this] {
    sendrecv::VariableMessage req;
    req.set_varname(var_name_val);

    // Record the request's context so completions can be traced back to it.
    VarHandle var_h;
    var_h.ep = ep_val;
    var_h.scope = p_scope;
    var_h.name = var_name_val;
    var_h.ctx = p_ctx;

    // Per-call state; ownership passes to the completion queue (as the tag)
    // and is reclaimed in Proceed().
    GetProcessor* s = new GetProcessor(ch);
    s->Prepare(var_h, time_out);
    s->response_call_back_ = ProcGetResponse;

    ::grpc::ByteBuffer buf;
    RequestToByteBuffer<sendrecv::VariableMessage>(req, &buf);

    auto call = s->stub_g_.PrepareUnaryCall(
        s->context_.get(), "/sendrecv.SendRecvService/GetVariable", buf, &cq_);
    call->StartCall();
    call->Finish(&s->reply_, &s->status_, static_cast<void*>(s));
  });

  req_count_++;

  return true;
}

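// Sends the BATCH_BARRIER_MESSAGE sentinel through the Send RPC, signalling
// that this client has no more variables to send for the current batch.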
void RPCClient::AsyncSendBatchBarrier(const std::string& ep, int64_t time_out) {
  const auto ch = GetChannel(ep);

  BatchBarrierProcessor* s = new BatchBarrierProcessor(ch);
  s->Prepare(time_out);

  sendrecv::VariableMessage req;
  req.set_varname(BATCH_BARRIER_MESSAGE);
  auto rpc = s->stub_->AsyncSendVariable(s->context_.get(), req, &cq_);
  rpc->Finish(&s->reply_, &s->status_, static_cast<void*>(s));
  req_count_++;
}

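// Sends the FETCH_BARRIER_MESSAGE sentinel through the Get RPC, marking the
// end of this client's fetch requests.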
void RPCClient::AsyncSendFetchBarrier(const std::string& ep, int64_t time_out) {
  const auto ch = GetChannel(ep);
  FetchBarrierProcessor* s = new FetchBarrierProcessor(ch);
  s->Prepare(time_out);

  sendrecv::VariableMessage req;
  req.set_varname(FETCH_BARRIER_MESSAGE);
  auto rpc = s->stub_->AsyncGetVariable(s->context_.get(), req, &cq_);
  rpc->Finish(&s->reply_, &s->status_, static_cast<void*>(s));
  req_count_++;
}

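// Blocks until every outstanding request has completed, draining one
// completion per request on the thread pool. Returns false if any request
// failed.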
bool RPCClient::Wait() {
  if (req_count_ <= 0) {
    return true;
  }
  // One completion flag per outstanding request. A plain heap array is used
  // (rather than a non-standard variable-length array, or std::vector<bool>,
  // whose packed representation is unsafe to write concurrently) because the
  // flags are set in parallel from the thread pool.
  std::unique_ptr<bool[]> a(new bool[req_count_]);
  std::vector<std::future<void>> waits(req_count_);

  for (int i = 0; i < req_count_; i++) {
    waits[i] = framework::Async([i, &a, this] { a[i] = Proceed(); });
  }

  for (int i = 0; i < req_count_; i++) {
    waits[i].wait();
  }

  int last_req_count = req_count_;
  req_count_ = 0;

  for (int i = 0; i < last_req_count; i++) {
    if (!a[i]) {
      return false;
    }
  }

  return true;
}

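// Pops one event off the shared completion queue and runs the matching
// processor. Returns false if the queue shuts down or the RPC failed.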
bool RPCClient::Proceed() {
  void* tag = nullptr;
  bool ok = false;

  // Block for the next completion; Next() returns false once the queue has
  // been shut down and drained.
  if (!cq_.Next(&tag, &ok)) {
    LOG(ERROR) << "CompletionQueue::Next() failed, the queue is shut down";
    return false;
  }

  GPR_ASSERT(ok);
  PADDLE_ENFORCE(tag);

  // TODO(gongwb): add more retries.
  BaseProcessor* c = static_cast<BaseProcessor*>(tag);
  if (!c->status_.ok()) {
    LOG(ERROR) << "RPC failed for " << c->var_h_.String()
               << ", grpc error: " << c->status_.error_message();
    delete c;
    return false;
  }

  c->Process();
  delete c;
  return true;
}

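// Returns a channel to `ep`, creating and caching it on first use.
// Compression is disabled and the send/receive size limits are lifted so
// that large tensors pass through unmodified.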
std::shared_ptr<grpc::Channel> RPCClient::GetChannel(const std::string& ep) {
  auto it = channels_.find(ep);
  if (it != channels_.end()) {
    return it->second;
  }

  grpc::ChannelArguments args;
  args.SetCompressionAlgorithm(GRPC_COMPRESS_NONE);
  args.SetMaxSendMessageSize(std::numeric_limits<int>::max());
  args.SetMaxReceiveMessageSize(std::numeric_limits<int>::max());

  auto ch =
      grpc::CreateCustomChannel(ep, grpc::InsecureChannelCredentials(), args);

  channels_[ep] = ch;
  return ch;
}

}  // namespace detail
}  // namespace operators
}  // namespace paddle