// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include <fstream>
#include <iostream>
#include <string>
#include <vector>

#include "paddle/fluid/operators/distributed/rpc_server.h"
#include "paddle/fluid/platform/profiler.h"

namespace paddle {
namespace operators {
namespace distributed {

// Stops the server: runs the transport-specific shutdown first, then sets
// exit_flag_ and wakes every thread blocked on a barrier or RPC condition
// so they can observe the flag and return.
void RPCServer::ShutDown() {
  LOG(INFO) << "RPCServer ShutDown ";
  ShutDownImpl();

  exit_flag_ = true;
  barrier_cond_.notify_all();
  rpc_cond_.notify_all();
}

// Writes the port this server actually bound to into /tmp/paddle.<pid>.port
// so other local processes (e.g. tests or launch scripts) can discover it.
void RPCServer::SavePort() const {
  auto file_path = string::Sprintf("/tmp/paddle.%d.port", ::getpid());
  std::ofstream port_file;
  port_file.open(file_path);
  port_file << selected_port_;
  port_file.close();
  VLOG(40) << "selected port written to " << file_path;
}

// Blocks until every client has reached the barrier named rpc_name, or
// until the server is shutting down.
void RPCServer::WaitBarrier(const std::string& rpc_name) {
  std::unique_lock<std::mutex> lock(this->mutex_);
  barrier_cond_.wait(lock, [this, &rpc_name] {
    return ((barrier_counter_[rpc_name] == client_num_ && client_num_ != 0) ||
            exit_flag_.load());
  });

  VLOG(30) << "batch_barrier_: " << rpc_name << " "
           << barrier_counter_[rpc_name];
}

// Records that one more client has reached the barrier named rpc_name and
// wakes the waiters once all clients have arrived.
void RPCServer::IncreaseBatchBarrier(const std::string rpc_name) {
  VLOG(40) << "RPCServer begin IncreaseBatchBarrier " << rpc_name;
  int b = 0;
  std::unique_lock<std::mutex> lock(mutex_);
  b = ++barrier_counter_[rpc_name];
  if (b >= client_num_) {
    lock.unlock();
    barrier_cond_.notify_all();
    lock.lock();
  }
}

// Called when a client finishes training: removes it from the client count
// so the remaining clients can still pass barriers, and marks that all
// variables need to be reset.
void RPCServer::Complete() {
  {
    std::unique_lock<std::mutex> lock(mutex_);
    client_num_--;
    need_reset_all_vars_ = true;

    VLOG(40) << "decrease client_num to: " << client_num_;
    if (cur_cond_.load() == rpc_cond_map_[kRequestGet]) {
      barrier_counter_[kRequestGet]--;
    }
  }
  barrier_cond_.notify_all();
}

bool RPCServer::NeedResetAllVars() {
  std::unique_lock<std::mutex> lock(mutex_);
  return need_reset_all_vars_;
}

int RPCServer::GetClientNum() {
  std::unique_lock<std::mutex> lock(mutex_);
  return client_num_;
}

// Clears every barrier counter at the start of a new round.
void RPCServer::ResetBarrierCounter() {
  VLOG(30) << "RPCServer ResetBarrierCounter ";
  std::unique_lock<std::mutex> lock(mutex_);
  for (auto& t : barrier_counter_) {
    t.second = 0;
  }
  need_reset_all_vars_ = false;
}

// Registers the handler and thread count for an RPC, and assigns the RPC a
// monotonically increasing condition id used by SetCond()/WaitCond().
void RPCServer::RegisterRPC(const std::string& rpc_name,
                            RequestHandler* handler, int thread_num) {
  rpc_call_map_[rpc_name] = handler;
  rpc_thread_num_[rpc_name] = thread_num;

  static int cond = -1;
  rpc_cond_map_[rpc_name] = ++cond;
  VLOG(40) << "RegisterRPC rpc_name:" << rpc_name << ", handler:" << handler
           << ", cond:" << rpc_cond_map_[rpc_name];
}

// Makes rpc_name the currently active RPC and wakes all WaitCond() callers.
void RPCServer::SetCond(const std::string& rpc_name) {
  VLOG(30) << "RPCServer SetCond " << rpc_name;
  {
    std::unique_lock<std::mutex> lock(mutex_);
    cur_cond_ = rpc_cond_map_[rpc_name];
  }

  rpc_cond_.notify_all();
}

// Blocks until rpc_name becomes the active RPC, or the server shuts down.
void RPCServer::WaitCond(const std::string& rpc_name) {
  VLOG(40) << "RPCServer WaitCond " << rpc_name;
  int cond = 0;
  {
    std::unique_lock<std::mutex> lock(mutex_);
    cond = rpc_cond_map_[rpc_name];
  }

  std::unique_lock<std::mutex> lock(mutex_);
  rpc_cond_.wait(
      lock, [=] { return (cur_cond_.load() == cond || exit_flag_.load()); });
}

}  // namespace distributed
}  // namespace operators
}  // namespace paddle