/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#pragma once
#include <stddef.h>  // for size_t

#include <atomic>
#include <condition_variable>
#include <deque>
#include <functional>
#include <memory>
#include <mutex>

#include "paddle/fluid/framework/channel.h"
#include "paddle/fluid/platform/enforce.h"

namespace paddle {
namespace framework {

template <typename T>
class ChannelImpl : public paddle::framework::Channel<T> {
  friend Channel<T> *paddle::framework::MakeChannel<T>(size_t);
  friend void paddle::framework::CloseChannel<T>(Channel<T> *);

 public:
  virtual bool CanSend();
  virtual bool CanReceive();
  virtual bool Send(T *);
  virtual bool Receive(T *);
  virtual size_t Cap() { return cap_; }
  virtual void Lock();
  virtual void Unlock();
  virtual bool IsClosed();
  virtual void Close();
  ChannelImpl(size_t);
  virtual ~ChannelImpl();

  virtual void AddToSendQ(const void *referrer, T *data,
                          std::shared_ptr<std::condition_variable_any> cond,
                          std::function<bool(ChannelAction)> cb);
  virtual void AddToReceiveQ(const void *referrer, T *data,
                             std::shared_ptr<std::condition_variable_any> cond,
                             std::function<bool(ChannelAction)> cb);

  virtual void RemoveFromSendQ(const void *referrer);
  virtual void RemoveFromReceiveQ(const void *referrer);

 private:
  struct QueueMessage {
    T *data;
    std::shared_ptr<std::condition_variable_any> cond;
    bool chan_closed = false;
    bool completed = false;
    const void *referrer;  // TODO(thuan): figure out better way to do this
    std::function<bool(ChannelAction)> callback;

    QueueMessage(T *item)
        : data(item), cond(std::make_shared<std::condition_variable_any>()) {}

    QueueMessage(T *item, std::shared_ptr<std::condition_variable_any> cond)
        : data(item), cond(cond) {}

    void Wait(std::unique_lock<std::recursive_mutex> &lock) {
      cond->wait(lock, [this]() { return completed; });
    }

    void Notify() {
      completed = true;
      cond->notify_all();
    }
  };

  bool send_return(bool value) {
    send_ctr--;
    destructor_cond_.notify_all();
    return value;
  }

  bool recv_return(bool value) {
    recv_ctr--;
    destructor_cond_.notify_all();
    return value;
  }

  size_t cap_;
  std::recursive_mutex mu_;
  bool closed_;
  std::deque<T> buf_;
  std::deque<std::shared_ptr<QueueMessage>> recvq;
  std::deque<std::shared_ptr<QueueMessage>> sendq;
  std::atomic<unsigned> send_ctr{0};
  std::atomic<unsigned> recv_ctr{0};
  std::condition_variable_any destructor_cond_;
};

template <typename T>
ChannelImpl<T>::ChannelImpl(size_t capacity)
    : cap_(capacity), closed_(false), send_ctr(0), recv_ctr(0) {
  PADDLE_ENFORCE_GE(capacity, 0);
}

template <typename T>
bool ChannelImpl<T>::CanSend() {
  std::lock_guard<std::recursive_mutex> lock{mu_};
  return !closed_ && (!recvq.empty() || buf_.size() < cap_);
}

template <typename T>
bool ChannelImpl<T>::CanReceive() {
  std::lock_guard<std::recursive_mutex> lock{mu_};
  return !(closed_ && buf_.empty()) && (!sendq.empty() || buf_.size() > 0);
}

template <typename T>
bool ChannelImpl<T>::Send(T *item) {
  send_ctr++;
  std::unique_lock<std::recursive_mutex> lock{mu_};

  // If channel is closed, do nothing
  if (closed_) {
    lock.unlock();
    // TODO(abhinavarora) Should panic on closed channel
    return send_return(false);
  }

  // If there is a receiver, directly pass the value we want
  // to send to the receiver, bypassing the channel buffer if any
  if (!recvq.empty()) {
    std::shared_ptr<QueueMessage> m = recvq.front();
    recvq.pop_front();
    // Do the data transfer.
    // We will do this data transfer if either of the following
    // cases is true:
    // 1. callback == nullptr: this was a regular channel send
    // 2. callback returns true
    bool do_send = true;
    if (m->callback != nullptr) do_send = m->callback(ChannelAction::SEND);
    if (do_send) {
      *(m->data) = std::move(*item);
    } else {
      // We cannot do the data transfer because this QueueMessage
      // was added by Select and some other case was executed,
      // so call the Send function again. We do not need to notify
      // the others; the executed select case already did.
      return send_return(Send(item));
    }

    // Wake up the blocked process and unlock
    m->Notify();
    lock.unlock();
    return send_return(true);
  }

  // Unbuffered channel will always bypass this.
  // If a buffered channel has space in its buffer,
  // write the element to the buffer.
  if (buf_.size() < cap_) {
    // Copy to buffer
    buf_.push_back(std::move(*item));
    // Release lock and return true
    lock.unlock();
    return send_return(true);
  }

  // Block on the channel: some receiver will complete
  // the operation for us
  auto m = std::make_shared<QueueMessage>(item);
  sendq.push_back(m);
  m->Wait(lock);
  // TODO(abhinavarora) Should panic on closed channel
  return send_return(!m->chan_closed);
}

template <typename T>
bool ChannelImpl<T>::Receive(T *item) {
  recv_ctr++;
  std::unique_lock<std::recursive_mutex> lock{mu_};

  // If the channel is closed and the buffer is empty
  // (which is always the case for an unbuffered channel),
  // there is nothing to receive.
  if (closed_ && buf_.empty()) {
    lock.unlock();
    return recv_return(false);
  }

  // If there is a sender, directly receive the value we want
  // from the sender, bypassing the channel buffer if any
  if (!sendq.empty()) {
    std::shared_ptr<QueueMessage> m = sendq.front();
    sendq.pop_front();
    // Do the data transfer.
    // We will do this data transfer if either of the following
    // cases is true:
    // 1. callback == nullptr: this was a regular channel receive
    // 2. callback returns true
    bool do_receive = true;
    if (m->callback != nullptr)
      do_receive = m->callback(ChannelAction::RECEIVE);
    if (do_receive) {
      *item = std::move(*(m->data));
    } else {
      // We cannot do the data transfer because this QueueMessage
      // was added by Select and some other case was executed,
      // so call the Receive function again. We do not need to notify
      // the others; the executed select case already did.
      return recv_return(Receive(item));
    }

    // Wake up the blocked process and unlock
    m->Notify();
    lock.unlock();
    return recv_return(true);
  }

  // If this is a buffered channel and there are items in the buffer
  if (buf_.size() > 0) {
    // Directly read from the buffer
    *item = std::move(buf_.front());
    buf_.pop_front();
    // Release lock and return true
    lock.unlock();
    return recv_return(true);
  }

  // No sender available; block on this channel.
  // Some sender will complete the operation for us.
  auto m = std::make_shared<QueueMessage>(item);
  recvq.push_back(m);
  m->Wait(lock);

  return recv_return(!m->chan_closed);
}

template <typename T>
void ChannelImpl<T>::Lock() {
  mu_.lock();
}

template <typename T>
void ChannelImpl<T>::Unlock() {
  mu_.unlock();
}

template <typename T>
bool ChannelImpl<T>::IsClosed() {
  std::lock_guard<std::recursive_mutex> lock{mu_};
  return closed_;
}

template <typename T>
void ChannelImpl<T>::Close() {
  std::unique_lock<std::recursive_mutex> lock{mu_};

  if (closed_) {
    // TODO(abhinavarora): closing an already closed channel should panic
    lock.unlock();
    return;
  }

  closed_ = true;

  // Empty the readers
  while (!recvq.empty()) {
    std::shared_ptr<QueueMessage> m = recvq.front();
    recvq.pop_front();
    m->chan_closed = true;

    // Execute callback function (if any)
    if (m->callback != nullptr) {
      m->callback(ChannelAction::CLOSE);
    }

    m->Notify();
  }

  // Empty the senders
  while (!sendq.empty()) {
    std::shared_ptr<QueueMessage> m = sendq.front();
    sendq.pop_front();
    m->chan_closed = true;

    // Execute callback function (if any)
    if (m->callback != nullptr) {
      m->callback(ChannelAction::CLOSE);
    }

    m->Notify();
  }
}

template <typename T>
void ChannelImpl<T>::AddToSendQ(
    const void *referrer, T *data,
    std::shared_ptr<std::condition_variable_any> cond,
    std::function<bool(ChannelAction)> cb) {
  std::lock_guard<std::recursive_mutex> lock{mu_};
  auto m = std::make_shared<QueueMessage>(data, cond);
  m->referrer = referrer;
  m->callback = cb;
  sendq.push_back(m);
}

template <typename T>
void ChannelImpl<T>::AddToReceiveQ(
    const void *referrer, T *data,
    std::shared_ptr<std::condition_variable_any> cond,
    std::function<bool(ChannelAction)> cb) {
  std::lock_guard<std::recursive_mutex> lock{mu_};
  auto m = std::make_shared<QueueMessage>(data, cond);
  m->referrer = referrer;
  m->callback = cb;
  recvq.push_back(m);
}

template <typename T>
void ChannelImpl<T>::RemoveFromSendQ(const void *referrer) {
  std::lock_guard<std::recursive_mutex> lock{mu_};

  for (auto it = sendq.begin(); it != sendq.end();) {
    std::shared_ptr<QueueMessage> sendMsg = *it;

    if (sendMsg->referrer == referrer) {
      it = sendq.erase(it);
    } else {
      ++it;
    }
  }
}

template <typename T>
void ChannelImpl<T>::RemoveFromReceiveQ(const void *referrer) {
  std::lock_guard<std::recursive_mutex> lock{mu_};

  for (auto it = recvq.begin(); it != recvq.end();) {
    std::shared_ptr<QueueMessage> recvMsg = *it;

    if (recvMsg->referrer == referrer) {
      it = recvq.erase(it);
    } else {
      ++it;
    }
  }
}

template <typename T>
ChannelImpl<T>::~ChannelImpl() {
  Close();
  // The destructor must wait for all readers and writers to complete their
  // task. The channel has been closed, so we will not accept new readers or
  // writers.
  std::unique_lock<std::recursive_mutex> lock{mu_};
  destructor_cond_.wait(lock,
                        [this]() { return send_ctr == 0 && recv_ctr == 0; });
}

}  // namespace framework
}  // namespace paddle
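
// Example usage: a hedged sketch, kept in a comment so it is not compiled
// into this header. It shows the intended producer/consumer flow. It assumes
// that the MakeChannel<T>/CloseChannel<T> helpers declared in
// "paddle/fluid/framework/channel.h" (the same functions befriended above)
// construct and close a ChannelImpl; the Demo function name and the thread
// setup are illustrative only.
//
//   #include <thread>
//   #include "paddle/fluid/framework/channel.h"
//
//   void Demo() {
//     // Buffered channel with capacity 4: Send() blocks once 4 items queue up
//     // and no receiver is waiting.
//     auto *ch = paddle::framework::MakeChannel<int>(4);
//     std::thread producer([ch]() {
//       for (int i = 0; i < 8; ++i) {
//         int v = i;
//         ch->Send(&v);  // hands off to a waiting receiver or buffers
//       }
//       paddle::framework::CloseChannel(ch);
//     });
//     int out;
//     // Receive() returns false only once the channel is closed and drained.
//     while (ch->Receive(&out)) { /* consume out */ }
//     producer.join();
//     delete ch;  // the destructor waits for pending senders and receivers
//   }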