提交 b14a4fb7 编写于 作者: K kernelai

feature: proxy connection pool (#978)

上级 6519e860
# vim temp files
*.swp
# Compiled Object files
*.slo
*.lo
......
......@@ -7,9 +7,9 @@
#define PIKA_PROXY_CONN_H_
#include "pink/include/redis_conn.h"
#include "pink/include/backend_thread.h"
#include "include/pika_client_conn.h"
#include <memory>
class ProxyCli;
class PikaProxyConn;
......@@ -27,14 +27,77 @@ class PikaProxyConn: public pink::RedisConn {
pink::Thread *server_thread,
pink::PinkEpoll* pink_epoll,
std::shared_ptr<ProxyCli> proxy_cli);
  bool IsAuthed() { return isAuthed_; }  // whether AUTH has completed on this backend conn (flag set elsewhere — confirm in DealMessage)
  bool IsSelected() { return isSelected_; }  // whether SELECT of the target table has been issued (flag set elsewhere)
  bool IsClosed() { return closed_; }  // true once SetClose() has marked this connection for teardown
  void SetClose() { closed_ = true;}  // only flips the flag; does not close the underlying fd
  virtual ~PikaProxyConn() {}  // empty virtual destructor: safe delete through the RedisConn base
private:
int DealMessage(
const pink::RedisCmdArgsType& argv,
std::string* response) override;
std::shared_ptr<ProxyCli> proxy_cli_;
std::string auth_;
bool isAuthed_;
int table_;
bool isSelected_;
bool closed_;
};
// Per-backend connection settings shared by ParallelConn and ConnectionPool.
//   table_    : table index each connection should SELECT
//   auth_     : password used to AUTH each connection (empty = no auth)
//   parallel_ : number of parallel connections kept per backend address
struct ConnConfig {
  ConnConfig(int table, const std::string& auth, int parallel)
      : table_(table), auth_(auth), parallel_(parallel) {}
  int table_ = 0;        // defaults apply only when a member is left unset
  std::string auth_;
  int parallel_ = 10;
};
// A group of parallel redis connections to ONE backend address ("ip:port"),
// shared between users via manual reference counting (Retain/Release).
// Connections are created on the shared pink::BackendThread.
class ParallelConn {
 public:
  ParallelConn(const std::string& addr, ConnConfig& config,
      std::shared_ptr<pink::BackendThread> client);
  // Open missing connections up to config_.parallel_; new fds go to tmpConns_.
  Status Connect();
  // Connect() + PrepareConn(): bring the group to a usable state.
  Status Start();
  // Close every fd (established and pending) on the backend thread.
  void Close();
  // Increment the reference count (see .cc for the error-marker protocol).
  void Retain();
  // Decrement the reference count; returns true when the group should be
  // destroyed by the caller.
  bool Release();
  std::string Addr() { return addr_; }
  int GetTable() { return config_.table_; }
  // AUTH/SELECT on each fd in tmpConns_ to promote it to a usable conn.
  Status PrepareConn();
 private:
  std::shared_ptr<pink::PinkConn> GetConn(int fd);
  void VerifyAuth(int fd);   // TODO: stub — not implemented yet
  void SelectConn(int fd);   // TODO: stub — not implemented yet
  void KeepAlive();          // TODO: stub — not implemented yet
  void KeepAliveConn(int fd);// TODO: stub — not implemented yet
  //std::vector<std::shared_ptr<PikaClientConn>> parallelConn_;
  // established connections; NOTE(review): both key and value are fds here —
  // confirm intended mapping (only .second is ever closed/used).
  std::map<int, int> parallelConn_;
  std::set<int> tmpConns_;   // fds connected but not yet authed/selected
  std::string addr_;         // backend address "ip:port"
  ConnConfig config_;
  std::atomic<int> refCount_; // manual refcount; -1 marks an error/dead state
  std::shared_ptr<pink::BackendThread> client_;
};
// Maps a backend address ("ip:port") to its ParallelConn group and hands out
// reference-counted access to the groups.
// NOTE(review): pool_ holds raw owning pointers (deleted in Release());
// std::unique_ptr<ParallelConn> would express the ownership — confirm callers
// before changing the map's value type.
class ConnectionPool {
 public:
  ConnectionPool(const ConnConfig& config,
      std::shared_ptr<pink::BackendThread> client)
      : config_(config), client_(client) { }
  // Bump the refcount for addr, creating the group on first use.
  void Retain(std::string addr);
  // Drop one reference; closes and deletes the group when it hits zero.
  void Release(std::string addr);
  // Create a new ParallelConn for addr, insert it, and start it.
  void AddParallel(const std::string& addr);
 private:
  // addr and ptr
  ConnConfig config_;
  std::unordered_map<std::string, ParallelConn *> pool_;
  std::shared_ptr<pink::BackendThread> client_;
};
#endif // PIKA_PROXY_CONN_H_
......@@ -126,6 +126,7 @@ Status ProxyCli::ForwardToBackend(ProxyTask* task) {
std::string ip_port = node.Ip() + ":" + std::to_string(node.Port());
backend_task_queue_[ip_port].push_back(cli_task);
}
//TODO: to be pipelined
std::string ip_port = conn_ptr->ip_port();
if (task_queue_.find(ip_port) != task_queue_.end()) {
ProxyTask* tmp_task = task_queue_[ip_port];
......
......@@ -33,4 +33,140 @@ int PikaProxyConn::DealMessage(
return 0;
}
// Build a connection group for one backend address ("ip:port").
// The group starts with a reference count of 1, held by the creator
// (ConnectionPool::AddParallel).
// refCount_ is initialized in the member-init list (matching declaration
// order) instead of being assigned in the body.
ParallelConn::ParallelConn(const std::string& addr, ConnConfig& config,
    std::shared_ptr<pink::BackendThread> client)
    : addr_(addr), config_(config), refCount_(1), client_(client) {
}
// Ensure config_.parallel_ backend connections to addr_ exist.
// Newly opened fds are parked in tmpConns_ until PrepareConn() promotes them.
// Returns OK when the pool is already at (or above) target size or all
// missing connections were opened; returns the first error otherwise.
//
// BUG FIX: the loop used to iterate `num` (the count of EXISTING
// connections) times, so an empty group connected nothing and a full group
// doubled itself.  It now opens exactly the missing `parallel_ - num` fds.
Status ParallelConn::Connect() {
  int num = parallelConn_.size() + tmpConns_.size();
  int need = config_.parallel_ - num;  // connections still missing
  if (need <= 0) {
    return Status::OK();
  }
  // The address never changes between iterations: parse it once.
  std::string ip;
  int port;
  if (!slash::ParseIpPortString(addr_, ip, port)) {
    LOG(INFO) << "parser addr " << addr_ << " error";
    return Status::InvalidArgument("paser addr error, addr: ", addr_);
  }
  for (int i = 0; i < need; i++) {
    int fd;
    Status s = client_->Connect(ip, port, &fd);
    if (!s.ok()) {
      LOG(INFO) << "connect addr: " << addr_ << "error: " << s.ToString();
      return s;
    }
    LOG(INFO) << "connect addr: " << addr_ << " fd: " << std::to_string(fd);
    tmpConns_.insert(fd);
  }
  return Status::OK();
}
// Thin forwarder: the backend thread owns the fd -> connection mapping.
std::shared_ptr<pink::PinkConn> ParallelConn::GetConn(int fd) {
  auto conn = client_->GetConn(fd);
  return conn;
}
// Issue AUTH on the connection identified by fd.
// TODO: stub — not implemented yet (PrepareConn() already calls it).
void ParallelConn::VerifyAuth(int fd) {
}
// Issue SELECT of config_.table_ on the connection identified by fd.
// TODO: stub — not implemented yet (PrepareConn() already calls it).
void ParallelConn::SelectConn(int fd) {
}
// Keep the whole connection group alive (e.g. periodic PING).
// TODO: stub — not implemented yet.
void ParallelConn::KeepAlive() {
}
// Keep a single connection (fd) alive.
// TODO: stub — not implemented yet.
void ParallelConn::KeepAliveConn(int fd) {
}
// Promote freshly-connected fds in tmpConns_ to usable connections:
// AUTH where still needed, then SELECT the configured table on each.
//
// FIX: guard the dynamic_pointer_cast result — a fd that was closed in the
// meantime (or a conn of another type) yielded a null pointer and the
// unconditional conn->IsAuthed() dereference crashed.  Also collapsed the
// two branches: SelectConn() was called in both, only VerifyAuth() is
// conditional.
Status ParallelConn::PrepareConn() {
  for (auto item : tmpConns_) {
    auto conn = std::dynamic_pointer_cast<PikaProxyConn>(GetConn(item));
    if (!conn) {
      // fd no longer maps to a PikaProxyConn; skip instead of crashing
      continue;
    }
    if (!conn->IsAuthed()) {
      VerifyAuth(item);
    }
    SelectConn(item);  // every new connection selects the target table
  }
  return Status::OK();
}
// Bring the group to a usable state: open any missing connections, then
// auth/select on the newly opened ones.  Returns the first failure.
Status ParallelConn::Start() {
  Status s = Connect();
  if (s.ok()) {
    s = PrepareConn();
  }
  return s;
}
// Tear down every fd owned by this group on the backend thread:
// first the established connections, then the ones still in handshake.
void ParallelConn::Close() {
  for (const auto& kv : parallelConn_) {
    client_->Close(kv.second);
  }
  parallelConn_.clear();
  for (int fd : tmpConns_) {
    client_->Close(fd);
  }
  tmpConns_.clear();
}
// Add one reference to the group.
// Guard: a count of 0 means the group has already been fully released; the
// CAS writes the -1 error marker and the retain is refused.
// NOTE(review): the CAS and the increment are not one atomic step, so two
// racing callers can interleave between them — confirm callers serialize
// pool operations (e.g. on a single thread) before relying on this.
void ParallelConn::Retain() {
  int expect = 0;
  if (refCount_.compare_exchange_strong(expect, -1)) {
    LOG(INFO) << "retain parallel conn ref count error";
    return;
  }
  refCount_++;
}
// Drop one reference.  Returns true when the caller should destroy the
// group (count reached zero, or the count was already invalid).
//
// BUG FIX: compare_exchange_strong overwrites `expect` with the current
// value on failure, so after the first (failed) CAS `expect` held the
// pre-decrement count and the second CAS could never observe 0 — Release()
// never reported "destroy me" and pool entries leaked.  `expect` is now
// reset to 0 before the second test.
// NOTE(review): decrement + CAS are still not one atomic step; callers are
// assumed to serialize pool operations — confirm.
bool ParallelConn::Release() {
  int expect = 0;
  if (refCount_.compare_exchange_strong(expect, -1)) {
    // already 0: releasing an unreferenced group is a caller bug
    LOG(INFO) << "release parallel conn ref count error";
    return true;
  }
  refCount_--;
  expect = 0;  // re-arm: the failed CAS above clobbered `expect`
  if (refCount_.compare_exchange_strong(expect, -1)) {
    return true;  // count hit zero; -1 marks the group as dead
  }
  return false;
}
void ConnectionPool::Release(std::string addr) {
if (pool_.find(addr) == pool_.end()) {
return;
}
auto parallel = pool_.find(addr)->second;
if (parallel->Release()) {
parallel->Close();
delete parallel;
pool_.erase(addr);
LOG(INFO) << "release parallel conn :" << parallel->Addr() << " table :"
<< std::to_string(parallel->GetTable());
}
}
void ConnectionPool::AddParallel(const std::string& addr) {
auto conns = new ParallelConn(addr, config_, client_);
pool_.insert(make_pair(addr, conns));
conns->Start();
}
// Take one reference on the group for addr, creating and starting the group
// on first use.
void ConnectionPool::Retain(std::string addr) {
  auto it = pool_.find(addr);
  if (it == pool_.end()) {
    // first user of this address: build the parallel connection group
    AddParallel(addr);
    return;
  }
  it->second->Retain();
}
Subproject commit fb34df9c885a12ac2f31a19760e93e451ffc4714
Subproject commit a1dd0d78373e24e9f56da4412d9abe07d28899cb
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册