Commit d9da9fc9 authored by Shengliang Guan

TD-2393

Parent f979b1f9
dnodeVRead.c
@@ -16,66 +16,26 @@
 #define _DEFAULT_SOURCE
 #include "os.h"
 #include "tqueue.h"
+#include "tworker.h"
 #include "dnodeVRead.h"
 
-typedef struct {
-  pthread_t thread;    // thread
-  int32_t   workerId;  // worker ID
-} SVReadWorker;
-
-typedef struct {
-  int32_t         max;  // max number of workers
-  int32_t         min;  // min number of workers
-  int32_t         num;  // current number of workers
-  SVReadWorker *  worker;
-  pthread_mutex_t mutex;
-} SVReadWorkerPool;
-
 static void *dnodeProcessReadQueue(void *pWorker);
 
 // module global variable
-static SVReadWorkerPool tsVReadWP;
-static taos_qset        tsVReadQset;
+static SWorkerPool tsVReadWP;
 
 int32_t dnodeInitVRead() {
-  tsVReadQset = taosOpenQset();
+  tsVReadWP.name = "vquery";
+  tsVReadWP.workerFp = dnodeProcessReadQueue;
   tsVReadWP.min = tsNumOfCores;
   tsVReadWP.max = tsNumOfCores * tsNumOfThreadsPerCore;
   if (tsVReadWP.max <= tsVReadWP.min * 2) tsVReadWP.max = 2 * tsVReadWP.min;
-  tsVReadWP.worker = calloc(sizeof(SVReadWorker), tsVReadWP.max);
-  pthread_mutex_init(&tsVReadWP.mutex, NULL);
-
-  if (tsVReadWP.worker == NULL) return -1;
-  for (int i = 0; i < tsVReadWP.max; ++i) {
-    SVReadWorker *pWorker = tsVReadWP.worker + i;
-    pWorker->workerId = i;
-  }
-
-  dInfo("dnode vread is initialized, min worker:%d max worker:%d", tsVReadWP.min, tsVReadWP.max);
-  return 0;
+
+  return tWorkerInit(&tsVReadWP);
 }
 
 void dnodeCleanupVRead() {
-  for (int i = 0; i < tsVReadWP.max; ++i) {
-    SVReadWorker *pWorker = tsVReadWP.worker + i;
-    if (pWorker->thread) {
-      taosQsetThreadResume(tsVReadQset);
-    }
-  }
-
-  for (int i = 0; i < tsVReadWP.max; ++i) {
-    SVReadWorker *pWorker = tsVReadWP.worker + i;
-    if (pWorker->thread) {
-      pthread_join(pWorker->thread, NULL);
-    }
-  }
-
-  free(tsVReadWP.worker);
-  taosCloseQset(tsVReadQset);
-  pthread_mutex_destroy(&tsVReadWP.mutex);
-
-  dInfo("dnode vread is closed");
+  tWorkerCleanup(&tsVReadWP);
 }
 
 void dnodeDispatchToVReadQueue(SRpcMsg *pMsg) {
@@ -109,42 +69,11 @@ void dnodeDispatchToVReadQueue(SRpcMsg *pMsg) {
 }
 
 void *dnodeAllocVReadQueue(void *pVnode) {
-  pthread_mutex_lock(&tsVReadWP.mutex);
-  taos_queue queue = taosOpenQueue();
-  if (queue == NULL) {
-    pthread_mutex_unlock(&tsVReadWP.mutex);
-    return NULL;
-  }
-
-  taosAddIntoQset(tsVReadQset, queue, pVnode);
-
-  // spawn a thread to process queue
-  if (tsVReadWP.num < tsVReadWP.max) {
-    do {
-      SVReadWorker *pWorker = tsVReadWP.worker + tsVReadWP.num;
-
-      pthread_attr_t thAttr;
-      pthread_attr_init(&thAttr);
-      pthread_attr_setdetachstate(&thAttr, PTHREAD_CREATE_JOINABLE);
-
-      if (pthread_create(&pWorker->thread, &thAttr, dnodeProcessReadQueue, pWorker) != 0) {
-        dError("failed to create thread to process vread vqueue since %s", strerror(errno));
-      }
-
-      pthread_attr_destroy(&thAttr);
-      tsVReadWP.num++;
-      dDebug("dnode vread worker:%d is launched, total:%d", pWorker->workerId, tsVReadWP.num);
-    } while (tsVReadWP.num < tsVReadWP.min);
-  }
-
-  pthread_mutex_unlock(&tsVReadWP.mutex);
-  dDebug("pVnode:%p, dnode vread queue:%p is allocated", pVnode, queue);
-
-  return queue;
+  return tWorkerAllocQueue(&tsVReadWP, pVnode);
 }
 
 void dnodeFreeVReadQueue(void *pRqueue) {
-  taosCloseQueue(pRqueue);
+  tWorkerFreeQueue(&tsVReadWP, pRqueue);
 }
 
 void dnodeSendRpcVReadRsp(void *pVnode, SVReadMsg *pRead, int32_t code) {
@@ -161,18 +90,20 @@ void dnodeSendRpcVReadRsp(void *pVnode, SVReadMsg *pRead, int32_t code) {
 void dnodeDispatchNonRspMsg(void *pVnode, SVReadMsg *pRead, int32_t code) {
 }
 
-static void *dnodeProcessReadQueue(void *pWorker) {
-  SVReadMsg *pRead;
-  int32_t    qtype;
-  void *     pVnode;
+static void *dnodeProcessReadQueue(void *wparam) {
+  SWorker *    pWorker = wparam;
+  SWorkerPool *pPool = pWorker->pPool;
+  SVReadMsg *  pRead;
+  int32_t      qtype;
+  void *       pVnode;
 
   while (1) {
-    if (taosReadQitemFromQset(tsVReadQset, &qtype, (void **)&pRead, &pVnode) == 0) {
-      dDebug("qset:%p dnode vread got no message from qset, exiting", tsVReadQset);
+    if (taosReadQitemFromQset(pPool->qset, &qtype, (void **)&pRead, &pVnode) == 0) {
+      dDebug("dnode vquery got no message from qset:%p, exiting", pPool->qset);
       break;
     }
 
-    dTrace("msg:%p, app:%p type:%s will be processed in vread queue, qtype:%d", pRead, pRead->rpcAhandle,
+    dTrace("msg:%p, app:%p type:%s will be processed in vquery queue, qtype:%d", pRead, pRead->rpcAhandle,
            taosMsg[pRead->msgType], qtype);
 
     int32_t code = vnodeProcessRead(pVnode, pRead);
...
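With the pool logic centralized, each dnode queue module shrinks to a few wrapper calls, as the hunks above show. Below is a minimal sketch of how another module could adopt the API this commit introduces; the demo* names are hypothetical, and only functions that actually appear in this diff (tWorkerInit, tWorkerAllocQueue, taosReadQitemFromQset) are used:

#include "os.h"
#include "tqueue.h"
#include "tworker.h"

static SWorkerPool demoPool;  // hypothetical module-level pool, analogous to tsVReadWP

// Worker entry point: each launched thread is handed its own SWorker and
// reaches the shared qset through the pPool back-pointer, exactly as
// dnodeProcessReadQueue does after this commit.
static void *demoProcessQueue(void *wparam) {
  SWorker *    pWorker = wparam;
  SWorkerPool *pPool = pWorker->pPool;
  void *       pItem = NULL;
  void *       ahandle = NULL;
  int32_t      qtype;

  while (1) {
    // returns 0 once the qset is closed or the thread is asked to exit
    if (taosReadQitemFromQset(pPool->qset, &qtype, &pItem, &ahandle) == 0) break;
    // ... process pItem on behalf of the owner identified by ahandle ...
  }

  return NULL;
}

int32_t demoInit() {
  demoPool.name = "demo";
  demoPool.workerFp = demoProcessQueue;
  demoPool.min = 2;
  demoPool.max = 4;
  return tWorkerInit(&demoPool);
}

void *demoOpenQueue(void *pOwner) {
  // one queue per owner; allocation also launches workers up to pool->min
  return tWorkerAllocQueue(&demoPool, pOwner);
}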
tworker.h (new file)

/*
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
*
* This program is free software: you can use, redistribute, and/or modify
* it under the terms of the GNU Affero General Public License, version 3
* or later ("AGPL"), as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef TDENGINE_TWORKER_H
#define TDENGINE_TWORKER_H

#ifdef __cplusplus
extern "C" {
#endif

typedef void *(*FWorkerThread)(void *pWorker);

struct SWorkerPool;

typedef struct {
  pthread_t           thread;  // thread
  int32_t             id;      // worker ID
  struct SWorkerPool *pPool;
} SWorker;

typedef struct SWorkerPool {
  int32_t         max;  // max number of workers
  int32_t         min;  // min number of workers
  int32_t         num;  // current number of workers
  void *          qset;
  char *          name;
  SWorker *       worker;
  FWorkerThread   workerFp;
  pthread_mutex_t mutex;
} SWorkerPool;

int32_t tWorkerInit(SWorkerPool *pPool);
void    tWorkerCleanup(SWorkerPool *pPool);
void *  tWorkerAllocQueue(SWorkerPool *pPool, void *ahandle);
void    tWorkerFreeQueue(SWorkerPool *pPool, void *pQueue);

#ifdef __cplusplus
}
#endif

#endif
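One observation about the header (not part of the commit): it references pthread_t, pthread_mutex_t, and int32_t but includes no headers itself, so a consumer must pull in the prerequisites first, in the same order tworker.c and dnodeVRead.c use:

#include "os.h"      // pthread types, int32_t
#include "tqueue.h"  // taos_queue/taos_qset, used with the pool's qset
#include "tworker.h"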
tworker.c (new file)

/*
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
*
* This program is free software: you can use, redistribute, and/or modify
* it under the terms of the GNU Affero General Public License, version 3
* or later ("AGPL"), as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#define _DEFAULT_SOURCE
#include "os.h"
#include "tulog.h"
#include "tqueue.h"
#include "tworker.h"
int32_t tWorkerInit(SWorkerPool *pPool) {
  pPool->qset = taosOpenQset();
  pPool->worker = calloc(sizeof(SWorker), pPool->max);
  if (pPool->worker == NULL) return -1;  // same allocation guard the per-module pool had
  pthread_mutex_init(&pPool->mutex, NULL);

  for (int i = 0; i < pPool->max; ++i) {
    SWorker *pWorker = pPool->worker + i;
    pWorker->id = i;
    pWorker->pPool = pPool;
  }

  uInfo("worker:%s is initialized, min:%d max:%d", pPool->name, pPool->min, pPool->max);
  return 0;
}
void tWorkerCleanup(SWorkerPool *pPool) {
  for (int i = 0; i < pPool->max; ++i) {
    SWorker *pWorker = pPool->worker + i;
    if (pWorker->thread) {
      taosQsetThreadResume(pPool->qset);
    }
  }

  for (int i = 0; i < pPool->max; ++i) {
    SWorker *pWorker = pPool->worker + i;
    if (pWorker->thread) {
      pthread_join(pWorker->thread, NULL);
    }
  }

  free(pPool->worker);
  taosCloseQset(pPool->qset);
  pthread_mutex_destroy(&pPool->mutex);

  uInfo("worker:%s is closed", pPool->name);
}
void *tWorkerAllocQueue(SWorkerPool *pPool, void *ahandle) {
  pthread_mutex_lock(&pPool->mutex);
  taos_queue pQueue = taosOpenQueue();
  if (pQueue == NULL) {
    pthread_mutex_unlock(&pPool->mutex);
    return NULL;
  }

  taosAddIntoQset(pPool->qset, pQueue, ahandle);

  // spawn a thread to process queue
  if (pPool->num < pPool->max) {
    do {
      SWorker *pWorker = pPool->worker + pPool->num;

      pthread_attr_t thAttr;
      pthread_attr_init(&thAttr);
      pthread_attr_setdetachstate(&thAttr, PTHREAD_CREATE_JOINABLE);

      if (pthread_create(&pWorker->thread, &thAttr, pPool->workerFp, pWorker) != 0) {
        uError("worker:%s:%d failed to create thread to process since %s", pPool->name, pWorker->id, strerror(errno));
      }

      pthread_attr_destroy(&thAttr);
      pPool->num++;
      uDebug("worker:%s:%d is launched, total:%d", pPool->name, pWorker->id, pPool->num);
    } while (pPool->num < pPool->min);
  }

  pthread_mutex_unlock(&pPool->mutex);
  uDebug("worker:%s, queue:%p is allocated, ahandle:%p", pPool->name, pQueue, ahandle);
  return pQueue;
}
void tWorkerFreeQueue(SWorkerPool *pPool, void *pQueue) {
  taosCloseQueue(pQueue);
  uDebug("worker:%s, queue:%p is freed", pPool->name, pQueue);
}
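Teardown depends on taosQsetThreadResume waking each worker blocked in taosReadQitemFromQset, so the read returns 0 and the thread falls out of its loop, after which tWorkerCleanup can join it. A sketch of the shutdown order for the hypothetical demo module above:

void demoClose(void *pQueue) {
  tWorkerFreeQueue(&demoPool, pQueue);  // close this owner's queue
  tWorkerCleanup(&demoPool);            // wake and join workers, close the qset, free the pool
}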