Commit 355eeda4 authored by: S slguan

Merge branch '2.0' into refact/slguan

# Conflicts:
#	src/dnode/src/dnodeRead.c
@@ -294,20 +294,21 @@ typedef struct SResRec {
 struct STSBuf;
 typedef struct {
+  int32_t   code;
   int64_t   numOfRows;                  // num of results in current retrieved
   int64_t   numOfTotal;                 // num of total results
   int64_t   numOfTotalInCurrentClause;  // num of total result in current subclause
   char *    pRsp;
-  int       rspType;
-  int       rspLen;
+  int32_t   rspType;
+  int32_t   rspLen;
   uint64_t  qhandle;
   int64_t   uid;
   int64_t   useconds;
   int64_t   offset;  // offset value from vnode during projection query of stable
-  int       row;
+  int32_t   row;
   int16_t   numOfCols;
   int16_t   precision;
-  int32_t   code;
+  bool      completed;
   int32_t   numOfGroups;
   SResRec * pGroupRec;
   char *    data;
...
@@ -209,7 +209,6 @@ int tscSendMsgToServer(SSqlObj *pSql) {
 }
 void tscProcessMsgFromServer(SRpcMsg *rpcMsg) {
-  tscTrace("response:%s is received, len:%d error:%s", taosMsg[rpcMsg->msgType], rpcMsg->contLen, tstrerror(rpcMsg->code));
   SSqlObj *pSql = (SSqlObj *)rpcMsg->handle;
   if (pSql == NULL || pSql->signature != pSql) {
     tscError("%p sql is already released, signature:%p", pSql, pSql->signature);
@@ -256,7 +255,7 @@ void tscProcessMsgFromServer(SRpcMsg *rpcMsg) {
       rpcFreeCont(rpcMsg->pCont);
       return;
     } else {
-      tscTrace("%p it shall renew meter meta, code:%d", pSql, rpcMsg->code);
+      tscTrace("%p it shall renew meter meta, code:%s", pSql, tstrerror(rpcMsg->code));
       pSql->maxRetry = TSDB_VNODES_SUPPORT * 2;
       pSql->res.code = rpcMsg->code;  // keep the previous error code
@@ -278,7 +277,7 @@ void tscProcessMsgFromServer(SRpcMsg *rpcMsg) {
     if (pRes->code != TSDB_CODE_QUERY_CANCELLED) {
       pRes->code = (rpcMsg->code != TSDB_CODE_SUCCESS) ? rpcMsg->code : TSDB_CODE_NETWORK_UNAVAIL;
     } else {
-      tscTrace("%p query is cancelled, code:%d", pSql, pRes->code);
+      tscTrace("%p query is cancelled, code:%s", pSql, tstrerror(pRes->code));
     }
     if (pRes->code != TSDB_CODE_QUERY_CANCELLED) {
@@ -318,19 +317,17 @@ void tscProcessMsgFromServer(SRpcMsg *rpcMsg) {
       tscTrace("%p cmd:%d code:%d, inserted rows:%d, rsp len:%d", pSql, pCmd->command, pRes->code,
               *(int32_t *)pRes->pRsp, pRes->rspLen);
     } else {
-      tscTrace("%p cmd:%d code:%d rsp len:%d", pSql, pCmd->command, pRes->code, pRes->rspLen);
+      tscTrace("%p cmd:%d code:%s rsp len:%d", pSql, pCmd->command, tstrerror(pRes->code), pRes->rspLen);
     }
   }
   if (pRes->code == TSDB_CODE_SUCCESS && tscProcessMsgRsp[pCmd->command])
     rpcMsg->code = (*tscProcessMsgRsp[pCmd->command])(pSql);
   if (rpcMsg->code != TSDB_CODE_ACTION_IN_PROGRESS) {
     int command = pCmd->command;
     void *taosres = tscKeepConn[command] ? pSql : NULL;
-    rpcMsg->code = pRes->code ? -pRes->code : pRes->numOfRows;
-    tscTrace("%p Async SQL result:%d res:%p", pSql, rpcMsg->code, taosres);
+    tscTrace("%p Async SQL result:%s res:%p", pSql, tstrerror(pRes->code), taosres);
     /*
      * Whether to free sqlObj or not should be decided before call the user defined function, since this SqlObj
@@ -2304,6 +2301,7 @@ int tscProcessRetrieveRspFromVnode(SSqlObj *pSql) {
   pRes->precision = htons(pRetrieve->precision);
   pRes->offset    = htobe64(pRetrieve->offset);
   pRes->useconds  = htobe64(pRetrieve->useconds);
+  pRes->completed = (pRetrieve->completed == 1);
   pRes->data      = pRetrieve->data;
   SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex);
...
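Note on the logging changes above: the client now prints error codes through tstrerror(), which returns a human-readable string, so the matching format specifier has to be %s rather than %d (corrected in the hunks above). A minimal, self-contained sketch of the pattern; demo_tstrerror and demoTrace are stand-ins invented for this example, not TDengine APIs:

```c
#include <stdint.h>
#include <stdio.h>

/* stand-in for the real tstrerror(); the mapping below is invented for the demo */
static const char *demo_tstrerror(int32_t code) {
  return (code == 0) ? "success" : "network unavailable";
}

#define demoTrace(fmt, ...) printf("TRC " fmt "\n", ##__VA_ARGS__)

int main(void) {
  int32_t code = 1;
  demoTrace("query is cancelled, code:%s", demo_tstrerror(code));  /* %s, because a string is passed */
  return 0;
}
```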
@@ -653,62 +653,6 @@ static void **tscBuildResFromSubqueries(SSqlObj *pSql) {
   return pRes->tsrow;
 }
-TAOS_ROW taos_fetch_row_impl(TAOS_RES *res) {
-  SSqlObj *pSql = (SSqlObj *)res;
-  SSqlCmd *pCmd = &pSql->cmd;
-  SSqlRes *pRes = &pSql->res;
-  if (pRes->qhandle == 0 || pCmd->command == TSDB_SQL_RETRIEVE_EMPTY_RESULT) {
-    return NULL;
-  }
-  if (pCmd->command == TSDB_SQL_METRIC_JOIN_RETRIEVE) {
-    tscFetchDatablockFromSubquery(pSql);
-    if (pRes->code == TSDB_CODE_SUCCESS) {
-      tscTrace("%p data from all subqueries have been retrieved to client", pSql);
-      return tscBuildResFromSubqueries(pSql);
-    } else {
-      tscTrace("%p retrieve data from subquery failed, code:%d", pSql, pRes->code);
-      return NULL;
-    }
-  } else if (pRes->row >= pRes->numOfRows) {
-    /**
-     * NOT a join query
-     *
-     * If the data block of current result set have been consumed already, try fetch next result
-     * data block from virtual node.
-     */
-    tscResetForNextRetrieve(pRes);
-    if (pCmd->command < TSDB_SQL_LOCAL) {
-      pCmd->command = (pCmd->command > TSDB_SQL_MGMT) ? TSDB_SQL_RETRIEVE : TSDB_SQL_FETCH;
-    }
-    tscProcessSql(pSql);  // retrieve data from virtual node
-    // if failed to retrieve data from current virtual node, try next one if exists
-    if (hasMoreVnodesToTry(pSql)) {
-      tscTryQueryNextVnode(pSql, NULL);
-    }
-    /*
-     * local reducer has handle this case,
-     * so no need to add the pRes->numOfRows for super table query
-     */
-    if (pCmd->command != TSDB_SQL_RETRIEVE_METRIC) {
-      pRes->numOfTotalInCurrentClause += pRes->numOfRows;
-    }
-    if (pRes->numOfRows == 0) {
-      return NULL;
-    }
-  }
-  return doSetResultRowData(pSql);
-}
 static void asyncFetchCallback(void *param, TAOS_RES *tres, int numOfRows) {
   SSqlObj* pSql = (SSqlObj*) tres;
   if (numOfRows < 0) {
@@ -729,7 +673,10 @@ TAOS_ROW taos_fetch_row(TAOS_RES *res) {
   SSqlCmd *pCmd = &pSql->cmd;
   SSqlRes *pRes = &pSql->res;
-  if (pRes->qhandle == 0 || pCmd->command == TSDB_SQL_RETRIEVE_EMPTY_RESULT || pCmd->command == TSDB_SQL_INSERT) {
+  if (pRes->qhandle == 0 ||
+      pRes->completed ||
+      pCmd->command == TSDB_SQL_RETRIEVE_EMPTY_RESULT ||
+      pCmd->command == TSDB_SQL_INSERT) {
     return NULL;
   }
...
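The two hunks above drop the old taos_fetch_row_impl() loop and instead gate taos_fetch_row() on the new pRes->completed flag carried by the retrieve response. A hedged sketch of that early-out, using a simplified stand-in struct rather than the real SSqlRes:

```c
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

/* simplified stand-in for SSqlRes; only the fields the check needs */
typedef struct {
  bool    completed;   /* filled from pRetrieve->completed in the retrieve response */
  int64_t numOfRows;   /* rows in the block currently held by the client */
  int64_t row;         /* read cursor inside that block */
} DemoRes;

/* NULL means: stop iterating, and do not send another fetch request */
static const void *demo_fetch_row(DemoRes *pRes) {
  if (pRes->completed) {
    return NULL;             /* mirrors the new early-out added to taos_fetch_row() */
  }
  if (pRes->row < pRes->numOfRows) {
    return &pRes->row;       /* still rows left in the block already on the client */
  }
  return NULL;               /* here the real client would request the next block instead */
}
```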
@@ -92,8 +92,8 @@ void dnodeRead(SRpcMsg *pMsg) {
   while (leftLen > 0) {
     SMsgHead *pHead = (SMsgHead *) pCont;
-    pHead->vgId    = 1;              //htonl(pHead->vgId);
-    pHead->contLen = pMsg->contLen;  //htonl(pHead->contLen);
+    pHead->vgId    = htonl(pHead->vgId);
+    pHead->contLen = htonl(pHead->contLen);
     void *pVnode = dnodeGetVnode(pHead->vgId);
     if (pVnode == NULL) {
@@ -253,17 +253,19 @@ static void dnodeProcessQueryMsg(SReadMsg *pMsg) {
   qTableQuery(pQInfo);
 }
+static int32_t c = 0;
 static void dnodeProcessRetrieveMsg(SReadMsg *pMsg) {
   SRetrieveTableMsg *pRetrieve = pMsg->pCont;
   void *pQInfo = (void*) htobe64(pRetrieve->qhandle);
   dTrace("QInfo:%p vgId:%d, retrieve msg is received", pQInfo, pRetrieve->header.vgId);
+  if ((++c)%2 == 0) {
+    int32_t k = 1;
+  }
   int32_t rowSize = 0;
   int32_t numOfRows = 0;
   int32_t contLen = 0;
-  SRpcMsg rpcRsp = {0};
   SRetrieveTableRsp *pRsp = NULL;
   int32_t code = qRetrieveQueryResultInfo(pQInfo, &numOfRows, &rowSize);
@@ -277,7 +279,7 @@ static void dnodeProcessRetrieveMsg(SReadMsg *pMsg) {
     code = qDumpRetrieveResult(pQInfo, &pRsp, &contLen);
   }
-  rpcRsp = (SRpcMsg) {
+  SRpcMsg rpcRsp = (SRpcMsg) {
      .handle  = pMsg->rpcMsg.handle,
      .pCont   = pRsp,
      .contLen = contLen,
@@ -286,4 +288,7 @@ static void dnodeProcessRetrieveMsg(SReadMsg *pMsg) {
   };
   rpcSendResponse(&rpcRsp);
+
+  //todo merge result should be done here
+  //dnodeProcessReadResult(&readMsg);
 }
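The first hunk above re-enables the byte-order conversion of the sub-message head before the vnode is looked up. A self-contained sketch of the same host/network conversion; DemoMsgHead is an assumed two-field layout, not the real SMsgHead:

```c
#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

/* two-field head standing in for SMsgHead; layout here is an assumption */
typedef struct {
  int32_t vgId;
  int32_t contLen;
} DemoMsgHead;

int main(void) {
  /* as the head arrives off the wire: fields are in network byte order */
  DemoMsgHead head = { .vgId = (int32_t)htonl(1), .contLen = (int32_t)htonl(128) };

  /* convert to host byte order before routing -- what the hunk re-enables */
  head.vgId    = (int32_t)ntohl((uint32_t)head.vgId);
  head.contLen = (int32_t)ntohl((uint32_t)head.contLen);

  printf("route to vgId:%d, contLen:%d\n", head.vgId, head.contLen);
  return 0;
}
```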
@@ -33,7 +33,7 @@ typedef struct SData {
 } SData;
 enum {
-  ST_QUERY_KILLED    = 0,  // query killed
+  // ST_QUERY_KILLED    = 0,  // query killed
   ST_QUERY_PAUSED    = 1,  // query paused, due to full of the response buffer
   ST_QUERY_COMPLETED = 2,  // query completed
 };
@@ -142,8 +142,8 @@ typedef struct SQuery {
   SResultRec rec;
   int32_t    pos;
   int64_t    pointsOffset;  // the number of points offset to save read data
   SData**    sdata;
   int32_t    capacity;
   SSingleColumnFilterInfo* pFilterInfo;
 } SQuery;
@@ -170,14 +170,14 @@ typedef struct SQueryRuntimeEnv {
 } SQueryRuntimeEnv;
 typedef struct SQInfo {
-  uint64_t         signature;
+  void*            signature;
   void*            pVnode;
   TSKEY            startTime;
-  int64_t          elapsedTime;
+  TSKEY            elapsedTime;
   SResultRec       rec;
   int32_t          pointsInterpo;
   int32_t          code;    // error code to returned to client
-  int32_t          killed;  // denotes if current query is killed
+  // int32_t          killed;  // denotes if current query is killed
   sem_t            dataReady;
   SArray*          pTableIdList;  // table list
   SQueryRuntimeEnv runtimeEnv;
...
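The SQInfo change above turns signature into a self-pointer, which is what the later qTableQuery() check `pQInfo->signature != pQInfo` relies on. A sketch of that handle-validation idiom with illustrative names (not the TDengine structs):

```c
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

typedef struct DemoQInfo {
  void *signature;    /* points to the struct itself while it is alive */
  int   payload;
} DemoQInfo;

static DemoQInfo *demo_alloc(void) {
  DemoQInfo *q = calloc(1, sizeof(*q));
  if (q != NULL) q->signature = q;
  return q;
}

static int demo_is_valid(DemoQInfo *q) {
  return q != NULL && q->signature == q;    /* rejects wiped or foreign handles */
}

int main(void) {
  DemoQInfo *q = demo_alloc();
  printf("valid:%d\n", demo_is_valid(q));   /* 1 */
  memset(q, 0, sizeof(*q));                 /* what vnodeFreeQInfo() does before freeing */
  printf("valid:%d\n", demo_is_valid(q));   /* 0 */
  free(q);
  return 0;
}
```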
@@ -64,38 +64,24 @@ typedef struct SPointInterpoSupporter {
 } SPointInterpoSupporter;
 typedef enum {
-  /*
-   * the program will call this function again, if this status is set.
-   * used to transfer from QUERY_RESBUF_FULL
-   */
+  // when query starts to execute, this status will set
   QUERY_NOT_COMPLETED = 0x1u,
-  /*
-   * output buffer is full, so, the next query will be employed,
-   * in this case, we need to set the appropriated start scan point for
-   * the next query.
-   *
-   * this status is only exist in group-by clause and
-   * diff/add/division/multiply/ query.
-   */
+  /* result output buffer is full, current query is paused.
+   * this status is only exist in group-by clause and diff/add/division/multiply/ query.
+   */
   QUERY_RESBUF_FULL = 0x2u,
-  /*
-   * query is over
-   * 1. this status is used in one row result query process, e.g.,
-   * count/sum/first/last/
-   * avg...etc.
-   * 2. when the query range on timestamp is satisfied, it is also denoted as
-   * query_compeleted
-   */
+  /* query is over
+   * 1. this status is used in one row result query process, e.g., count/sum/first/last/ avg...etc.
+   * 2. when all data within queried time window, it is also denoted as query_completed
+   */
   QUERY_COMPLETED = 0x4u,
-  /*
-   * all data has been scanned, so current search is stopped,
-   * At last, the function will transfer this status to QUERY_COMPLETED
-   */
-  QUERY_NO_DATA_TO_CHECK = 0x8u,
+  /* when the result is not completed return to client, this status will be
+   * usually used in case of interval query with interpolation option
+   */
+  QUERY_OVER = 0x8u,
 } vnodeQueryStatus;
 static void setQueryStatus(SQuery *pQuery, int8_t status);
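The enum above folds QUERY_NO_DATA_TO_CHECK into QUERY_COMPLETED and renames the last flag to QUERY_OVER; every former `QUERY_COMPLETED | QUERY_NO_DATA_TO_CHECK` test in the hunks below collapses to a single-bit check. A sketch of the flag arithmetic; the Q_STATUS_EQUAL body shown is an assumption about its semantics, not copied from the source:

```c
#include <stdint.h>
#include <stdio.h>

enum {
  QUERY_NOT_COMPLETED = 0x1u,
  QUERY_RESBUF_FULL   = 0x2u,
  QUERY_COMPLETED     = 0x4u,
  QUERY_OVER          = 0x8u,
};

/* assumed semantics: true if any of the requested bits is set in the status */
#define Q_STATUS_EQUAL(p, s)  (((p) & (s)) != 0)

int main(void) {
  uint8_t status = QUERY_COMPLETED | QUERY_OVER;
  if (Q_STATUS_EQUAL(status, QUERY_COMPLETED)) {
    printf("query has completed, no more blocks to scan\n");
  }
  return 0;
}
```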
@@ -1301,7 +1287,7 @@ static int32_t rowwiseApplyAllFunctions(SQueryRuntimeEnv *pRuntimeEnv, SDataStat
     if (pRuntimeEnv->pTSBuf != NULL) {
       // if timestamp filter list is empty, quit current query
       if (!tsBufNextPos(pRuntimeEnv->pTSBuf)) {
-        setQueryStatus(pQuery, QUERY_NO_DATA_TO_CHECK);
+        setQueryStatus(pQuery, QUERY_COMPLETED);
         break;
       }
     }
@@ -1621,10 +1607,7 @@ static void teardownQueryRuntimeEnv(SQueryRuntimeEnv *pRuntimeEnv) {
     pRuntimeEnv->pTSBuf = tsBufDestory(pRuntimeEnv->pTSBuf);
 }
-bool isQueryKilled(SQuery *pQuery) {
-  return false;
-
-  SQInfo *pQInfo = (SQInfo *)GET_QINFO_ADDR(pQuery);
+static bool isQueryKilled(SQuery *pQuery) {
 #if 0
   /*
    * check if the queried meter is going to be deleted.
@@ -1638,9 +1621,14 @@ bool isQueryKilled(SQuery *pQuery) {
   return (pQInfo->killed == 1);
 #endif
   return 0;
 }
+static bool setQueryKilled(SQInfo* pQInfo) {
+  pQInfo->code = TSDB_CODE_QUERY_CANCELLED;
+}
+
 bool isFixedOutputQuery(SQuery *pQuery) {
   if (pQuery->intervalTime != 0) {
     return false;
@@ -2664,7 +2652,6 @@ static int64_t doScanAllDataBlocks(SQueryRuntimeEnv *pRuntimeEnv) {
   while (tsdbNextDataBlock(pQueryHandle)) {
     // check if query is killed or not set the status of query to pass the status check
     if (isQueryKilled(pQuery)) {
-      setQueryStatus(pQuery, QUERY_NO_DATA_TO_CHECK);
       return cnt;
     }
@@ -2714,7 +2701,7 @@ static int64_t doScanAllDataBlocks(SQueryRuntimeEnv *pRuntimeEnv) {
   }
   if (isIntervalQuery(pQuery) && IS_MASTER_SCAN(pRuntimeEnv)) {
-    if (Q_STATUS_EQUAL(pQuery->status, QUERY_COMPLETED | QUERY_NO_DATA_TO_CHECK)) {
+    if (Q_STATUS_EQUAL(pQuery->status, QUERY_COMPLETED)) {
       int32_t step = QUERY_IS_ASC_QUERY(pQuery) ? QUERY_ASC_FORWARD_STEP : QUERY_DESC_FORWARD_STEP;
       closeAllTimeWindow(&pRuntimeEnv->windowResInfo);
@@ -3631,7 +3618,7 @@ void vnodeScanAllData(SQueryRuntimeEnv *pRuntimeEnv) {
     /* check if query is killed or not */
     if (isQueryKilled(pQuery)) {
-      setQueryStatus(pQuery, QUERY_NO_DATA_TO_CHECK);
+      // setQueryStatus(pQuery, QUERY_NO_DATA_TO_CHECK);
       return;
     }
   }
@@ -4111,7 +4098,7 @@ bool vnodeHasRemainResults(void *handle) {
   }
   // query has completed
-  if (Q_STATUS_EQUAL(pQuery->status, QUERY_COMPLETED | QUERY_NO_DATA_TO_CHECK)) {
+  if (Q_STATUS_EQUAL(pQuery->status, QUERY_COMPLETED)) {
     TSKEY ekey = taosGetRevisedEndKey(pQuery->window.ekey, pQuery->order.order, pQuery->intervalTime,
                                       pQuery->slidingTimeUnit, pQuery->precision);
     //    int32_t numOfTotal = taosGetNumOfResultWithInterpo(pInterpoInfo, (TSKEY
@@ -4272,7 +4259,7 @@ int32_t initQInfo(SQInfo *pQInfo, void *param, void* tsdb) {
            pQuery->window.ekey, pQuery->order.order);
     sem_post(&pQInfo->dataReady);
-    pQInfo->killed = 1;
+    setQueryStatus(pQuery, QUERY_COMPLETED);
     return TSDB_CODE_SUCCESS;
   }
@@ -5024,7 +5011,7 @@ static void tableFixedOutputProcessor(SQInfo *pQInfo) {
   // since the numOfOutputElems must be identical for all sql functions that are allowed to be executed simutanelously.
   pQuery->rec.pointsRead = getNumOfResult(pRuntimeEnv);
   //  assert(pQuery->pointsRead <= pQuery->pointsToRead &&
-  //         Q_STATUS_EQUAL(pQuery->over, QUERY_COMPLETED | QUERY_NO_DATA_TO_CHECK));
+  //         Q_STATUS_EQUAL(pQuery->over, QUERY_COMPLETED));
   // must be top/bottom query if offset > 0
   if (pQuery->limit.offset > 0) {
@@ -5128,7 +5115,7 @@ static void vnodeSingleMeterIntervalMainLooper(SQueryRuntimeEnv *pRuntimeEnv) {
       pQuery->limit.offset -= c;
     }
-    if (Q_STATUS_EQUAL(pQuery->status, QUERY_NO_DATA_TO_CHECK | QUERY_COMPLETED)) {
+    if (Q_STATUS_EQUAL(pQuery->status, QUERY_COMPLETED)) {
       break;
     }
@@ -5178,7 +5165,7 @@ static void tableIntervalProcessor(SQInfo *pQInfo) {
         pQInfo, (tFilePage **)pQuery->sdata, (tFilePage **)pInterpoBuf, pQuery->rec.pointsRead, &numOfInterpo);
     dTrace("QInfo: %p interpo completed, final:%d", pQInfo, pQuery->rec.pointsRead);
-    if (pQuery->rec.pointsRead > 0 || Q_STATUS_EQUAL(pQuery->status, QUERY_COMPLETED | QUERY_NO_DATA_TO_CHECK)) {
+    if (pQuery->rec.pointsRead > 0 || Q_STATUS_EQUAL(pQuery->status, QUERY_COMPLETED)) {
       doRevisedResultsByLimit(pQInfo);
       break;
     }
@@ -5206,17 +5193,20 @@
 }
 void qTableQuery(SQInfo *pQInfo) {
-  assert(pQInfo != NULL);
-
-  if (pQInfo->killed) {
-    dTrace("QInfo:%p it is already killed, abort", pQInfo);
+  if (pQInfo == NULL || pQInfo->signature != pQInfo) {
+    dTrace("%p freed abort query", pQInfo);
     return;
   }
   SQueryRuntimeEnv *pRuntimeEnv = &pQInfo->runtimeEnv;
-  SQuery * pQuery = pRuntimeEnv->pQuery;
+  SQuery *pQuery = pRuntimeEnv->pQuery;
-  // dTrace("vid:%d sid:%d id:%s, query thread is created, numOfQueries:%d, QInfo:%p", pQInfo);
+  if (isQueryKilled(pQuery)) {
+    dTrace("QInfo:%p it is already killed, abort", pQInfo);
+    return;
+  }
+
+  dTrace("QInfo:%p query task is launched", pQInfo);
   if (vnodeHasRemainResults(pQInfo)) {
     /*
@@ -5242,7 +5232,7 @@ void qTableQuery(SQInfo *pQInfo) {
   }
   // here we have scan all qualified data in both data file and cache
-  if (Q_STATUS_EQUAL(pQuery->status, QUERY_NO_DATA_TO_CHECK | QUERY_COMPLETED)) {
+  if (Q_STATUS_EQUAL(pQuery->status, QUERY_COMPLETED)) {
     // continue to get push data from the group result
     if (isGroupbyNormalCol(pQuery->pGroupbyExpr) ||
         (pQuery->intervalTime > 0 && pQInfo->rec.pointsTotal < pQuery->limit.limit)) {
@@ -5303,10 +5293,8 @@ void qTableQuery(SQInfo *pQInfo) {
   /* check if query is killed or not */
   if (isQueryKilled(pQuery)) {
     dTrace("QInfo:%p query is killed", pQInfo);
-    //    pQInfo->over = 1;
   } else {
-    //    dTrace("QInfo:%p vid:%d sid:%d id:%s, meter query thread completed, %d points are returned", pQInfo,
-    //           pMeterObj->vnode, pMeterObj->sid, pMeterObj->meterId, pQuery->pointsRead);
+    dTrace("QInfo:%p query task completed, %d points are returned", pQInfo, pQuery->rec.pointsRead);
   }
   sem_post(&pQInfo->dataReady);
@@ -5989,21 +5977,16 @@ bool isQInfoValid(void *param) {
   return (sig == (uint64_t)pQInfo);
 }
-void vnodeFreeQInfo(SQInfo *pQInfo, bool decQueryRef) {
+void vnodeFreeQInfo(SQInfo *pQInfo) {
   if (!isQInfoValid(pQInfo)) {
     return;
   }
-  pQInfo->killed = 1;
+  SQuery* pQuery = pQInfo->runtimeEnv.pQuery;
+  setQueryKilled(pQInfo);
   dTrace("QInfo:%p start to free SQInfo", pQInfo);
-  if (decQueryRef) {
-    vnodeDecMeterRefcnt(pQInfo);
-  }
-
-  SQuery *pQuery = pQInfo->runtimeEnv.pQuery;
-  for (int col = 0; col < pQuery->numOfOutputCols; ++col) {
+  for (int32_t col = 0; col < pQuery->numOfOutputCols; ++col) {
     tfree(pQuery->sdata[col]);
   }
@@ -6049,7 +6032,7 @@ void vnodeFreeQInfo(SQInfo *pQInfo, bool decQueryRef) {
   tfree(pQuery->pGroupbyExpr);
   tfree(pQuery);
-  // dTrace("QInfo:%p vid:%d sid:%d meterId:%s, QInfo is freed", pQInfo, pObj->vnode, pObj->sid, pObj->meterId);
+  dTrace("QInfo:%p QInfo is freed", pQInfo);
   // destroy signature, in order to avoid the query process pass the object safety check
   memset(pQInfo, 0, sizeof(SQInfo));
@@ -6105,7 +6088,7 @@ static int32_t createQInfo(SQueryTableMsg *pQueryMsg, SSqlGroupbyExpr *pGroupbyE
 _error:
   // table query ref will be decrease during error handling
-  vnodeFreeQInfo(*pQInfo, false);
+  vnodeFreeQInfo(*pQInfo);
   return code;
 }
@@ -6176,28 +6159,25 @@ int32_t qRetrieveQueryResultInfo(SQInfo *pQInfo, int32_t *numOfRows, int32_t *ro
   if (pQInfo == NULL || !isQInfoValid(pQInfo)) {
     return TSDB_CODE_INVALID_QHANDLE;
   }
-  if (pQInfo->killed) {
+  SQuery *pQuery = pQInfo->runtimeEnv.pQuery;
+  if (isQueryKilled(pQuery)) {
     dTrace("QInfo:%p query is killed, code:%d", pQInfo, pQInfo->code);
     if (pQInfo->code == TSDB_CODE_SUCCESS) {
       return TSDB_CODE_QUERY_CANCELLED;
     } else { // in case of not TSDB_CODE_SUCCESS, return the code to client
-      return abs(pQInfo->code);
+      return (pQInfo->code >= 0)? pQInfo->code:(-pQInfo->code);
     }
   }
   sem_wait(&pQInfo->dataReady);
-  SQuery *pQuery = pQInfo->runtimeEnv.pQuery;
   *numOfRows = pQInfo->rec.pointsRead;
   *rowsize   = pQuery->rowSize;
   dTrace("QInfo:%p retrieve result info, rowsize:%d, rows:%d, code:%d", pQInfo, *rowsize, *numOfRows, pQInfo->code);
-  if (pQInfo->code < 0) {  // less than 0 means there are error existed.
-    return -pQInfo->code;
-  }
+  return (pQInfo->code >= 0)? pQInfo->code:(-pQInfo->code);
 }
 static size_t getResultSize(SQInfo *pQInfo, int64_t *numOfRows) {
@@ -6250,6 +6230,11 @@ static int32_t doDumpQueryResult(SQInfo *pQInfo, char *data, int32_t *size) {
   pQInfo->rec.pointsTotal += pQInfo->rec.pointsRead;
   dTrace("QInfo:%p current:%d, total:%d", pQInfo, pQInfo->rec.pointsRead, pQInfo->rec.pointsTotal);
+
+  setQueryStatus(pQuery, QUERY_COMPLETED);
+  return TSDB_CODE_SUCCESS;
+
+  // todo if interpolation exists, the result may be dump to client by several rounds
 }
 static void addToTaskQueue(SQInfo* pQInfo) {
@@ -6261,11 +6246,7 @@ static void addToTaskQueue(SQInfo* pQInfo) {
     dTrace("QInfo:%p set query flag, sig:%" PRIu64 ", func:%s", pQInfo, pQInfo->signature, __FUNCTION__);
 #endif
-    if (pQInfo->killed == 1) {
-      dTrace("%p freed or killed, abort query", pQInfo);
-    } else {
-      // todo add to task queue
-    }
+    // todo add to task queue
   }
 }
@@ -6293,12 +6274,20 @@ int32_t qDumpRetrieveResult(SQInfo *pQInfo, SRetrieveTableRsp** pRsp, int32_t* c
   }
   if (pQInfo->rec.pointsRead > 0 && code == TSDB_CODE_SUCCESS) {
-    doDumpQueryResult(pQInfo, (*pRsp)->data, NULL);
+    code = doDumpQueryResult(pQInfo, (*pRsp)->data, NULL);
+
+    // has more data to return or need next round to execute
     addToTaskQueue(pQInfo);
-    return TSDB_CODE_SUCCESS;
+  } else if (isQueryKilled(pQuery)) {
+    code = TSDB_CODE_QUERY_CANCELLED;
   }
-  assert(code != TSDB_CODE_ACTION_IN_PROGRESS);
+  if (isQueryKilled(pQuery) || Q_STATUS_EQUAL(pQuery->status, QUERY_COMPLETED)) {
+    (*pRsp)->completed = 1;  // notify no more result to client
+    vnodeFreeQInfo(pQInfo);
+  }
+
+  return code;
   //  if (numOfRows == 0 && (pRetrieve->qhandle == (uint64_t)pObj->qhandle) && (code != TSDB_CODE_ACTION_IN_PROGRESS)) {
   //    dTrace("QInfo:%p %s free qhandle code:%d", pObj->qhandle, __FUNCTION__, code);
...
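Taken together, the retrieve-path hunks above make doDumpQueryResult() return a code, and qDumpRetrieveResult() stamps completed=1 and frees the QInfo once the query is finished or cancelled, so the client never has to issue a final empty fetch. A lifecycle sketch under assumed names, not the vnode source:

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

typedef struct { int8_t completed; } DemoRsp;
typedef struct { bool killed; bool allDumped; } DemoQInfo;

/* returns 0 for success, 1 standing in for TSDB_CODE_QUERY_CANCELLED */
static int32_t demo_dump_result(DemoQInfo *q, DemoRsp *rsp) {
  int32_t code = q->killed ? 1 : 0;

  if (q->killed || q->allDumped) {
    rsp->completed = 1;   /* the client's pRes->completed check will stop fetching */
    free(q);              /* vnodeFreeQInfo() in the real code */
  }
  return code;
}

int main(void) {
  DemoQInfo *q = calloc(1, sizeof(*q));
  if (q == NULL) return 1;
  q->allDumped = true;

  DemoRsp rsp = {0};
  printf("code:%d completed:%d\n", demo_dump_result(q, &rsp), rsp.completed);
  return 0;
}
```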
@@ -162,8 +162,6 @@ void taosDeleteStrHash(void *handle, char *string) {
   if (pObj == NULL || pObj->maxSessions == 0) return;
   if (string == NULL || string[0] == 0) return;
-  return;
-
   hash = (*(pObj->hashFp))(pObj, string);
   pthread_mutex_lock(&pObj->mutex);
...
@@ -34,20 +34,22 @@ typedef enum {
   TSDB_FILE_TYPE_MAX
 } TSDB_FILE_TYPE;
-extern const char *tsdbFileSuffix[];
-typedef struct {
-  int64_t size;
-  int64_t tombSize;
-} SFileInfo;
+#define IS_VALID_TSDB_FILE_TYPE(type) ((type) >= TSDB_FILE_TYPE_HEAD && (type) < TSDB_FILE_TYPE_MAX)
+
+extern const char *tsdbFileSuffix[];
 typedef struct {
   int8_t  type;
+  int     fd;
   char    fname[128];
   int64_t size;      // total size of the file
   int64_t tombSize;  // unused file size
+  int32_t totalBlocks;
+  int32_t totalSubBlocks;
 } SFile;
+#define TSDB_IS_FILE_OPENED(f) ((f)->fd != -1)
+
 typedef struct {
   int32_t fileId;
   SFile   files[TSDB_FILE_TYPE_MAX];
@@ -55,14 +57,26 @@ typedef struct {
 // TSDB file handle
 typedef struct {
-  int32_t daysPerFile;
-  int32_t keep;
-  int32_t minRowPerFBlock;
-  int32_t maxRowsPerFBlock;
-  int32_t maxTables;
+  int maxFGroups;
+  int numOfFGroups;
   SFileGroup fGroup[];
 } STsdbFileH;
+#define TSDB_MIN_FILE_ID(fh) (fh)->fGroup[0].fileId
+#define TSDB_MAX_FILE_ID(fh) (fh)->fGroup[(fh)->numOfFGroups - 1].fileId
+
+STsdbFileH *tsdbInitFileH(char *dataDir, int maxFiles);
+void        tsdbCloseFileH(STsdbFileH *pFileH);
+int         tsdbCreateFGroup(STsdbFileH *pFileH, char *dataDir, int fid, int maxTables);
+int         tsdbRemoveFileGroup(STsdbFileH *pFile, int fid);
+
+typedef struct {
+  int32_t len;
+  int32_t padding;  // For padding purpose
+  int64_t offset;
+} SCompIdx;
+
 /**
  * if numOfSubBlocks == -1, then the SCompBlock is a sub-block
  * if numOfSubBlocks == 1, then the SCompBlock refers to the data block, and offset/len refer to
@@ -83,14 +97,32 @@ typedef struct {
   TSKEY keyLast;
 } SCompBlock;
-#define IS_VALID_TSDB_FILE_TYPE(type) ((type) >= TSDB_FILE_TYPE_HEAD && (type) < TSDB_FILE_TYPE_MAX)
+typedef struct {
+  int32_t    delimiter;    // For recovery usage
+  int32_t    checksum;     // TODO: decide if checksum logic in this file or make it one API
+  int64_t    uid;
+  int32_t    padding;      // For padding purpose
+  int32_t    numOfBlocks;  // TODO: make the struct padding
+  SCompBlock blocks[];
+} SCompInfo;
-STsdbFileH *tsdbInitFile(char *dataDir, int32_t daysPerFile, int32_t keep, int32_t minRowsPerFBlock,
-                         int32_t maxRowsPerFBlock, int32_t maxTables);
+// TODO: take pre-calculation into account
+typedef struct {
+  int16_t colId;  // Column ID
+  int16_t len;    // Column length
+  int32_t type : 8;
+  int32_t offset : 24;
+} SCompCol;
+
+// TODO: Take recover into account
+typedef struct {
+  int32_t  delimiter;  // For recovery usage
+  int32_t  numOfCols;  // For recovery usage
+  int64_t  uid;        // For recovery usage
+  SCompCol cols[];
+} SCompData;
-void tsdbCloseFile(STsdbFileH *pFileH);
-int  tsdbCreateFileGroup(char *dataDir, int fileId, SFileGroup *pFGroup, int maxTables);
-void tsdbGetKeyRangeOfFileId(int32_t daysPerFile, int8_t precision, int32_t fileId, TSKEY *minKey, TSKEY *maxKey);
+void tsdbGetKeyRangeOfFileId(int32_t daysPerFile, int8_t precision, int32_t fileId, TSKEY *minKey, TSKEY *maxKey);
 #ifdef __cplusplus
 }
 #endif
...
@@ -27,72 +27,126 @@
 #define TSDB_FILE_HEAD_SIZE 512
 #define TSDB_FILE_DELIMITER 0xF00AFA0F
-typedef struct {
-  int32_t len;
-  int32_t padding;  // For padding purpose
-  int64_t offset;
-} SCompIdx;
-
-typedef struct {
-  int32_t    delimiter;    // For recovery usage
-  int32_t    checksum;     // TODO: decide if checksum logic in this file or make it one API
-  int64_t    uid;
-  int32_t    padding;      // For padding purpose
-  int32_t    numOfBlocks;  // TODO: make the struct padding
-  SCompBlock blocks[];
-} SCompInfo;
-
-// TODO: take pre-calculation into account
-typedef struct {
-  int16_t colId;  // Column ID
-  int16_t len;    // Column length
-  int32_t type : 8;
-  int32_t offset : 24;
-} SCompCol;
-
-// TODO: Take recover into account
-typedef struct {
-  int32_t  delimiter;  // For recovery usage
-  int32_t  numOfCols;  // For recovery usage
-  int64_t  uid;        // For recovery usage
-  SCompCol cols[];
-} SCompData;
-
 const char *tsdbFileSuffix[] = {
     ".head",  // TSDB_FILE_TYPE_HEAD
     ".data",  // TSDB_FILE_TYPE_DATA
     ".last"   // TSDB_FILE_TYPE_LAST
 };
+static int tsdbGetFileName(char *dataDir, int fileId, int8_t type, char *fname);
+static int tsdbCreateFile(char *dataDir, int fileId, int8_t type, int maxTables, SFile *pFile);
+static int tsdbWriteFileHead(SFile *pFile);
+static int tsdbWriteHeadFileIdx(SFile *pFile, int maxTables);
+static int compFGroupKey(const void *key, const void *fgroup);
+static int compFGroup(const void *arg1, const void *arg2);
+
+STsdbFileH *tsdbInitFileH(char *dataDir, int maxFiles) {
+  STsdbFileH *pFileH = (STsdbFileH *)calloc(1, sizeof(STsdbFileH) + sizeof(SFileGroup) * maxFiles);
+  if (pFileH == NULL) {  // TODO: deal with ERROR here
+    return NULL;
+  }
+
+  pFileH->maxFGroups = maxFiles;
+
+  DIR *dir = opendir(dataDir);
+  if (dir == NULL) {
+    free(pFileH);
+    return NULL;
+  }
+
+  struct dirent *dp;
+  while ((dp = readdir(dir)) != NULL) {
+    if (strncmp(dp->d_name, ".", 1) == 0 || strncmp(dp->d_name, "..", 1) == 0) continue;
+    // TODO
+  }
+
+  return pFileH;
+}
+
+void tsdbCloseFileH(STsdbFileH *pFileH) { free(pFileH); }
+
+int tsdbCreateFGroup(STsdbFileH *pFileH, char *dataDir, int fid, int maxTables) {
+  if (pFileH->numOfFGroups >= pFileH->maxFGroups) return -1;
+
+  SFileGroup  fGroup;
+  SFileGroup *pFGroup = &fGroup;
+  if (fid < TSDB_MIN_FILE_ID(pFileH) || fid > TSDB_MAX_FILE_ID(pFileH) ||
+      bsearch((void *)&fid, (void *)(pFileH->fGroup), pFileH->numOfFGroups, sizeof(SFileGroup), compFGroupKey) ==
+          NULL) {
+    pFGroup->fileId = fid;
+    for (int type = TSDB_FILE_TYPE_HEAD; type < TSDB_FILE_TYPE_MAX; type++) {
+      if (tsdbCreateFile(dataDir, fid, type, maxTables, &(pFGroup->files[type])) < 0) {
+        // TODO: deal with the ERROR here, remove those creaed file
+        return -1;
+      }
+    }
+
+    pFileH->fGroup[pFileH->numOfFGroups++] = fGroup;
+    qsort((void *)(pFileH->fGroup), pFileH->numOfFGroups, sizeof(SFileGroup), compFGroup);
+  }
+  return 0;
+}
+
+int tsdbRemoveFileGroup(STsdbFileH *pFileH, int fid) {
+  SFileGroup *pGroup =
+      bsearch((void *)&fid, (void *)(pFileH->fGroup), pFileH->numOfFGroups, sizeof(SFileGroup), compFGroupKey);
+  if (pGroup == NULL) return -1;
+
+  // Remove from disk
+  for (int type = TSDB_FILE_TYPE_HEAD; type < TSDB_FILE_TYPE_MAX; type++) {
+    remove(pGroup->files[type].fname);
+  }
+
+  // Adjust the memory
+  int filesBehind = pFileH->numOfFGroups - (((char *)pGroup - (char *)(pFileH->fGroup)) / sizeof(SFileGroup) + 1);
+  if (filesBehind > 0) {
+    memmove((void *)pGroup, (void *)((char *)pGroup + sizeof(SFileGroup)), sizeof(SFileGroup) * filesBehind);
+  }
+  pFileH->numOfFGroups--;
+  return 0;
+}
+
+static int compFGroupKey(const void *key, const void *fgroup) {
+  int         fid = *(int *)key;
+  SFileGroup *pFGroup = (SFileGroup *)fgroup;
+  return (fid - pFGroup->fileId);
+}
+
+static int compFGroup(const void *arg1, const void *arg2) {
+  return ((SFileGroup *)arg1)->fileId - ((SFileGroup *)arg2)->fileId;
+}
+
-static int tsdbWriteFileHead(int fd, SFile *pFile) {
+static int tsdbWriteFileHead(SFile *pFile) {
   char head[TSDB_FILE_HEAD_SIZE] = "\0";
   pFile->size += TSDB_FILE_HEAD_SIZE;
   // TODO: write version and File statistic to the head
-  lseek(fd, 0, SEEK_SET);
-  if (write(fd, head, TSDB_FILE_HEAD_SIZE) < 0) return -1;
+  lseek(pFile->fd, 0, SEEK_SET);
+  if (write(pFile->fd, head, TSDB_FILE_HEAD_SIZE) < 0) return -1;
   return 0;
 }
-static int tsdbWriteHeadFileIdx(int fd, int maxTables, SFile *pFile) {
+static int tsdbWriteHeadFileIdx(SFile *pFile, int maxTables) {
   int   size = sizeof(SCompIdx) * maxTables;
   void *buf = calloc(1, size);
   if (buf == NULL) return -1;
-  if (lseek(fd, TSDB_FILE_HEAD_SIZE, SEEK_SET) < 0) {
+  if (lseek(pFile->fd, TSDB_FILE_HEAD_SIZE, SEEK_SET) < 0) {
     free(buf);
     return -1;
   }
-  if (write(fd, buf, size) < 0) {
+  if (write(pFile->fd, buf, size) < 0) {
     free(buf);
     return -1;
   }
   pFile->size += size;
+  free(buf);
   return 0;
 }
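tsdbCreateFGroup() and tsdbRemoveFileGroup() above keep fGroup[] ordered by fileId, using bsearch() for lookup, qsort() after insertion, and memmove() to close the gap on removal. A self-contained sketch of that bookkeeping with a simplified stand-in for SFileGroup:

```c
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

typedef struct { int fileId; } DemoFGroup;   /* stand-in for SFileGroup */

static int compKey(const void *key, const void *grp) {
  return (*(const int *)key) - ((const DemoFGroup *)grp)->fileId;
}
static int compGroup(const void *a, const void *b) {
  return ((const DemoFGroup *)a)->fileId - ((const DemoFGroup *)b)->fileId;
}

int main(void) {
  DemoFGroup groups[8];
  int n = 0;

  int fids[] = {1822, 1820, 1821};
  for (int i = 0; i < 3; i++) {                       /* insert if absent, keep sorted */
    if (bsearch(&fids[i], groups, n, sizeof(DemoFGroup), compKey) == NULL) {
      groups[n++].fileId = fids[i];
      qsort(groups, n, sizeof(DemoFGroup), compGroup);
    }
  }

  int fid = 1821;                                     /* remove by shifting the tail down */
  DemoFGroup *hit = bsearch(&fid, groups, n, sizeof(DemoFGroup), compKey);
  if (hit != NULL) {
    size_t behind = n - (hit - groups) - 1;
    memmove(hit, hit + 1, behind * sizeof(DemoFGroup));
    n--;
  }

  for (int i = 0; i < n; i++) printf("%d ", groups[i].fileId);   /* prints: 1820 1822 */
  printf("\n");
  return 0;
}
```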
@@ -104,12 +158,27 @@ static int tsdbGetFileName(char *dataDir, int fileId, int8_t type, char *fname)
   return 0;
 }
-/**
- * Create a file and set the SFile object
- */
+static int tsdbOpenFileForWrite(SFile *pFile, int oflag) {  // TODO: change the function
+  if (TSDB_IS_FILE_OPENED(pFile)) return -1;
+
+  pFile->fd = open(pFile->fname, oflag, 0755);
+  if (pFile->fd < 0) return -1;
+
+  return 0;
+}
+
+static int tsdbCloseFile(SFile *pFile) {
+  if (!TSDB_IS_FILE_OPENED(pFile)) return -1;
+  int ret = close(pFile->fd);
+  pFile->fd = -1;
+  return ret;
+}
+
 static int tsdbCreateFile(char *dataDir, int fileId, int8_t type, int maxTables, SFile *pFile) {
   memset((void *)pFile, 0, sizeof(SFile));
   pFile->type = type;
+  pFile->fd = -1;
   tsdbGetFileName(dataDir, fileId, type, pFile->fname);
   if (access(pFile->fname, F_OK) == 0) {
@@ -117,93 +186,28 @@ static int tsdbCreateFile(char *dataDir, int fileId, int8_t type, int maxTables,
     return -1;
   }
-  int fd = open(pFile->fname, O_WRONLY | O_CREAT, 0755);
-  if (fd < 0) return -1;
+  if (tsdbOpenFileForWrite(pFile, O_WRONLY | O_CREAT) < 0) {
+    // TODO: deal with the ERROR here
+    return -1;
+  }
   if (type == TSDB_FILE_TYPE_HEAD) {
-    if (tsdbWriteHeadFileIdx(fd, maxTables, pFile) < 0) {
-      close(fd);
+    if (tsdbWriteHeadFileIdx(pFile, maxTables) < 0) {
+      tsdbCloseFile(pFile);
       return -1;
     }
   }
-  if (tsdbWriteFileHead(fd, pFile) < 0) {
-    close(fd);
+  if (tsdbWriteFileHead(pFile) < 0) {
+    tsdbCloseFile(pFile);
     return -1;
   }
-  close(fd);
-  return 0;
-}
-
-static int tsdbRemoveFile(SFile *pFile) {
-  if (pFile == NULL) return -1;
-  return remove(pFile->fname);
-}
-
-// Create a file group with fileId and return a SFileGroup object
-int tsdbCreateFileGroup(char *dataDir, int fileId, SFileGroup *pFGroup, int maxTables) {
-  if (dataDir == NULL || pFGroup == NULL) return -1;
-  memset((void *)pFGroup, 0, sizeof(SFileGroup));
-  for (int type = TSDB_FILE_TYPE_HEAD; type < TSDB_FILE_TYPE_MAX; type++) {
-    if (tsdbCreateFile(dataDir, fileId, type, maxTables, &(pFGroup->files[type])) < 0) {
-      // TODO: deal with the error here, remove the created files
-      return -1;
-    }
-  }
-  pFGroup->fileId = fileId;
+  tsdbCloseFile(pFile);
   return 0;
 }
-/**
- * Initialize the TSDB file handle
- */
-STsdbFileH *tsdbInitFile(char *dataDir, int32_t daysPerFile, int32_t keep, int32_t minRowsPerFBlock,
-                         int32_t maxRowsPerFBlock, int32_t maxTables) {
-  STsdbFileH *pTsdbFileH =
-      (STsdbFileH *)calloc(1, sizeof(STsdbFileH) + sizeof(SFileGroup) * tsdbGetMaxNumOfFiles(keep, daysPerFile));
-  if (pTsdbFileH == NULL) return NULL;
-
-  pTsdbFileH->daysPerFile = daysPerFile;
-  pTsdbFileH->keep = keep;
-  pTsdbFileH->minRowPerFBlock = minRowsPerFBlock;
-  pTsdbFileH->maxRowsPerFBlock = maxRowsPerFBlock;
-  pTsdbFileH->maxTables = maxTables;
-
-  // Open the directory to read information of each file
-  DIR *dir = opendir(dataDir);
-  if (dir == NULL) {
-    free(pTsdbFileH);
-    return NULL;
-  }
-
-  char fname[256];
-  struct dirent *dp;
-  while ((dp = readdir(dir)) != NULL) {
-    if (strncmp(dp->d_name, ".", 1) == 0 || strncmp(dp->d_name, "..", 2) == 0) continue;
-    if (true /* check if the file is the .head file */) {
-      int fileId = 0;
-      int vgId = 0;
-      sscanf(dp->d_name, "v%df%d.head", &vgId, &fileId);
-      // TODO
-      // Open head file
-      // Open data file
-      // Open last file
-    }
-  }
-
-  return pTsdbFileH;
-}
 void tsdbGetKeyRangeOfFileId(int32_t daysPerFile, int8_t precision, int32_t fileId, TSKEY *minKey,
                              TSKEY *maxKey) {
   *minKey = fileId * daysPerFile * tsMsPerDay[precision];
...
@@ -182,7 +182,7 @@ tsdb_repo_t *tsdbCreateRepo(char *rootDir, STsdbCfg *pCfg, void *limiter /* TODO
   char dataDir[128] = "\0";
   tsdbGetDataDirName(pRepo, dataDir);
   pRepo->tsdbFileH =
-      tsdbInitFile(dataDir, pCfg->daysPerFile, pCfg->keep, pCfg->minRowsPerFileBlock, pCfg->maxRowsPerFileBlock, pCfg->maxTables);
+      tsdbInitFileH(dataDir, pCfg->maxTables);
   if (pRepo->tsdbFileH == NULL) {
     free(pRepo->rootDir);
     tsdbFreeCache(pRepo->tsdbCache);
@@ -782,19 +782,51 @@ static int tsdbReadRowsFromCache(SSkipListIterator *pIter, TSKEY maxKey, int max
   return numOfRows;
 }
+static void tsdbDestroyTableIters(SSkipListIterator **iters, int maxTables) {
+  if (iters == NULL) return;
+
+  for (int tid = 0; tid < maxTables; tid++) {
+    if (iters[tid] == NULL) continue;
+    tSkipListDestroyIter(iters[tid]);
+  }
+
+  free(iters);
+}
+
+static SSkipListIterator **tsdbCreateTableIters(STsdbMeta *pMeta, int maxTables) {
+  SSkipListIterator **iters = (SSkipListIterator *)calloc(maxTables, sizeof(SSkipListIterator *));
+  if (iters == NULL) return NULL;
+
+  for (int tid = 0; tid < maxTables; tid++) {
+    STable *pTable = pMeta->tables[tid];
+    if (pTable == NULL || pTable->imem == NULL) continue;
+
+    iters[tid] = tSkipListCreateIter(pTable->imem->pData);
+    if (iters[tid] == NULL) {
+      tsdbDestroyTableIters(iters, maxTables);
+      return NULL;
+    }
+
+    if (!tSkipListIterNext(iters[tid])) {
+      assert(false);
+    }
+  }
+
+  return iters;
+}
+
 // Commit to file
 static void *tsdbCommitToFile(void *arg) {
   // TODO
+  printf("Starting to commit....\n");
   STsdbRepo * pRepo = (STsdbRepo *)arg;
   STsdbMeta * pMeta = pRepo->tsdbMeta;
   STsdbCache *pCache = pRepo->tsdbCache;
   STsdbCfg *  pCfg = &(pRepo->config);
   if (pCache->imem == NULL) return;
-  int sfid = tsdbGetKeyFileId(pCache->imem->keyFirst, pCfg->daysPerFile, pCfg->precision);
-  int efid = tsdbGetKeyFileId(pCache->imem->keyLast, pCfg->daysPerFile, pCfg->precision);
-
-  SSkipListIterator **iters = (SSkipListIterator **)calloc(pCfg->maxTables, sizeof(SSkipListIterator *));
+  // Create the iterator to read from cache
+  SSkipListIterator **iters = tsdbCreateTableIters(pMeta, pCfg->maxTables);
   if (iters == NULL) {
     // TODO: deal with the error
     return NULL;
@@ -805,10 +837,15 @@ static void *tsdbCommitToFile(void *arg) {
   SDataCol **cols = (SDataCol **)malloc(sizeof(SDataCol *) * maxCols);
   void *buf = malloc((maxBytes + sizeof(SDataCol)) * pCfg->maxRowsPerFileBlock);
+  int sfid = tsdbGetKeyFileId(pCache->imem->keyFirst, pCfg->daysPerFile, pCfg->precision);
+  int efid = tsdbGetKeyFileId(pCache->imem->keyLast, pCfg->daysPerFile, pCfg->precision);
+
   for (int fid = sfid; fid <= efid; fid++) {
     TSKEY minKey = 0, maxKey = 0;
     tsdbGetKeyRangeOfFileId(pCfg->daysPerFile, pCfg->precision, fid, &minKey, &maxKey);
+    // tsdbOpenFileForWrite(pRepo, fid);
+
     for (int tid = 0; tid < pCfg->maxTables; tid++) {
       STable *pTable = pMeta->tables[tid];
       if (pTable == NULL || pTable->imem == NULL) continue;
@@ -837,14 +874,10 @@ static void *tsdbCommitToFile(void *arg) {
     }
   }
-  // Free the iterator
-  for (int tid = 0; tid < pCfg->maxTables; tid++) {
-    if (iters[tid] != NULL) tSkipListDestroyIter(iters[tid]);
-  }
+  tsdbDestroyTableIters(iters, pCfg->maxTables);
   free(buf);
   free(cols);
-  free(iters);
   tsdbLockRepo(arg);
   tdListMove(pCache->imem->list, pCache->pool.memPool);
...
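tsdbCreateTableIters()/tsdbDestroyTableIters() above follow an all-or-nothing pattern: if any per-table iterator fails to allocate, everything created so far is torn down through the same destroy helper. A sketch of the pattern using plain heap buffers in place of SSkipListIterator so it stays self-contained:

```c
#include <stdlib.h>

static void demo_destroy_iters(void **iters, int maxTables) {
  if (iters == NULL) return;
  for (int tid = 0; tid < maxTables; tid++) {
    free(iters[tid]);                       /* tSkipListDestroyIter() in the real code */
  }
  free(iters);
}

static void **demo_create_iters(int maxTables) {
  void **iters = calloc(maxTables, sizeof(void *));
  if (iters == NULL) return NULL;

  for (int tid = 0; tid < maxTables; tid++) {
    iters[tid] = malloc(16);                /* tSkipListCreateIter() in the real code */
    if (iters[tid] == NULL) {
      demo_destroy_iters(iters, maxTables); /* roll back everything created so far */
      return NULL;
    }
  }
  return iters;
}

int main(void) {
  void **iters = demo_create_iters(8);
  demo_destroy_iters(iters, 8);
  return 0;
}
```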
@@ -142,7 +142,7 @@ TEST(TsdbTest, DISABLED_openRepo) {
 TEST(TsdbTest, DISABLED_createFileGroup) {
   SFileGroup fGroup;
-  ASSERT_EQ(tsdbCreateFileGroup("/home/ubuntu/work/ttest/vnode0/data", 1820, &fGroup, 1000), 0);
+  // ASSERT_EQ(tsdbCreateFileGroup("/home/ubuntu/work/ttest/vnode0/data", 1820, &fGroup, 1000), 0);
   int k = 0;
 }
\ No newline at end of file