diff --git a/documentation20/webdocs/markdowndocs/cluster-ch.md b/documentation20/webdocs/markdowndocs/cluster-ch.md
index 2df6d2cb0eef9af7037076619efb2080568ff9ef..afe0272387b2baea1fa0f6adb638e3abb17d0525 100644
--- a/documentation20/webdocs/markdowndocs/cluster-ch.md
+++ b/documentation20/webdocs/markdowndocs/cluster-ch.md
@@ -107,7 +107,7 @@ CREATE DATABASE demo replica 3;
 ```
 
 一个DB里的数据会被切片分到多个vnode group,vnode group里的vnode数目就是DB的副本数,同一个vnode group里各vnode的数据是完全一致的。为保证高可用性,vnode group里的vnode一定要分布在不同的dnode里(实际部署时,需要在不同的物理机上),只要一个vgroup里超过半数的vnode处于工作状态,这个vgroup就能正常的对外服务。
 
-一个dnode里可能有多个DB的数据,因此一个dnode离线时,可能会影响到多个DB。如果一个vnode group里的一半或一半以上的vnode不工作,那么该vnode group就无法对外服务,无法插入或读取数据,这样会影响到它所属的DB的一部分表的d读写操作。
+一个dnode里可能有多个DB的数据,因此一个dnode离线时,可能会影响到多个DB。如果一个vnode group里的一半或一半以上的vnode不工作,那么该vnode group就无法对外服务,无法插入或读取数据,这样会影响到它所属的DB的一部分表的读写操作。
 
 因为vnode的引入,无法简单的给出结论:"集群中过半dnode工作,集群就应该工作"。但是对于简单的情形,很好下结论。比如副本数为3,只有三个dnode,那如果仅有一个节点不工作,整个集群还是可以正常工作的,但如果有两个节点不工作,那整个集群就无法正常工作了。
diff --git a/src/query/src/qExecutor.c b/src/query/src/qExecutor.c
index be3d476be5df11bf9155634ad665ede5a460d76f..3a31e5b9ebc5bd1801d8fb1a4d76b3c518e0022f 100644
--- a/src/query/src/qExecutor.c
+++ b/src/query/src/qExecutor.c
@@ -1347,6 +1347,11 @@ static int32_t tableApplyFunctionsOnBlock(SQueryRuntimeEnv *pRuntimeEnv, SDataBl
     if ((pQuery->limit.limit >= 0) && (pQuery->limit.limit + pQuery->limit.offset) <= numOfRes) {
       setQueryStatus(pQuery, QUERY_COMPLETED);
     }
+
+    if (((pTableQInfo->lastKey > pTableQInfo->win.ekey) && QUERY_IS_ASC_QUERY(pQuery)) ||
+        ((pTableQInfo->lastKey < pTableQInfo->win.ekey) && (!QUERY_IS_ASC_QUERY(pQuery)))) {
+      setQueryStatus(pQuery, QUERY_COMPLETED);
+    }
   }
 }
 
diff --git a/src/tsdb/src/tsdbRead.c b/src/tsdb/src/tsdbRead.c
index 17b0239e3be23c49ff75f9da696dc8fab65055aa..010955388b0ec757c4fe848360354b8ca13362df 100644
--- a/src/tsdb/src/tsdbRead.c
+++ b/src/tsdb/src/tsdbRead.c
@@ -172,6 +172,7 @@ TsdbQueryHandleT* tsdbQueryTables(TSDB_REPO_T* tsdb, STsdbQueryCond* pCond, STab
   if (pQueryHandle == NULL) {
     goto out_of_memory;
   }
+
   pQueryHandle->order = pCond->order;
   pQueryHandle->window = pCond->twindow;
   pQueryHandle->pTsdb = tsdb;
@@ -183,6 +184,7 @@ TsdbQueryHandleT* tsdbQueryTables(TSDB_REPO_T* tsdb, STsdbQueryCond* pCond, STab
   pQueryHandle->qinfo = qinfo;
   pQueryHandle->outputCapacity = ((STsdbRepo*)tsdb)->config.maxRowsPerFileBlock;
   pQueryHandle->allocSize = 0;
+  pQueryHandle->locateStart = false;
   if (tsdbInitReadHelper(&pQueryHandle->rhelper, (STsdbRepo*) tsdb) != 0) {
     goto out_of_memory;
   }
@@ -193,6 +195,12 @@ TsdbQueryHandleT* tsdbQueryTables(TSDB_REPO_T* tsdb, STsdbQueryCond* pCond, STab
   size_t sizeOfGroup = taosArrayGetSize(groupList->pGroupList);
   assert(sizeOfGroup >= 1 && pCond != NULL && pCond->numOfCols > 0);
 
+  if (ASCENDING_TRAVERSE(pCond->order)) {
+    assert(pQueryHandle->window.skey <= pQueryHandle->window.ekey);
+  } else {
+    assert(pQueryHandle->window.skey >= pQueryHandle->window.ekey);
+  }
+
   // allocate buffer in order to load data blocks from file
   int32_t numOfCols = pCond->numOfCols;
 
@@ -244,6 +252,8 @@ TsdbQueryHandleT* tsdbQueryTables(TSDB_REPO_T* tsdb, STsdbQueryCond* pCond, STab
       info.pTableObj->type == TSDB_CHILD_TABLE || info.pTableObj->type == TSDB_STREAM_TABLE));
 
       taosArrayPush(pQueryHandle->pTableCheckInfo, &info);
+      tsdbDebug("%p check table uid:%"PRId64", tid:%d from lastKey:%"PRId64" %p", pQueryHandle, info.tableId.uid,
+                info.tableId.tid, info.lastKey, qinfo);
     }
   }
 
@@ -1072,6 +1082,14 @@ static void doMergeTwoLevelData(STsdbQueryHandle* pQueryHandle, STableCheckInfo*
 
   TSKEY* tsArray = pCols->cols[0].pData;
 
+  if (ASCENDING_TRAVERSE(pQueryHandle->order)) {
+    TSKEY s = tsArray[cur->pos];
+    assert(s >= pQueryHandle->window.skey && s <= pQueryHandle->window.ekey);
+  } else {
+    TSKEY s = tsArray[cur->pos];
+    assert(s <= pQueryHandle->window.skey && s >= pQueryHandle->window.ekey);
+  }
+
   // for search the endPos, so the order needs to reverse
   int32_t order = (pQueryHandle->order == TSDB_ORDER_ASC)? TSDB_ORDER_DESC:TSDB_ORDER_ASC;
 
@@ -1551,7 +1569,7 @@ static int32_t getDataBlocksInFiles(STsdbQueryHandle* pQueryHandle, bool* exists
     STableCheckInfo* pCheckInfo = pBlockInfo->pTableCheckInfo;
 
     // current block is done, try next
-    if (!cur->mixBlock || cur->blockCompleted) {
+    if ((!cur->mixBlock) || cur->blockCompleted) {
       if ((cur->slot == pQueryHandle->numOfBlocks - 1 && ASCENDING_TRAVERSE(pQueryHandle->order)) ||
           (cur->slot == 0 && !ASCENDING_TRAVERSE(pQueryHandle->order))) {
         // all data blocks in current file has been checked already, try next file if exists
@@ -1570,6 +1588,7 @@
         return TSDB_CODE_SUCCESS;
       }
     } else {
+      tsdbDebug("%p continue in current data block, index:%d, %p", pQueryHandle, cur->slot, pQueryHandle->qinfo);
       handleDataMergeIfNeeded(pQueryHandle, pBlockInfo->compBlock, pCheckInfo);
       *exists = pQueryHandle->realNumOfRows > 0;
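
The hunks above all encode the same direction-aware rule: for an ascending scan the query window must satisfy skey <= ekey and the scan is finished once lastKey moves past ekey, while for a descending scan the window is given as skey >= ekey and the scan is finished once lastKey drops below ekey. Below is a minimal standalone sketch of that rule only; it is not TDengine code, and every name in it is hypothetical.

/*
 * Illustrative sketch: mirrors the window-validity asserts added in
 * tsdbQueryTables() and the early-completion check added in
 * tableApplyFunctionsOnBlock(), using hypothetical names.
 */
#include <assert.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef int64_t TSKEY;

typedef struct {
  TSKEY skey;  /* window start, in scan direction */
  TSKEY ekey;  /* window end, in scan direction   */
} SWindowSketch;

/* the window is well-formed for the given scan direction */
static bool windowIsValid(const SWindowSketch* win, bool ascending) {
  return ascending ? (win->skey <= win->ekey) : (win->skey >= win->ekey);
}

/* the scan is complete once lastKey has passed ekey in scan direction */
static bool scanCompleted(TSKEY lastKey, const SWindowSketch* win, bool ascending) {
  return ascending ? (lastKey > win->ekey) : (lastKey < win->ekey);
}

int main(void) {
  SWindowSketch asc  = {.skey = 100, .ekey = 200};  /* ascending window  */
  SWindowSketch desc = {.skey = 200, .ekey = 100};  /* descending window */

  assert(windowIsValid(&asc, true));
  assert(windowIsValid(&desc, false));

  printf("asc  lastKey=150 done? %d\n", scanCompleted(150, &asc, true));   /* 0 */
  printf("asc  lastKey=201 done? %d\n", scanCompleted(201, &asc, true));   /* 1 */
  printf("desc lastKey=150 done? %d\n", scanCompleted(150, &desc, false)); /* 0 */
  printf("desc lastKey=99  done? %d\n", scanCompleted(99, &desc, false));  /* 1 */
  return 0;
}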