Unverified · Commit 72ce043d, authored by: S slguan, committed by: GitHub

Merge pull request #770 from taosdata/feature/liaohj

Feature/liaohj
@@ -174,7 +174,7 @@ void queryOnBlock(SMeterQuerySupportObj* pSupporter, int64_t* primaryKeys, int32
                   SBlockInfo* pBlockBasicInfo, SMeterDataInfo* pDataHeadInfoEx, SField* pFields,
                   __block_search_fn_t searchFn);
-SMeterDataInfo** vnodeFilterQualifiedMeters(SQInfo* pQInfo, int32_t vid, SQueryFileInfo* pQueryFileInfo,
+SMeterDataInfo** vnodeFilterQualifiedMeters(SQInfo* pQInfo, int32_t vid, int32_t fileIndex,
                                             tSidSet* pSidSet, SMeterDataInfo* pMeterDataInfo, int32_t* numOfMeters);
 int32_t vnodeGetVnodeHeaderFileIdx(int32_t* fid, SQueryRuntimeEnv* pRuntimeEnv, int32_t order);
@@ -194,6 +194,7 @@ uint32_t getDataBlocksForMeters(SMeterQuerySupportObj* pSupporter, SQuery* pQuer
                                 int32_t numOfMeters, SQueryFileInfo* pQueryFileInfo, SMeterDataInfo** pMeterDataInfo);
 int32_t LoadDatablockOnDemand(SCompBlock* pBlock, SField** pFields, int8_t* blkStatus, SQueryRuntimeEnv* pRuntimeEnv,
                               int32_t fileIdx, int32_t slotIdx, __block_search_fn_t searchFn, bool onDemand);
+char *vnodeGetHeaderFileData(SQueryRuntimeEnv *pRuntimeEnv, int32_t fileIndex);
 /**
  * Create SMeterQueryInfo.
......
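The vnodeGetHeaderFileData declaration added above lands without a comment. Judging from the call site added later in this patch (the NULL check before getDataBlocksForMeters), a documenting comment along the following lines would fit; the wording is inferred from that call site, not taken from the patch itself.

/*
 * Suggested documentation (inferred, not part of this patch):
 * return the in-memory content of the header file identified by fileIndex,
 * mapping (mmap) it on demand. Returns NULL if the header file cannot be
 * mapped, in which case the caller is expected to skip this file.
 */
char *vnodeGetHeaderFileData(SQueryRuntimeEnv *pRuntimeEnv, int32_t fileIndex);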
@@ -57,9 +57,9 @@ typedef struct SQueryFileInfo {
   char lastFilePath[256];
   int32_t defaultMappingSize; /* default mapping size */
-  int32_t headerFd;        /* file handler */
-  char*   pHeaderFileData; /* mmap header files */
-  size_t  headFileSize;
+  int32_t headerFd;        /* file handler */
+  char*   pHeaderFileData; /* mmap header files */
+  size_t  headFileSize;
   int32_t dataFd;
   char* pDataFileData;
   size_t dataFileSize;
@@ -107,44 +107,40 @@ typedef struct SOutputRes {
 } SOutputRes;
 typedef struct RuntimeEnvironment {
-  SPositionInfo startPos; /* the start position, used for secondary/third iteration */
-  SPositionInfo endPos;   /* the last access position in query, served as the start pos of reversed order query */
-  SPositionInfo nextPos;  /* start position of the next scan */
-  SData* colDataBuffer[TSDB_MAX_COLUMNS];
-  SResultInfo* resultInfo;
-  // Indicate if data block is loaded, the block is first/last/internal block
-  int8_t blockStatus;
-  int32_t unzipBufSize;
-  SData* primaryColBuffer;
-  char* unzipBuffer;
-  char* secondaryUnzipBuffer;
-  SQuery* pQuery;
-  SMeterObj* pMeterObj;
-  SQLFunctionCtx* pCtx;
-  SQueryLoadBlockInfo loadBlockInfo; /* record current block load information */
+  SPositionInfo startPos; /* the start position, used for secondary/third iteration */
+  SPositionInfo endPos;   /* the last access position in query, served as the start pos of reversed order query */
+  SPositionInfo nextPos;  /* start position of the next scan */
+  SData* colDataBuffer[TSDB_MAX_COLUMNS];
+  SResultInfo* resultInfo;
+  uint8_t blockStatus; // Indicate if data block is loaded, the block is first/last/internal block
+  int32_t unzipBufSize;
+  SData* primaryColBuffer;
+  char* unzipBuffer;
+  char* secondaryUnzipBuffer;
+  SQuery* pQuery;
+  SMeterObj* pMeterObj;
+  SQLFunctionCtx* pCtx;
+  SQueryLoadBlockInfo loadBlockInfo; /* record current block load information */
   SQueryLoadCompBlockInfo loadCompBlockInfo; /* record current compblock information in SQuery */
   /*
    * header files info, avoid to iterate the directory, the data is acquired
    * during in query preparation function
   */
-  SQueryFileInfo* pHeaderFiles;
-  uint32_t numOfFiles; /* number of files of one vnode during query execution */
-  int16_t numOfRowsPerPage;
-  int16_t offset[TSDB_MAX_COLUMNS];
-  int16_t scanFlag; /* denotes reversed scan of data or not */
+  SQueryFileInfo* pVnodeFiles;
+  uint32_t numOfFiles;      // the total available number of files for this virtual node during query execution
+  int32_t mmapedHFileIndex; // the mmaped header file, NOTE: only one header file can be mmap.
+  int16_t numOfRowsPerPage;
+  int16_t offset[TSDB_MAX_COLUMNS];
+  int16_t scanFlag; // denotes reversed scan of data or not
   SInterpolationInfo interpoInfo;
   SData** pInterpoBuf;
   SOutputRes* pResult; // reference to SQuerySupporter->pResult
   void* hashList;
   int32_t usedIndex; // assigned SOutputRes in list
-  STSBuf* pTSBuf;
-  STSCursor cur;
-  SQueryCostSummary summary;
+  STSBuf* pTSBuf;
+  STSCursor cur;
+  SQueryCostSummary summary;
 } SQueryRuntimeEnv;
 /* intermediate result during multimeter query involves interval */
......
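The struct change above drops the eagerly mapped per-file header data: pHeaderFiles becomes pVnodeFiles, and the new mmapedHFileIndex records which header file is currently mmapped, since the added comment states only one header file can be mapped at a time. A minimal sketch of the release step this bookkeeping implies is shown below; the helper name unmapPreviousHeaderFile and the -1 sentinel are assumptions for illustration, not code from this patch.

#include <sys/mman.h>

/* Illustrative only: release whichever header file is currently mapped.
 * Field names follow the diff; the helper itself and the -1 sentinel are assumed. */
static void unmapPreviousHeaderFile(SQueryRuntimeEnv *pRuntimeEnv) {
  if (pRuntimeEnv->mmapedHFileIndex < 0) {
    return;  /* nothing is mapped yet */
  }
  SQueryFileInfo *pPrev = &pRuntimeEnv->pVnodeFiles[pRuntimeEnv->mmapedHFileIndex];
  if (pPrev->pHeaderFileData != NULL) {
    munmap(pPrev->pHeaderFileData, pPrev->headFileSize);  /* drop the old mapping */
    pPrev->pHeaderFileData = NULL;
  }
  pRuntimeEnv->mmapedHFileIndex = -1;  /* assumed sentinel for "no header file mapped" */
}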
@@ -289,12 +289,11 @@ static SMeterDataInfo *queryOnMultiDataFiles(SQInfo *pQInfo, SMeterDataInfo *pMe
     pQuery->fileId = fid;
     pSummary->numOfFiles++;
-    SQueryFileInfo *pQueryFileInfo = &pRuntimeEnv->pHeaderFiles[fileIdx];
-    char *pHeaderData = pQueryFileInfo->pHeaderFileData;
+    SQueryFileInfo *pQueryFileInfo = &pRuntimeEnv->pVnodeFiles[fileIdx];
     int32_t numOfQualifiedMeters = 0;
-    SMeterDataInfo **pReqMeterDataInfo = vnodeFilterQualifiedMeters(
-        pQInfo, vnodeId, pQueryFileInfo, pSupporter->pSidSet, pMeterDataInfo, &numOfQualifiedMeters);
+    SMeterDataInfo **pReqMeterDataInfo = vnodeFilterQualifiedMeters(pQInfo, vnodeId, fileIdx, pSupporter->pSidSet,
+                                                                    pMeterDataInfo, &numOfQualifiedMeters);
     dTrace("QInfo:%p file:%s, %d meters qualified", pQInfo, pQueryFileInfo->dataFilePath, numOfQualifiedMeters);
     if (pReqMeterDataInfo == NULL) {
@@ -312,6 +311,11 @@ static SMeterDataInfo *queryOnMultiDataFiles(SQInfo *pQInfo, SMeterDataInfo *pMe
       continue;
     }
+    char *pHeaderData = vnodeGetHeaderFileData(pRuntimeEnv, fileIdx);
+    if (pHeaderData == NULL) { // failed to mmap header file into buffer
+      continue;
+    }
     uint32_t numOfBlocks = getDataBlocksForMeters(pSupporter, pQuery, pHeaderData, numOfQualifiedMeters, pQueryFileInfo,
                                                   pReqMeterDataInfo);
@@ -500,7 +504,7 @@ static int64_t doCheckMetersInGroup(SQInfo *pQInfo, int32_t index, int32_t start
 #if DEFAULT_IO_ENGINE == IO_ENGINE_MMAP
   for (int32_t i = 0; i < pRuntimeEnv->numOfFiles; ++i) {
-    resetMMapWindow(&pRuntimeEnv->pHeaderFiles[i]);
+    resetMMapWindow(&pRuntimeEnv->pVnodeFiles[i]);
   }
 #endif
@@ -670,7 +674,7 @@ static void vnodeMultiMeterMultiOutputProcessor(SQInfo *pQInfo) {
 #if DEFAULT_IO_ENGINE == IO_ENGINE_MMAP
   for (int32_t i = 0; i < pRuntimeEnv->numOfFiles; ++i) {
-    resetMMapWindow(&pRuntimeEnv->pHeaderFiles[i]);
+    resetMMapWindow(&pRuntimeEnv->pVnodeFiles[i]);
   }
 #endif
......