Commit d208282a authored by Haojun Liao

refactor: do some internal refactor.

Parent 9dd07cc1
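At a high level, this refactor replaces hard ASSERT/ASSERTS checks in the linear-hash, memory-bucket, and disk-based paged-buffer code with runtime validation that logs via uError and returns early instead of aborting the process. A minimal sketch of the before/after pattern; the helper below is hypothetical and is not a function added by this commit:

  // Before: an invalid argument trips an assertion and aborts the process.
  //   ASSERT(pBuf != NULL && id >= 0);
  //
  // After: validate at runtime, report through the logger, and fail gracefully.
  void* getPageChecked(SDiskbasedBuf* pBuf, int32_t id) {  // hypothetical helper, sketch only
    if (pBuf == NULL || id < 0) {
      uError("invalid page request, id:%d", id);  // same logging style used in the diff
      return NULL;                                // the caller decides how to recover
    }
    return getBufPage(pBuf, id);                  // normal path unchanged
  }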
@@ -123,8 +123,6 @@ static int32_t doAddToBucket(SLHashObj* pHashObj, SLHashBucket* pBucket, int32_t
 }
 
 static void doRemoveFromBucket(SFilePage* pPage, SLHashNode* pNode, SLHashBucket* pBucket) {
-  ASSERT(pPage != NULL && pNode != NULL && pBucket->size >= 1);
   int32_t len = GET_LHASH_NODE_LEN(pNode);
   char* p = (char*)pNode + len;
@@ -301,8 +299,6 @@ void* tHashCleanup(SLHashObj* pHashObj) {
 }
 
 int32_t tHashPut(SLHashObj* pHashObj, const void* key, size_t keyLen, void* data, size_t size) {
-  ASSERT(pHashObj != NULL && key != NULL);
   if (pHashObj->bits == 0) {
     SLHashBucket* pBucket = pHashObj->pBucket[0];
     doAddToBucket(pHashObj, pBucket, 0, key, keyLen, data, size);
@@ -363,14 +359,12 @@ int32_t tHashPut(SLHashObj* pHashObj, const void* key, size_t keyLen, void* data
       if (v1 != splitBucketId) {  // place it into the new bucket
         ASSERT(v1 == newBucketId);
         // printf("move key:%d to 0x%x bucket, remain items:%d\n", *(int32_t*)k, v1, pBucket->size - 1);
         SLHashBucket* pNewBucket = pHashObj->pBucket[newBucketId];
         doAddToBucket(pHashObj, pNewBucket, newBucketId, (void*)GET_LHASH_NODE_KEY(pNode), pNode->keyLen,
                       GET_LHASH_NODE_KEY(pNode), pNode->dataLen);
         doRemoveFromBucket(p, pNode, pBucket);
       } else {
         // printf("check key:%d, located into: %d, skip it\n", *(int*) k, v1);
         int32_t nodeSize = GET_LHASH_NODE_LEN(pStart);
         pStart += nodeSize;
       }
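For context on the split loop above: in linear hashing, every record of the bucket being split is rehashed with the current bit width, and it either stays in splitBucketId or moves to the newly created newBucketId. A sketch of the conventional bucket-id computation this relies on; the real doGetBucketIdFromHashVal() is not shown in this diff, so treat the masking details as an assumption:

  // Sketch only: conventional linear-hashing bucket selection.
  static int32_t bucketIdSketch(int32_t hashv, int32_t bits, int32_t numOfBuckets) {
    int32_t v = hashv & ((1 << bits) - 1);  // keep the low `bits` bits of the hash
    if (v >= numOfBuckets) {
      v -= 1 << (bits - 1);                 // target bucket not created yet: fold back to its parent
    }
    return v;
  }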
@@ -385,7 +379,6 @@ int32_t tHashPut(SLHashObj* pHashObj, const void* key, size_t keyLen, void* data
 }
 
 char* tHashGet(SLHashObj* pHashObj, const void* key, size_t keyLen) {
-  ASSERT(pHashObj != NULL && key != NULL && keyLen > 0);
   int32_t hashv = pHashObj->hashFn(key, keyLen);
   int32_t bucketId = doGetBucketIdFromHashVal(hashv, pHashObj->bits);
...
@@ -346,8 +346,6 @@ void tMemBucketUpdateBoundingBox(MinMaxEntry *r, const char *data, int32_t dataT
  * in memory bucket, we only accept data array list
  */
 int32_t tMemBucketPut(tMemBucket *pBucket, const void *data, size_t size) {
-  ASSERT(pBucket != NULL && data != NULL && size > 0);
   int32_t count = 0;
   int32_t bytes = pBucket->bytes;
   for (int32_t i = 0; i < size; ++i) {
...
@@ -132,7 +132,6 @@ static char* doFlushPageToDisk(SDiskbasedBuf* pBuf, SPageInfo* pg) {
   if (pg->offset == -1 || pg->dirty) {
     void* payload = GET_DATA_PAYLOAD(pg);
     t = doCompressData(payload, pBuf->pageSize, &size, pBuf);
-    ASSERTS(size >= 0, "size is negative");
   }
 
   // this page is flushed to disk for the first time
@@ -272,13 +271,15 @@ static SListNode* getEldestUnrefedPage(SDiskbasedBuf* pBuf) {
   SListNode* pn = NULL;
   while ((pn = tdListNext(&iter)) != NULL) {
     SPageInfo* pageInfo = *(SPageInfo**)pn->data;
-    ASSERT(pageInfo->pageId >= 0 && pageInfo->pn == pn);
+    if (pageInfo->pageId < 0 || pageInfo->pn != pn) {
+      uError("data inconsistent in paged buffer, %s", pBuf->id);
+      return NULL;
+    }
 
     if (!pageInfo->used) {
       // printf("%d is chosen\n", pageInfo->pageId);
       break;
     } else {
-      // printf("page %d is used, dirty:%d\n", pageInfo->pageId, pageInfo->dirty);
+      // printf("page %d is used, dirty:%d\n", pageInfo->pageId, pageInfo->dirty);
     }
   }
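With the ASSERT gone, getEldestUnrefedPage() now reports corrupted bookkeeping by returning NULL, so the eviction path has to treat that as a failure rather than relying on a crash. A hypothetical caller-side sketch; the real caller is outside this hunk:

  SListNode* pn = getEldestUnrefedPage(pBuf);
  if (pn == NULL) {
    return NULL;  // nothing evictable, or inconsistent page metadata: fail the request
  }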
@@ -353,7 +354,9 @@ int32_t createDiskbasedBuf(SDiskbasedBuf** pBuf, int32_t pagesize, int32_t inMem
   pPBuf->freePgList = tdListNew(POINTER_BYTES);
 
   // at least more than 2 pages must be in memory
-  ASSERT(inMemBufSize >= pagesize * 2);
+  if (pPBuf->inMemPages < 2) {
+    pPBuf->inMemPages = 2;
+  }
 
   pPBuf->lruList = tdListNew(POINTER_BYTES);
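The clamp above replaces the old ASSERT on inMemBufSize. Assuming inMemPages is derived from inMemBufSize / pagesize earlier in createDiskbasedBuf() (that code is outside this hunk), the effect is to guarantee at least two resident pages instead of aborting when the buffer budget is too small:

  // Assumed relationship, not the actual createDiskbasedBuf() code.
  int32_t inMemPages = inMemBufSize / pagesize;  // e.g. 4096 / 4096 = 1 page
  if (inMemPages < 2) {
    inMemPages = 2;                              // the paged buffer needs at least two pages in memory
  }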
@@ -418,11 +421,17 @@ void* getNewBufPage(SDiskbasedBuf* pBuf, int32_t* pageId) {
 }
 
 void* getBufPage(SDiskbasedBuf* pBuf, int32_t id) {
-  ASSERT(pBuf != NULL && id >= 0);
+  if (id < 0) {
+    return NULL;
+  }
 
   pBuf->statis.getPages += 1;
 
   SPageInfo** pi = taosHashGet(pBuf->all, &id, sizeof(int32_t));
-  ASSERT(pi != NULL && *pi != NULL);
+  if (pi == NULL || *pi == NULL) {
+    uError("no pages exist, id:%d, %s", id, pBuf->id);
+    return NULL;
+  }
 
   if ((*pi)->pData != NULL) {  // it is in memory
     // no need to update the LRU list if only one page exists
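Because getBufPage() now returns NULL for a negative id or a missing page instead of asserting, callers are expected to check the result. A hypothetical caller-side sketch:

  // Hypothetical caller: treat a NULL page as an error rather than relying on the old ASSERT.
  static int32_t useOnePage(SDiskbasedBuf* pBuf, int32_t pageId) {
    void* pPage = getBufPage(pBuf, pageId);
    if (pPage == NULL) {
      return -1;                   // placeholder error code: invalid id or page not found
    }
    // ... read or write the page payload here ...
    releaseBufPage(pBuf, pPage);   // hand the page back so it can be evicted again
    return 0;
  }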
@@ -432,7 +441,10 @@ void* getBufPage(SDiskbasedBuf* pBuf, int32_t id) {
   }
 
   SPageInfo** pInfo = (SPageInfo**)((*pi)->pn->data);
-  ASSERT(*pInfo == *pi);
+  if (*pInfo != *pi) {
+    uError("data inconsistent in paged buf, %s", pBuf->id);
+    return NULL;
+  }
 
   lruListMoveToFront(pBuf->lruList, (*pi));
   (*pi)->used = true;
@@ -479,9 +491,6 @@ void* getBufPage(SDiskbasedBuf* pBuf, int32_t id) {
 }
 
 void releaseBufPage(SDiskbasedBuf* pBuf, void* page) {
-  if (ASSERTS(pBuf != NULL && page != NULL, "pBuf or page is NULL")) {
-    return;
-  }
   SPageInfo* ppi = getPageInfoFromPayload(page);
   releaseBufPageInfo(pBuf, ppi);
 }
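Note that releaseBufPage() no longer validates its arguments, so the NULL guard becomes the caller's responsibility; a one-line hypothetical guard:

  if (pPage != NULL) {             // guard moved to the caller; releaseBufPage() no longer checks
    releaseBufPage(pBuf, pPage);
  }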
@@ -490,7 +499,7 @@ void releaseBufPageInfo(SDiskbasedBuf* pBuf, SPageInfo* pi) {
 #ifdef BUF_PAGE_DEBUG
   uDebug("page_releaseBufPageInfo pageId:%d, used:%d, offset:%" PRId64, pi->pageId, pi->used, pi->offset);
 #endif
-  if (ASSERTS(pi->pData != NULL, "pi->pData is NULL")) {
+  if (pi->pData == NULL) {
     return;
   }
@@ -501,7 +510,6 @@ void releaseBufPageInfo(SDiskbasedBuf* pBuf, SPageInfo* pi) {
 size_t getTotalBufSize(const SDiskbasedBuf* pBuf) { return (size_t)pBuf->totalBufSize; }
 
 SArray* getDataBufPagesIdList(SDiskbasedBuf* pBuf) {
-  ASSERT(pBuf != NULL);
   return pBuf->pIdList;
 }
@@ -579,7 +587,6 @@ SPageInfo* getLastPageInfo(SArray* pList) {
 }
 
 int32_t getPageId(const SPageInfo* pPgInfo) {
-  ASSERT(pPgInfo != NULL);
   return pPgInfo->pageId;
 }
...