Commit 18ee5d97 authored by haoranc

Merge branch 'main' of https://github.com/taosdata/TDengine into test/main/TD-22157

......@@ -2,7 +2,7 @@
IF (DEFINED VERNUMBER)
SET(TD_VER_NUMBER ${VERNUMBER})
ELSE ()
SET(TD_VER_NUMBER "3.0.3.1")
SET(TD_VER_NUMBER "3.0.3.2")
ENDIF ()
IF (DEFINED VERCOMPATIBLE)
......
......@@ -2,7 +2,7 @@
# taos-tools
ExternalProject_Add(taos-tools
GIT_REPOSITORY https://github.com/taosdata/taos-tools.git
GIT_TAG 273a3fe
GIT_TAG 01195d6
SOURCE_DIR "${TD_SOURCE_DIR}/tools/taos-tools"
BINARY_DIR ""
#BUILD_IN_SOURCE TRUE
......
......@@ -208,15 +208,12 @@ typedef struct SSDataBlock {
} SSDataBlock;
enum {
FETCH_TYPE__DATA = 1,
FETCH_TYPE__META,
FETCH_TYPE__SEP,
FETCH_TYPE__DATA = 0,
FETCH_TYPE__NONE,
};
typedef struct {
int8_t fetchType;
STqOffsetVal offset;
union {
SSDataBlock data;
void* meta;
......
......@@ -127,24 +127,24 @@
#define TK_NK_EQ 109
#define TK_USING 110
#define TK_TAGS 111
#define TK_COMMENT 112
#define TK_BOOL 113
#define TK_TINYINT 114
#define TK_SMALLINT 115
#define TK_INT 116
#define TK_INTEGER 117
#define TK_BIGINT 118
#define TK_FLOAT 119
#define TK_DOUBLE 120
#define TK_BINARY 121
#define TK_NCHAR 122
#define TK_UNSIGNED 123
#define TK_JSON 124
#define TK_VARCHAR 125
#define TK_MEDIUMBLOB 126
#define TK_BLOB 127
#define TK_VARBINARY 128
#define TK_DECIMAL 129
#define TK_BOOL 112
#define TK_TINYINT 113
#define TK_SMALLINT 114
#define TK_INT 115
#define TK_INTEGER 116
#define TK_BIGINT 117
#define TK_FLOAT 118
#define TK_DOUBLE 119
#define TK_BINARY 120
#define TK_NCHAR 121
#define TK_UNSIGNED 122
#define TK_JSON 123
#define TK_VARCHAR 124
#define TK_MEDIUMBLOB 125
#define TK_BLOB 126
#define TK_VARBINARY 127
#define TK_DECIMAL 128
#define TK_COMMENT 129
#define TK_MAX_DELAY 130
#define TK_WATERMARK 131
#define TK_ROLLUP 132
......
......@@ -198,11 +198,11 @@ int32_t qStreamPrepareScan(qTaskInfo_t tinfo, STqOffsetVal* pOffset, int8_t subT
//
int32_t qStreamSetScanMemData(qTaskInfo_t tinfo, SPackedData submit);
int32_t qStreamExtractOffset(qTaskInfo_t tinfo, STqOffsetVal* pOffset);
void qStreamSetOpen(qTaskInfo_t tinfo);
SMqMetaRsp* qStreamExtractMetaMsg(qTaskInfo_t tinfo);
void qStreamExtractOffset(qTaskInfo_t tinfo, STqOffsetVal* pOffset);
int64_t qStreamExtractPrepareUid(qTaskInfo_t tinfo);
SMqMetaRsp* qStreamExtractMetaMsg(qTaskInfo_t tinfo);
const SSchemaWrapper* qExtractSchemaFromTask(qTaskInfo_t tinfo);
......
......@@ -223,27 +223,12 @@ static FORCE_INLINE void* streamQueueCurItem(SStreamQueue* queue) {
return queue->qItem;
}
static FORCE_INLINE void* streamQueueNextItem(SStreamQueue* queue) {
int8_t dequeueFlag = atomic_exchange_8(&queue->status, STREAM_QUEUE__PROCESSING);
if (dequeueFlag == STREAM_QUEUE__FAILED) {
ASSERT(queue->qItem != NULL);
return streamQueueCurItem(queue);
} else {
queue->qItem = NULL;
taosGetQitem(queue->qall, &queue->qItem);
if (queue->qItem == NULL) {
taosReadAllQitems(queue->queue, queue->qall);
taosGetQitem(queue->qall, &queue->qItem);
}
return streamQueueCurItem(queue);
}
}
void* streamQueueNextItem(SStreamQueue* queue);
SStreamDataSubmit2* streamDataSubmitNew(SPackedData submit);
void streamDataSubmitDestroy(SStreamDataSubmit2* pDataSubmit);
void streamDataSubmitRefDec(SStreamDataSubmit2* pDataSubmit);
SStreamDataSubmit2* streamSubmitRefClone(SStreamDataSubmit2* pSubmit);
SStreamDataSubmit2* streamSubmitBlockClone(SStreamDataSubmit2* pSubmit);
typedef struct {
char* qmsg;
......@@ -371,43 +356,7 @@ SStreamTask* tNewSStreamTask(int64_t streamId);
int32_t tEncodeSStreamTask(SEncoder* pEncoder, const SStreamTask* pTask);
int32_t tDecodeSStreamTask(SDecoder* pDecoder, SStreamTask* pTask);
void tFreeSStreamTask(SStreamTask* pTask);
static FORCE_INLINE int32_t streamTaskInput(SStreamTask* pTask, SStreamQueueItem* pItem) {
int8_t type = pItem->type;
if (type == STREAM_INPUT__DATA_SUBMIT) {
SStreamDataSubmit2* pSubmitClone = streamSubmitRefClone((SStreamDataSubmit2*)pItem);
if (pSubmitClone == NULL) {
qDebug("task %d %p submit enqueue failed since out of memory", pTask->taskId, pTask);
terrno = TSDB_CODE_OUT_OF_MEMORY;
atomic_store_8(&pTask->inputStatus, TASK_INPUT_STATUS__FAILED);
return -1;
}
qDebug("task %d %p submit enqueue %p %p %p %d %" PRId64, pTask->taskId, pTask, pItem, pSubmitClone,
pSubmitClone->submit.msgStr, pSubmitClone->submit.msgLen, pSubmitClone->submit.ver);
taosWriteQitem(pTask->inputQueue->queue, pSubmitClone);
// qStreamInput(pTask->exec.executor, pSubmitClone);
} else if (type == STREAM_INPUT__DATA_BLOCK || type == STREAM_INPUT__DATA_RETRIEVE ||
type == STREAM_INPUT__REF_DATA_BLOCK) {
taosWriteQitem(pTask->inputQueue->queue, pItem);
// qStreamInput(pTask->exec.executor, pItem);
} else if (type == STREAM_INPUT__CHECKPOINT) {
taosWriteQitem(pTask->inputQueue->queue, pItem);
// qStreamInput(pTask->exec.executor, pItem);
} else if (type == STREAM_INPUT__GET_RES) {
taosWriteQitem(pTask->inputQueue->queue, pItem);
// qStreamInput(pTask->exec.executor, pItem);
}
if (type != STREAM_INPUT__GET_RES && type != STREAM_INPUT__CHECKPOINT && pTask->triggerParam != 0) {
atomic_val_compare_exchange_8(&pTask->triggerStatus, TASK_TRIGGER_STATUS__INACTIVE, TASK_TRIGGER_STATUS__ACTIVE);
}
#if 0
// TODO: back pressure
atomic_store_8(&pTask->inputStatus, TASK_INPUT_STATUS__NORMAL);
#endif
return 0;
}
int32_t tAppendDataForStream(SStreamTask* pTask, SStreamQueueItem* pItem);
static FORCE_INLINE void streamTaskInputFail(SStreamTask* pTask) {
atomic_store_8(&pTask->inputStatus, TASK_INPUT_STATUS__FAILED);
......
......@@ -146,8 +146,8 @@ typedef struct {
int64_t curFileFirstVer;
int64_t curVersion;
int64_t capacity;
int8_t curInvalid;
int8_t curStopped;
// int8_t curInvalid;
// int8_t curStopped;
TdThreadMutex mutex;
SWalFilterCond cond;
// TODO remove it
......
......@@ -758,6 +758,7 @@ int32_t* taosGetErrno();
#define TSDB_CODE_TMQ_INVALID_MSG TAOS_DEF_ERROR_CODE(0, 0x4000)
#define TSDB_CODE_TMQ_CONSUMER_MISMATCH TAOS_DEF_ERROR_CODE(0, 0x4001)
#define TSDB_CODE_TMQ_CONSUMER_CLOSED TAOS_DEF_ERROR_CODE(0, 0x4002)
#define TSDB_CODE_TMQ_CONSUMER_ERROR TAOS_DEF_ERROR_CODE(0, 0x4003)
// stream
#define TSDB_CODE_STREAM_TASK_NOT_EXIST TAOS_DEF_ERROR_CODE(0, 0x4100)
......
......@@ -61,7 +61,7 @@ typedef void (*FItems)(SQueueInfo *pInfo, STaosQall *qall, int32_t numOfItems);
typedef struct STaosQnode STaosQnode;
typedef struct STaosQnode {
struct STaosQnode {
STaosQnode *next;
STaosQueue *queue;
int64_t timestamp;
......@@ -70,9 +70,9 @@ typedef struct STaosQnode {
int8_t itype;
int8_t reserved[3];
char item[];
} STaosQnode;
};
typedef struct STaosQueue {
struct STaosQueue {
STaosQnode *head;
STaosQnode *tail;
STaosQueue *next; // for queue set
......@@ -84,22 +84,22 @@ typedef struct STaosQueue {
int64_t memOfItems;
int32_t numOfItems;
int64_t threadId;
} STaosQueue;
};
typedef struct STaosQset {
struct STaosQset {
STaosQueue *head;
STaosQueue *current;
TdThreadMutex mutex;
tsem_t sem;
int32_t numOfQueues;
int32_t numOfItems;
} STaosQset;
};
typedef struct STaosQall {
struct STaosQall {
STaosQnode *current;
STaosQnode *start;
int32_t numOfItems;
} STaosQall;
};
STaosQueue *taosOpenQueue();
void taosCloseQueue(STaosQueue *queue);
......
......@@ -533,8 +533,7 @@ static int32_t smlGenerateSchemaAction(SSchema *colField, SHashObj *colHash, SSm
uint16_t *index = colHash ? (uint16_t *)taosHashGet(colHash, kv->key, kv->keyLen) : NULL;
if (index) {
if (colField[*index].type != kv->type) {
uError("SML:0x%" PRIx64 " point type and db type mismatch. key: %s. point type: %d, db type: %d", info->id,
kv->key, colField[*index].type, kv->type);
uError("SML:0x%" PRIx64 " point type and db type mismatch. point type: %d, db type: %d, key: %s", info->id, colField[*index].type, kv->type, kv->key);
return TSDB_CODE_TSC_INVALID_VALUE;
}
......
......@@ -107,7 +107,6 @@ struct tmq_t {
STaosQueue* mqueue; // queue of rsp
STaosQall* qall;
STaosQueue* delayedTask; // delayed task queue for heartbeat and auto commit
TdThreadMutex lock; // used to protect the operation on each topic, when updating the epsets.
tsem_t rspSem;
};
......@@ -188,7 +187,6 @@ typedef struct {
SMqClientVg* pVg;
SMqClientTopic* pTopic;
int32_t vgId;
tsem_t rspSem;
uint64_t requestId; // request id for debug purpose
} SMqPollCbParam;
......@@ -979,7 +977,6 @@ void tmqFreeImpl(void* handle) {
taosFreeQall(tmq->qall);
tsem_destroy(&tmq->rspSem);
taosThreadMutexDestroy(&tmq->lock);
taosArrayDestroyEx(tmq->clientTopics, freeClientVgImpl);
taos_close_internal(tmq->pTscObj);
......@@ -1024,7 +1021,6 @@ tmq_t* tmq_consumer_new(tmq_conf_t* conf, char* errstr, int32_t errstrLen) {
pTmq->delayedTask = taosOpenQueue();
pTmq->qall = taosAllocateQall();
taosThreadMutexInit(&pTmq->lock, NULL);
if (pTmq->clientTopics == NULL || pTmq->mqueue == NULL || pTmq->qall == NULL || pTmq->delayedTask == NULL ||
conf->groupId[0] == 0) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
......@@ -1233,7 +1229,6 @@ int32_t tmqPollCb(void* param, SDataBuf* pMsg, int32_t code) {
tmq_t* tmq = taosAcquireRef(tmqMgmt.rsetId, refId);
if (tmq == NULL) {
tsem_destroy(&pParam->rspSem);
taosMemoryFree(pParam);
taosMemoryFree(pMsg->pData);
taosMemoryFree(pMsg->pEpSet);
......@@ -1269,6 +1264,8 @@ int32_t tmqPollCb(void* param, SDataBuf* pMsg, int32_t code) {
pRspWrapper->tmqRspType = TMQ_MSG_TYPE__END_RSP;
taosWriteQitem(tmq->mqueue, pRspWrapper);
}else if(code == TSDB_CODE_WAL_LOG_NOT_EXIST){ //poll data while insert
taosMsleep(500);
}
goto CREATE_MSG_FAIL;
......@@ -1343,8 +1340,9 @@ int32_t tmqPollCb(void* param, SDataBuf* pMsg, int32_t code) {
taosMemoryFree(pMsg->pData);
taosWriteQitem(tmq->mqueue, pRspWrapper);
int32_t total = taosQueueItemSize(tmq->mqueue);
tscDebug("consumer:0x%" PRIx64 " put poll res into mqueue, type:%d, vgId:%d, total in queue:%d, reqId:0x%" PRIx64,
tmq->consumerId, rspType, vgId, tmq->mqueue->numOfItems, requestId);
tmq->consumerId, rspType, vgId, total, requestId);
tsem_post(&tmq->rspSem);
taosReleaseRef(tmqMgmt.rsetId, refId);
......@@ -1419,7 +1417,7 @@ static void freeClientVgInfo(void* param) {
taosArrayDestroy(pTopic->vgs);
}
static bool tmqUpdateEp(tmq_t* tmq, int32_t epoch, const SMqAskEpRsp* pRsp) {
static bool doUpdateLocalEp(tmq_t* tmq, int32_t epoch, const SMqAskEpRsp* pRsp) {
bool set = false;
int32_t topicNumCur = taosArrayGetSize(tmq->clientTopics);
......@@ -1471,14 +1469,11 @@ static bool tmqUpdateEp(tmq_t* tmq, int32_t epoch, const SMqAskEpRsp* pRsp) {
taosHashCleanup(pVgOffsetHashMap);
taosThreadMutexLock(&tmq->lock);
// destroy current buffered existed topics info
if (tmq->clientTopics) {
taosArrayDestroyEx(tmq->clientTopics, freeClientVgInfo);
}
tmq->clientTopics = newTopics;
taosThreadMutexUnlock(&tmq->lock);
int8_t flag = (topicNumGet == 0)? TMQ_CONSUMER_STATUS__NO_TOPIC:TMQ_CONSUMER_STATUS__READY;
atomic_store_8(&tmq->status, flag);
......@@ -1742,7 +1737,7 @@ static int32_t tmqHandleNoPollRsp(tmq_t* tmq, SMqRspWrapper* rspWrapper, bool* p
if (rspWrapper->epoch > atomic_load_32(&tmq->epoch)) {
SMqAskEpRspWrapper* pEpRspWrapper = (SMqAskEpRspWrapper*)rspWrapper;
SMqAskEpRsp* rspMsg = &pEpRspWrapper->msg;
tmqUpdateEp(tmq, rspWrapper->epoch, rspMsg);
doUpdateLocalEp(tmq, rspWrapper->epoch, rspMsg);
/*tmqClearUnhandleMsg(tmq);*/
tDeleteSMqAskEpRsp(rspMsg);
*pReset = true;
......@@ -2163,7 +2158,7 @@ void updateEpCallbackFn(tmq_t* pTmq, int32_t code, SDataBuf* pDataBuf, void* par
SMqAskEpRsp rsp;
tDecodeSMqAskEpRsp(POINTER_SHIFT(pDataBuf->pData, sizeof(SMqRspHead)), &rsp);
tmqUpdateEp(pTmq, head->epoch, &rsp);
doUpdateLocalEp(pTmq, head->epoch, &rsp);
tDeleteSMqAskEpRsp(&rsp);
}
......
......@@ -161,10 +161,10 @@ void *queryThread(void *arg) {
return NULL;
}
static int32_t numOfThreads = 1;
int32_t numOfThreads = 1;
void tmq_commit_cb_print(tmq_t *pTmq, int32_t code, void *param) {
printf("success, code:%d\n", code);
printf("auto commit success, code:%d\n\n\n\n", code);
}
void* doConsumeData(void* param) {
......@@ -173,7 +173,7 @@ void* doConsumeData(void* param) {
tmq_conf_t* conf = tmq_conf_new();
tmq_conf_set(conf, "enable.auto.commit", "true");
tmq_conf_set(conf, "auto.commit.interval.ms", "1000");
tmq_conf_set(conf, "group.id", "cgrpName12");
tmq_conf_set(conf, "group.id", "cgrpName41");
tmq_conf_set(conf, "td.connect.user", "root");
tmq_conf_set(conf, "td.connect.pass", "taosdata");
tmq_conf_set(conf, "auto.offset.reset", "earliest");
......@@ -1060,7 +1060,7 @@ TEST(clientCase, sub_tb_test) {
tmq_conf_t* conf = tmq_conf_new();
tmq_conf_set(conf, "enable.auto.commit", "true");
tmq_conf_set(conf, "auto.commit.interval.ms", "1000");
tmq_conf_set(conf, "group.id", "cgrpName27");
tmq_conf_set(conf, "group.id", "cgrpName45");
tmq_conf_set(conf, "td.connect.user", "root");
tmq_conf_set(conf, "td.connect.pass", "taosdata");
tmq_conf_set(conf, "auto.offset.reset", "earliest");
......
......@@ -7212,6 +7212,7 @@ void tDestroySSubmitReq2(SSubmitReq2 *pReq, int32_t flag) {
tDestroySSubmitTbData(&aSubmitTbData[i], flag);
}
taosArrayDestroy(pReq->aSubmitTbData);
pReq->aSubmitTbData = NULL;
}
int32_t tEncodeSSubmitRsp2(SEncoder *pCoder, const SSubmitRsp2 *pRsp) {
......
......@@ -24,10 +24,10 @@ extern "C" {
enum {
MQ_CONSUMER_STATUS__MODIFY = 1,
MQ_CONSUMER_STATUS__MODIFY_IN_REB, // this value is not used anymore
// MQ_CONSUMER_STATUS__MODIFY_IN_REB, // this value is not used anymore
MQ_CONSUMER_STATUS__READY,
MQ_CONSUMER_STATUS__LOST,
MQ_CONSUMER_STATUS__LOST_IN_REB, // this value is not used anymore
// MQ_CONSUMER_STATUS__LOST_IN_REB, // this value is not used anymore
MQ_CONSUMER_STATUS__LOST_REBD,
MQ_CONSUMER_STATUS__REMOVED,
};
......
......@@ -337,7 +337,7 @@ static int32_t mndProcessMqTimerMsg(SRpcMsg *pMsg) {
taosArrayPush(pRebSub->removedConsumers, &pConsumer->consumerId);
}
taosRUnLockLatch(&pConsumer->lock);
} else if (status == MQ_CONSUMER_STATUS__MODIFY || status == MQ_CONSUMER_STATUS__MODIFY_IN_REB) {
} else if (status == MQ_CONSUMER_STATUS__MODIFY) {
taosRLockLatch(&pConsumer->lock);
int32_t newTopicNum = taosArrayGetSize(pConsumer->rebNewTopics);
......@@ -876,17 +876,11 @@ static void updateConsumerStatus(SMqConsumerObj* pConsumer) {
int32_t status = pConsumer->status;
if (taosArrayGetSize(pConsumer->rebNewTopics) == 0 && taosArrayGetSize(pConsumer->rebRemovedTopics) == 0) {
if (status == MQ_CONSUMER_STATUS__MODIFY || status == MQ_CONSUMER_STATUS__MODIFY_IN_REB) {
if (status == MQ_CONSUMER_STATUS__MODIFY) {
pConsumer->status = MQ_CONSUMER_STATUS__READY;
} else if (status == MQ_CONSUMER_STATUS__LOST_IN_REB || status == MQ_CONSUMER_STATUS__LOST) {
} else if (status == MQ_CONSUMER_STATUS__LOST) {
pConsumer->status = MQ_CONSUMER_STATUS__LOST_REBD;
}
} else {
if (status == MQ_CONSUMER_STATUS__MODIFY || status == MQ_CONSUMER_STATUS__MODIFY_IN_REB) {
pConsumer->status = MQ_CONSUMER_STATUS__MODIFY;
} else if (status == MQ_CONSUMER_STATUS__LOST || status == MQ_CONSUMER_STATUS__LOST_IN_REB) {
pConsumer->status = MQ_CONSUMER_STATUS__LOST;
}
}
}
......@@ -1195,10 +1189,8 @@ static const char *mndConsumerStatusName(int status) {
return "ready";
case MQ_CONSUMER_STATUS__LOST:
case MQ_CONSUMER_STATUS__LOST_REBD:
case MQ_CONSUMER_STATUS__LOST_IN_REB:
return "lost";
case MQ_CONSUMER_STATUS__MODIFY:
case MQ_CONSUMER_STATUS__MODIFY_IN_REB:
return "rebalancing";
default:
return "unknown";
......
......@@ -228,19 +228,12 @@ typedef struct SSnapContext {
SArray *idList;
int32_t index;
bool withMeta;
bool queryMetaOrData; // true-get meta, false-get data
bool queryMeta; // true-get meta, false-get data
} SSnapContext;
typedef struct STqReader {
// const SSubmitReq *pMsg;
// SSubmitBlk *pBlock;
// SSubmitMsgIter msgIter;
// SSubmitBlkIter blkIter;
int64_t ver;
SPackedData msg2;
int8_t setMsg;
SSubmitReq2 submit;
int32_t nextBlk;
......@@ -267,7 +260,7 @@ int32_t tqReaderAddTbUidList(STqReader *pReader, const SArray *tbUidList);
int32_t tqReaderRemoveTbUidList(STqReader *pReader, const SArray *tbUidList);
int32_t tqSeekVer(STqReader *pReader, int64_t ver, const char *id);
int32_t tqNextBlock(STqReader *pReader, SFetchRet *ret);
void tqNextBlock(STqReader *pReader, SFetchRet *ret);
int32_t tqReaderSetSubmitReq2(STqReader *pReader, void *msgStr, int32_t msgLen, int64_t ver);
// int32_t tqReaderSetDataMsg(STqReader *pReader, const SSubmitReq *pMsg, int64_t ver);
......
......@@ -268,7 +268,7 @@ int32_t buildSnapContext(SMeta* pMeta, int64_t snapVersion, int64_t suid, int8_t
ctx->snapVersion = snapVersion;
ctx->suid = suid;
ctx->subType = subType;
ctx->queryMetaOrData = withMeta;
ctx->queryMeta = withMeta;
ctx->withMeta = withMeta;
ctx->idVersion = taosHashInit(100, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), true, HASH_NO_LOCK);
if (ctx->idVersion == NULL) {
......@@ -475,7 +475,7 @@ int32_t getMetafromSnapShot(SSnapContext* ctx, void** pBuf, int32_t* contLen, in
if (ctx->index >= taosArrayGetSize(ctx->idList)) {
metaDebug("tmqsnap get meta end");
ctx->index = 0;
ctx->queryMetaOrData = false; // change to get data
ctx->queryMeta = false; // change to get data
return 0;
}
......
......@@ -236,21 +236,6 @@ int32_t tqPushDataRsp(STQ* pTq, STqPushEntry* pPushEntry) {
}
int32_t tqSendDataRsp(STQ* pTq, const SRpcMsg* pMsg, const SMqPollReq* pReq, const SMqDataRsp* pRsp, int32_t type) {
#if 0
A(taosArrayGetSize(pRsp->blockData) == pRsp->blockNum);
A(taosArrayGetSize(pRsp->blockDataLen) == pRsp->blockNum);
A(!pRsp->withSchema);
A(taosArrayGetSize(pRsp->blockSchema) == 0);
if (pRsp->reqOffset.type == TMQ_OFFSET__LOG) {
if (pRsp->blockNum > 0) {
A(pRsp->rspOffset.version > pRsp->reqOffset.version);
} else {
A(pRsp->rspOffset.version >= pRsp->reqOffset.version);
}
}
#endif
doSendDataRsp(&pMsg->info, pRsp, pReq->epoch, pReq->consumerId, type);
char buf1[80] = {0};
......@@ -352,17 +337,6 @@ static int32_t tqInitDataRsp(SMqDataRsp* pRsp, const SMqPollReq* pReq, int8_t su
}
pRsp->withTbName = 0;
#if 0
pRsp->withTbName = pReq->withTbName;
if (pRsp->withTbName) {
pRsp->blockTbName = taosArrayInit(0, sizeof(void*));
if (pRsp->blockTbName == NULL) {
// TODO free
return -1;
}
}
#endif
pRsp->withSchema = false;
return 0;
}
......@@ -421,6 +395,7 @@ static int32_t extractResetOffsetVal(STqOffsetVal* pOffsetVal, STQ* pTq, STqHand
return -1;
}
// offset set to previous version when init
tqOffsetResetToLog(pOffsetVal, pHandle->pRef->refVer - 1);
}
} else if (reqOffset.type == TMQ_OFFSET__RESET_LATEST) {
......@@ -461,7 +436,6 @@ static int32_t extractResetOffsetVal(STqOffsetVal* pOffsetVal, STQ* pTq, STqHand
static int32_t extractDataAndRspForNormalSubscribe(STQ* pTq, STqHandle* pHandle, const SMqPollReq* pRequest,
SRpcMsg* pMsg, STqOffsetVal* pOffset) {
int32_t code = 0;
uint64_t consumerId = pRequest->consumerId;
int32_t vgId = TD_VID(pTq->pVnode);
......@@ -472,10 +446,9 @@ static int32_t extractDataAndRspForNormalSubscribe(STQ* pTq, STqHandle* pHandle,
taosWLockLatch(&pTq->lock);
qSetTaskId(pHandle->execHandle.task, consumerId, pRequest->reqId);
code = tqScanData(pTq, pHandle, &dataRsp, pOffset);
if (code != TSDB_CODE_SUCCESS) {
taosWUnLockLatch(&pTq->lock);
return code;
int code = tqScanData(pTq, pHandle, &dataRsp, pOffset);
if(code != 0) {
goto end;
}
// till now, all data has been transferred to the consumer; newly arrived data needs to be pushed to the client.
......@@ -486,89 +459,60 @@ static int32_t extractDataAndRspForNormalSubscribe(STQ* pTq, STqHandle* pHandle,
return code;
}
taosWUnLockLatch(&pTq->lock);
code = tqSendDataRsp(pTq, pMsg, pRequest, (SMqDataRsp*)&dataRsp, TMQ_MSG_TYPE__POLL_RSP);
// NOTE: this pHandle->consumerId may have been changed already.
tqDebug("tmq poll: consumer:0x%" PRIx64 ", subkey %s, vgId:%d, rsp block:%d, offset type:%d, uid/version:%" PRId64
", ts:%" PRId64 ", reqId:0x%" PRIx64,
consumerId, pHandle->subKey, vgId, dataRsp.blockNum, dataRsp.rspOffset.type, dataRsp.rspOffset.uid,
dataRsp.rspOffset.ts, pRequest->reqId);
tDeleteSMqDataRsp(&dataRsp);
end:
{
char buf[80] = {0};
tFormatOffset(buf, 80, &dataRsp.rspOffset);
tqDebug("tmq poll: consumer:0x%" PRIx64 ", subkey %s, vgId:%d, rsp block:%d, rsp offset type:%s, reqId:0x%" PRIx64 " code:%d",
consumerId, pHandle->subKey, vgId, dataRsp.blockNum, buf, pRequest->reqId, code);
taosWUnLockLatch(&pTq->lock);
tDeleteSMqDataRsp(&dataRsp);
}
return code;
}
static int32_t doPollDataForMq(STQ* pTq, STqHandle* pHandle, const SMqPollReq* pRequest, SRpcMsg* pMsg) {
int32_t code = -1;
STqOffsetVal offset = {0};
SWalCkHead* pCkHead = NULL;
int32_t vgId = TD_VID(pTq->pVnode);
STqOffsetVal reqOffset = pRequest->reqOffset;
uint64_t consumerId = pRequest->consumerId;
// 1. reset the offset if needed
if (IS_OFFSET_RESET_TYPE(reqOffset.type)) {
// handle the reset offset cases, according to the consumer's choice.
bool blockReturned = false;
code = extractResetOffsetVal(&offset, pTq, pHandle, pRequest, pMsg, &blockReturned);
if (code != 0) {
return code;
}
// empty block returned, quit
if (blockReturned) {
return 0;
}
} else { // use the consumer specified offset
// the offset value may not be monotonically increasing??
offset = reqOffset;
}
// this is a normal subscribe requirement
if (pHandle->execHandle.subType == TOPIC_SUB_TYPE__COLUMN) {
return extractDataAndRspForNormalSubscribe(pTq, pHandle, pRequest, pMsg, &offset);
}
// todo handle the case where re-balance occurs.
// for taosx
static int32_t extractDataAndRspForDbStbSubscribe(STQ* pTq, STqHandle* pHandle, const SMqPollReq* pRequest, SRpcMsg* pMsg, STqOffsetVal *offset) {
int code = 0;
int32_t vgId = TD_VID(pTq->pVnode);
SWalCkHead* pCkHead = NULL;
SMqMetaRsp metaRsp = {0};
STaosxRsp taosxRsp = {0};
tqInitTaosxRsp(&taosxRsp, pRequest);
if (offset.type != TMQ_OFFSET__LOG) {
if (tqScanTaosx(pTq, pHandle, &taosxRsp, &metaRsp, &offset) < 0) {
if (offset->type != TMQ_OFFSET__LOG) {
if (tqScanTaosx(pTq, pHandle, &taosxRsp, &metaRsp, offset) < 0) {
return -1;
}
if (metaRsp.metaRspLen > 0) {
code = tqSendMetaPollRsp(pTq, pMsg, pRequest, &metaRsp);
tqDebug("tmq poll: consumer:0x%" PRIx64 " subkey:%s vgId:%d, send meta offset type:%d,uid:%" PRId64
",ts:%" PRId64,
consumerId, pHandle->subKey, vgId, metaRsp.rspOffset.type, metaRsp.rspOffset.uid,
metaRsp.rspOffset.ts);
tqDebug("tmq poll: consumer:0x%" PRIx64 " subkey:%s vgId:%d, send meta offset type:%d,uid:%" PRId64 ",ts:%" PRId64,
pRequest->consumerId, pHandle->subKey, vgId, metaRsp.rspOffset.type, metaRsp.rspOffset.uid, metaRsp.rspOffset.ts);
taosMemoryFree(metaRsp.metaRsp);
tDeleteSTaosxRsp(&taosxRsp);
return code;
}
tqDebug("taosx poll: consumer:0x%" PRIx64 " subkey:%s vgId:%d, send data blockNum:%d, offset type:%d,uid:%" PRId64
",ts:%" PRId64,pRequest->consumerId, pHandle->subKey, vgId, taosxRsp.blockNum, taosxRsp.rspOffset.type, taosxRsp.rspOffset.uid,taosxRsp.rspOffset.ts);
if (taosxRsp.blockNum > 0) {
code = tqSendDataRsp(pTq, pMsg, pRequest, (SMqDataRsp*)&taosxRsp, TMQ_MSG_TYPE__TAOSX_RSP);
tDeleteSTaosxRsp(&taosxRsp);
return code;
} else {
offset = taosxRsp.rspOffset;
}else {
*offset = taosxRsp.rspOffset;
}
tqDebug("taosx poll: consumer:0x%" PRIx64 " subkey:%s vgId:%d, send data blockNum:%d, offset type:%d,uid:%" PRId64
",version:%" PRId64,
consumerId, pHandle->subKey, vgId, taosxRsp.blockNum, taosxRsp.rspOffset.type, taosxRsp.rspOffset.uid,
taosxRsp.rspOffset.version);
}
if (offset.type == TMQ_OFFSET__LOG) {
int64_t fetchVer = offset.version + 1;
if (offset->type == TMQ_OFFSET__LOG) {
int64_t fetchVer = offset->version + 1;
pCkHead = taosMemoryMalloc(sizeof(SWalCkHead) + 2048);
if (pCkHead == NULL) {
tDeleteSTaosxRsp(&taosxRsp);
......@@ -578,12 +522,10 @@ static int32_t doPollDataForMq(STQ* pTq, STqHandle* pHandle, const SMqPollReq* p
walSetReaderCapacity(pHandle->pWalReader, 2048);
int totalRows = 0;
while (1) {
// todo refactor: this is not correct.
int32_t savedEpoch = atomic_load_32(&pHandle->epoch);
if (savedEpoch > pRequest->epoch) {
tqWarn("tmq poll: consumer:0x%" PRIx64 " (epoch %d), subkey:%s vgId:%d offset %" PRId64
", found new consumer epoch %d, discard req epoch %d",
consumerId, pRequest->epoch, pHandle->subKey, vgId, fetchVer, savedEpoch, pRequest->epoch);
", found new consumer epoch %d, discard req epoch %d", pRequest->consumerId, pRequest->epoch, pHandle->subKey, vgId, fetchVer, savedEpoch, pRequest->epoch);
break;
}
......@@ -596,7 +538,7 @@ static int32_t doPollDataForMq(STQ* pTq, STqHandle* pHandle, const SMqPollReq* p
}
SWalCont* pHead = &pCkHead->head;
tqDebug("tmq poll: consumer:0x%" PRIx64 " (epoch %d) iter log, vgId:%d offset %" PRId64 " msgType %d", consumerId,
tqDebug("tmq poll: consumer:0x%" PRIx64 " (epoch %d) iter log, vgId:%d offset %" PRId64 " msgType %d", pRequest->consumerId,
pRequest->epoch, vgId, fetchVer, pHead->msgType);
// process meta
......@@ -634,7 +576,7 @@ static int32_t doPollDataForMq(STQ* pTq, STqHandle* pHandle, const SMqPollReq* p
};
if (tqTaosxScanLog(pTq, pHandle, submit, &taosxRsp, &totalRows) < 0) {
tqError("tmq poll: tqTaosxScanLog error %" PRId64 ", in vgId:%d, subkey %s", consumerId, vgId,
tqError("tmq poll: tqTaosxScanLog error %" PRId64 ", in vgId:%d, subkey %s", pRequest->consumerId, vgId,
pRequest->subKey);
taosMemoryFreeClear(pCkHead);
tDeleteSTaosxRsp(&taosxRsp);
......@@ -658,6 +600,39 @@ static int32_t doPollDataForMq(STQ* pTq, STqHandle* pHandle, const SMqPollReq* p
return 0;
}
static int32_t doPollDataForMq(STQ* pTq, STqHandle* pHandle, const SMqPollReq* pRequest, SRpcMsg* pMsg) {
int32_t code = -1;
STqOffsetVal offset = {0};
STqOffsetVal reqOffset = pRequest->reqOffset;
// 1. reset the offset if needed
if (IS_OFFSET_RESET_TYPE(reqOffset.type)) {
// handle the reset offset cases, according to the consumer's choice.
bool blockReturned = false;
code = extractResetOffsetVal(&offset, pTq, pHandle, pRequest, pMsg, &blockReturned);
if (code != 0) {
return code;
}
// empty block returned, quit
if (blockReturned) {
return 0;
}
} else { // use the consumer specified offset
// the offset value may not be monotonically increasing??
offset = reqOffset;
}
// this is a normal subscribe requirement
if (pHandle->execHandle.subType == TOPIC_SUB_TYPE__COLUMN) {
return extractDataAndRspForNormalSubscribe(pTq, pHandle, pRequest, pMsg, &offset);
}
// todo handle the case where re-balance occurs.
// for taosx
return extractDataAndRspForDbStbSubscribe(pTq, pHandle, pRequest, pMsg, &offset);
}
int32_t tqProcessPollReq(STQ* pTq, SRpcMsg* pMsg) {
SMqPollReq req = {0};
if (tDeserializeSMqPollReq(pMsg->pCont, pMsg->contLen, &req) < 0) {
......@@ -819,13 +794,7 @@ int32_t tqProcessSubscribeReq(STQ* pTq, int64_t sversion, char* msg, int32_t msg
pHandle->pRef = pRef;
SReadHandle handle = {
.meta = pVnode->pMeta,
.vnode = pVnode,
.initTableReader = true,
.initTqReader = true,
.version = ver,
};
.meta = pVnode->pMeta, .vnode = pVnode, .initTableReader = true, .initTqReader = true, .version = ver};
pHandle->snapshotVer = ver;
if (pHandle->execHandle.subType == TOPIC_SUB_TYPE__COLUMN) {
......@@ -1338,7 +1307,7 @@ int32_t tqProcessDelReq(STQ* pTq, void* pReq, int32_t len, int64_t ver) {
pRefBlock->dataRef = pRef;
atomic_add_fetch_32(pRefBlock->dataRef, 1);
if (streamTaskInput(pTask, (SStreamQueueItem*)pRefBlock) < 0) {
if (tAppendDataForStream(pTask, (SStreamQueueItem*)pRefBlock) < 0) {
qError("stream task input del failed, task id %d", pTask->taskId);
atomic_sub_fetch_32(pRef, 1);
......@@ -1373,7 +1342,7 @@ int32_t tqProcessDelReq(STQ* pTq, void* pReq, int32_t len, int64_t ver) {
taosArrayPush(pStreamBlock->blocks, &block);
if (!failed) {
if (streamTaskInput(pTask, (SStreamQueueItem*)pStreamBlock) < 0) {
if (tAppendDataForStream(pTask, (SStreamQueueItem*)pStreamBlock) < 0) {
qError("stream task input del failed, task id %d", pTask->taskId);
continue;
}
......@@ -1393,15 +1362,14 @@ int32_t tqProcessDelReq(STQ* pTq, void* pReq, int32_t len, int64_t ver) {
}
int32_t tqProcessSubmitReq(STQ* pTq, SPackedData submit) {
void* pIter = NULL;
bool failed = false;
SStreamDataSubmit2* pSubmit = NULL;
void* pIter = NULL;
bool succ = true;
pSubmit = streamDataSubmitNew(submit);
SStreamDataSubmit2* pSubmit = streamDataSubmitNew(submit);
if (pSubmit == NULL) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
tqError("failed to create data submit for stream since out of memory");
failed = true;
succ = false;
}
while (1) {
......@@ -1411,22 +1379,27 @@ int32_t tqProcessSubmitReq(STQ* pTq, SPackedData submit) {
}
SStreamTask* pTask = *(SStreamTask**)pIter;
if (pTask->taskLevel != TASK_LEVEL__SOURCE) continue;
if (pTask->taskLevel != TASK_LEVEL__SOURCE) {
continue;
}
if (pTask->taskStatus == TASK_STATUS__RECOVER_PREPARE || pTask->taskStatus == TASK_STATUS__WAIT_DOWNSTREAM) {
tqDebug("skip push task %d, task status %d", pTask->taskId, pTask->taskStatus);
tqDebug("stream task:%d skip push data, not ready for processing, status %d", pTask->taskId, pTask->taskStatus);
continue;
}
tqDebug("data submit enqueue stream task: %d, ver: %" PRId64, pTask->taskId, submit.ver);
tqDebug("data submit enqueue stream task:%d, ver: %" PRId64, pTask->taskId, submit.ver);
if (succ) {
int32_t code = tAppendDataForStream(pTask, (SStreamQueueItem*)pSubmit);
if (code < 0) {
// let's handle the back pressure
if (!failed) {
if (streamTaskInput(pTask, (SStreamQueueItem*)pSubmit) < 0) {
tqError("stream task input failed, task id %d", pTask->taskId);
tqError("stream task:%d failed to put into queue for, too many", pTask->taskId);
continue;
}
if (streamSchedExec(pTask) < 0) {
tqError("stream task launch failed, task id %d", pTask->taskId);
tqError("stream task:%d launch failed, code:%s", pTask->taskId, tstrerror(terrno));
continue;
}
} else {
......@@ -1434,12 +1407,12 @@ int32_t tqProcessSubmitReq(STQ* pTq, SPackedData submit) {
}
}
if (pSubmit) {
streamDataSubmitRefDec(pSubmit);
if (pSubmit != NULL) {
streamDataSubmitDestroy(pSubmit);
taosFreeQitem(pSubmit);
}
return failed ? -1 : 0;
return succ ? 0 : -1;
}
int32_t tqProcessTaskRunReq(STQ* pTq, SRpcMsg* pMsg) {
......@@ -1559,6 +1532,8 @@ int32_t vnodeEnqueueStreamMsg(SVnode* pVnode, SRpcMsg* pMsg) {
rpcFreeCont(pMsg->pCont);
taosFreeQitem(pMsg);
return 0;
} else {
tDeleteStreamDispatchReq(&req);
}
code = TSDB_CODE_STREAM_TASK_NOT_EXIST;
......
......@@ -30,7 +30,7 @@ static int32_t tqLoopExecFromQueue(STQ* pTq, STqHandle* pHandle, SStreamDataSubm
// update processed
atomic_store_64(&pHandle->pushHandle.processedVer, pSubmit->ver);
streamQueueProcessSuccess(&pHandle->pushHandle.inputQ);
streamDataSubmitRefDec(pSubmit);
streamDataSubmitDestroy(pSubmit);
if (pRsp->blockNum > 0) {
*ppSubmit = pSubmit;
return 0;
......@@ -58,7 +58,7 @@ int32_t tqExecFromInputQ(STQ* pTq, STqHandle* pHandle) {
}
while (pHandle->pushHandle.processedVer > pSubmit->ver + 1) {
streamQueueProcessSuccess(&pHandle->pushHandle.inputQ);
streamDataSubmitRefDec(pSubmit);
streamDataSubmitDestroy(pSubmit);
pSubmit = streamQueueNextItem(&pHandle->pushHandle.inputQ);
if (pSubmit == NULL) break;
}
......@@ -120,7 +120,7 @@ int32_t tqPreparePush(STQ* pTq, STqHandle* pHandle, int64_t reqId, const SRpcHan
int32_t tqEnqueue(STqHandle* pHandle, SStreamDataSubmit* pSubmit) {
int8_t inputStatus = atomic_load_8(&pHandle->pushHandle.inputStatus);
if (inputStatus == TASK_INPUT_STATUS__NORMAL) {
SStreamDataSubmit* pSubmitClone = streamSubmitRefClone(pSubmit);
SStreamDataSubmit* pSubmitClone = streamSubmitBlockClone(pSubmit);
if (pSubmitClone == NULL) {
return -1;
}
......@@ -207,33 +207,18 @@ int32_t tqPushMsgNew(STQ* pTq, void* msg, int32_t msgLen, tmsg_t msgType, int64_
#endif
typedef struct {
void* pKey;
void* pKey;
int64_t keyLen;
} SItem;
static void recordPushedEntry(SArray* cachedKey, void* pIter);
static void doRemovePushedEntry(SArray* pCachedKeys, STQ* pTq);
static void freeItem(void* param) {
SItem* p = (SItem*) param;
taosMemoryFree(p->pKey);
}
static void doRemovePushedEntry(SArray* pCachedKeys, STQ* pTq) {
int32_t vgId = TD_VID(pTq->pVnode);
int32_t numOfKeys = (int32_t) taosArrayGetSize(pCachedKeys);
for (int32_t i = 0; i < numOfKeys; i++) {
SItem* pItem = taosArrayGet(pCachedKeys, i);
if (taosHashRemove(pTq->pPushMgr, pItem->pKey, pItem->keyLen) != 0) {
tqError("vgId:%d, tq push hash remove key error, key: %s", vgId, (char*) pItem->pKey);
}
}
if (numOfKeys > 0) {
tqDebug("vgId:%d, pushed %d items and remain:%d", vgId, numOfKeys, (int32_t)taosHashGetSize(pTq->pPushMgr));
}
}
static void doPushDataForEntry(void* pIter, STqExecHandle* pExec, STQ* pTq, int64_t ver, int32_t vgId, char* pData,
int32_t dataLen, SArray* pCachedKey) {
STqPushEntry* pPushEntry = *(STqPushEntry**)pIter;
......@@ -253,7 +238,7 @@ static void doPushDataForEntry(void* pIter, STqExecHandle* pExec, STQ* pTq, int6
if (qStreamSetScanMemData(pTaskInfo, submit) != 0) {
return;
}
qStreamSetOpen(pTaskInfo);
// here start to scan submit block to extract the subscribed data
int32_t totalRows = 0;
......@@ -347,7 +332,7 @@ int32_t tqPushMsg(STQ* pTq, void* msg, int32_t msgLen, tmsg_t msgType, int64_t v
void* data = taosMemoryMalloc(len);
if (data == NULL) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
tqError("failed to copy data for stream since out of memory");
tqError("vgId:%d, failed to copy submit data for stream processing, since out of memory", vgId);
return -1;
}
......@@ -366,13 +351,6 @@ int32_t tqPushMsg(STQ* pTq, void* msg, int32_t msgLen, tmsg_t msgType, int64_t v
return 0;
}
void recordPushedEntry(SArray* cachedKey, void* pIter) {
size_t kLen = 0;
void* key = taosHashGetKey(pIter, &kLen);
SItem item = {.pKey = strndup(key, kLen), .keyLen = kLen};
taosArrayPush(cachedKey, &item);
}
int32_t tqRegisterPushEntry(STQ* pTq, void* pHandle, const SMqPollReq* pRequest, SRpcMsg* pRpcMsg,
SMqDataRsp* pDataRsp, int32_t type) {
uint64_t consumerId = pRequest->consumerId;
......@@ -430,3 +408,26 @@ int32_t tqUnregisterPushEntry(STQ* pTq, const char* pKey, int32_t keyLen, uint64
return 0;
}
void recordPushedEntry(SArray* cachedKey, void* pIter) {
size_t kLen = 0;
void* key = taosHashGetKey(pIter, &kLen);
SItem item = {.pKey = strndup(key, kLen), .keyLen = kLen};
taosArrayPush(cachedKey, &item);
}
void doRemovePushedEntry(SArray* pCachedKeys, STQ* pTq) {
int32_t vgId = TD_VID(pTq->pVnode);
int32_t numOfKeys = (int32_t) taosArrayGetSize(pCachedKeys);
for (int32_t i = 0; i < numOfKeys; i++) {
SItem* pItem = taosArrayGet(pCachedKeys, i);
if (taosHashRemove(pTq->pPushMgr, pItem->pKey, pItem->keyLen) != 0) {
tqError("vgId:%d, tq push hash remove key error, key: %s", vgId, (char*) pItem->pKey);
}
}
if (numOfKeys > 0) {
tqDebug("vgId:%d, pushed %d items and remain:%d", vgId, numOfKeys, (int32_t)taosHashGetSize(pTq->pPushMgr));
}
}
This diff has been collapsed.
......@@ -71,37 +71,25 @@ int32_t tqScanData(STQ* pTq, const STqHandle* pHandle, SMqDataRsp* pRsp, STqOffs
int32_t totalRows = 0;
const STqExecHandle* pExec = &pHandle->execHandle;
qTaskInfo_t task = pExec->task;
qTaskInfo_t task = pExec->task;
if (qStreamPrepareScan(task, pOffset, pHandle->execHandle.subType) < 0) {
tqDebug("prepare scan failed, vgId:%d, consumer:0x%"PRIx64, vgId, pHandle->consumerId);
if (pOffset->type == TMQ_OFFSET__LOG) {
pRsp->rspOffset = *pOffset;
return code;
} else {
tqOffsetResetToLog(pOffset, pHandle->snapshotVer);
if (qStreamPrepareScan(task, pOffset, pHandle->execHandle.subType) < 0) {
tqDebug("prepare scan failed, vgId:%d, consumer:0x%"PRIx64, vgId, pHandle->consumerId);
pRsp->rspOffset = *pOffset;
return code;
}
}
tqError("prepare scan failed, return");
return -1;
}
while (1) {
SSDataBlock* pDataBlock = NULL;
uint64_t ts = 0;
tqDebug("vgId:%d, tmq task start to execute, consumer:0x%"PRIx64, vgId, pHandle->consumerId);
code = qExecTask(task, &pDataBlock, &ts);
if (code != TSDB_CODE_SUCCESS) {
tqError("vgId:%d, task exec error since %s, consumer:0x%" PRIx64, vgId, terrstr(),
pHandle->consumerId);
return code;
qStreamSetOpen(task);
tqDebug("consumer:0x%"PRIx64" vgId:%d, tmq one task start execute", pHandle->consumerId, vgId);
if (qExecTask(task, &pDataBlock, &ts) != TSDB_CODE_SUCCESS) {
tqError("consumer:0x%"PRIx64" vgId:%d, task exec error since %s", pHandle->consumerId, vgId, terrstr());
return -1;
}
// current scan should be stopped ASAP, since the re-balance occurs.
tqDebug("consumer:0x%"PRIx64" vgId:%d tmq one task end executed, pDataBlock:%p", pHandle->consumerId, vgId, pDataBlock);
// current scan should be stopped asap, since the rebalance occurs.
if (pDataBlock == NULL) {
break;
}
......@@ -113,37 +101,15 @@ int32_t tqScanData(STQ* pTq, const STqHandle* pHandle, SMqDataRsp* pRsp, STqOffs
}
pRsp->blockNum++;
tqDebug("vgId:%d, consumer:0x%" PRIx64 " tmq task executed, rows:%" PRId64 ", total blocks:%d", vgId, pHandle->consumerId,
pDataBlock->info.rows, pRsp->blockNum);
if (pOffset->type == TMQ_OFFSET__SNAPSHOT_DATA) {
totalRows += pDataBlock->info.rows;
if (totalRows >= MAX_ROWS_TO_RETURN) {
break;
}
totalRows += pDataBlock->info.rows;
if (totalRows >= MAX_ROWS_TO_RETURN) {
break;
}
}
tqDebug("consumer:0x%"PRIx64" vgId:%d tmq task executed finished, total blocks:%d, totalRows:%d", pHandle->consumerId, vgId, pRsp->blockNum, totalRows);
qStreamExtractOffset(task, &pRsp->rspOffset);
if (pRsp->rspOffset.type == 0) {
code = TSDB_CODE_INVALID_PARA;
tqError("vgId:%d, expected rsp offset: type %d %" PRId64 " %" PRId64 " %" PRId64, vgId, pRsp->rspOffset.type,
pRsp->rspOffset.ts, pRsp->rspOffset.uid, pRsp->rspOffset.version);
return code;
}
if (pRsp->withTbName || pRsp->withSchema) {
code = TSDB_CODE_INVALID_PARA;
tqError("vgId:%d, get column should not with meta:%d,%d", vgId, pRsp->withTbName, pRsp->withSchema);
return code;
}
tqDebug("vgId:%d, consumer:0x%" PRIx64 " tmq task executed, total blocks:%d, rows:%d", vgId, pHandle->consumerId,
pRsp->blockNum, totalRows);
return code;
return 0;
}
int32_t tqScanTaosx(STQ* pTq, const STqHandle* pHandle, STaosxRsp* pRsp, SMqMetaRsp* pMetaRsp, STqOffsetVal* pOffset) {
......@@ -151,18 +117,8 @@ int32_t tqScanTaosx(STQ* pTq, const STqHandle* pHandle, STaosxRsp* pRsp, SMqMeta
qTaskInfo_t task = pExec->task;
if (qStreamPrepareScan(task, pOffset, pHandle->execHandle.subType) < 0) {
tqDebug("prepare scan failed, return");
if (pOffset->type == TMQ_OFFSET__LOG) {
pRsp->rspOffset = *pOffset;
return 0;
} else {
tqOffsetResetToLog(pOffset, pHandle->snapshotVer);
if (qStreamPrepareScan(task, pOffset, pHandle->execHandle.subType) < 0) {
tqDebug("prepare scan failed, return");
pRsp->rspOffset = *pOffset;
return 0;
}
}
tqDebug("tqScanTaosx prepare scan failed, return");
return -1;
}
int32_t rowCnt = 0;
......@@ -208,42 +164,31 @@ int32_t tqScanTaosx(STQ* pTq, const STqHandle* pHandle, STaosxRsp* pRsp, SMqMeta
}
}
if (pDataBlock == NULL && pOffset->type == TMQ_OFFSET__SNAPSHOT_DATA) {
if (qStreamExtractPrepareUid(task) != 0) {
// get meta
SMqMetaRsp* tmp = qStreamExtractMetaMsg(task);
if (tmp->metaRspLen > 0) {
qStreamExtractOffset(task, &tmp->rspOffset);
*pMetaRsp = *tmp;
tqDebug("tmqsnap task get data");
break;
}
if (pDataBlock == NULL) {
qStreamExtractOffset(task, pOffset);
if (pOffset->type == TMQ_OFFSET__SNAPSHOT_DATA) {
continue;
}
tqDebug("tmqsnap vgId: %d, tsdb consume over, switch to wal, ver %" PRId64, TD_VID(pTq->pVnode),
pHandle->snapshotVer + 1);
tqDebug("tmqsnap vgId: %d, tsdb consume over, switch to wal, ver %" PRId64, TD_VID(pTq->pVnode), pHandle->snapshotVer + 1);
qStreamExtractOffset(task, &pRsp->rspOffset);
break;
}
if (pRsp->blockNum > 0) {
tqDebug("tmqsnap task exec exited, get data");
qStreamExtractOffset(task, &pRsp->rspOffset);
break;
}
SMqMetaRsp* tmp = qStreamExtractMetaMsg(task);
if (tmp->rspOffset.type == TMQ_OFFSET__SNAPSHOT_DATA) {
tqOffsetResetToData(pOffset, tmp->rspOffset.uid, tmp->rspOffset.ts);
qStreamPrepareScan(task, pOffset, pHandle->execHandle.subType);
tmp->rspOffset.type = TMQ_OFFSET__SNAPSHOT_META;
tqDebug("tmqsnap task exec change to get data");
continue;
}
*pMetaRsp = *tmp;
tqDebug("tmqsnap task exec exited, get meta");
tqDebug("task exec exited");
break;
}
qStreamExtractOffset(task, &pRsp->rspOffset);
if (pRsp->rspOffset.type == 0) {
tqError("expected rsp offset: type %d %" PRId64 " %" PRId64 " %" PRId64, pRsp->rspOffset.type, pRsp->rspOffset.ts,
pRsp->rspOffset.uid, pRsp->rspOffset.version);
return -1;
}
return 0;
......
......@@ -541,6 +541,7 @@ int32_t vnodeProcessFetchMsg(SVnode *pVnode, SRpcMsg *pMsg, SQueueInfo *pInfo) {
return vnodeGetBatchMeta(pVnode, pMsg);
case TDMT_VND_TMQ_CONSUME:
return tqProcessPollReq(pVnode->pTq, pMsg);
case TDMT_STREAM_TASK_RUN:
return tqProcessTaskRunReq(pVnode->pTq, pMsg);
#if 1
......
......@@ -128,10 +128,10 @@ enum {
typedef struct {
// TODO remove prepareStatus
STqOffsetVal prepareStatus; // for tmq
STqOffsetVal lastStatus; // for tmq
// STqOffsetVal prepareStatus; // for tmq
STqOffsetVal currentOffset; // for tmq
SMqMetaRsp metaRsp; // for tmq fetching meta
int8_t returned;
// int8_t returned;
int64_t snapshotVer;
// const SSubmitReq* pReq;
......@@ -191,7 +191,7 @@ enum {
OP_OPENED = 0x1,
OP_RES_TO_RETURN = 0x5,
OP_EXEC_DONE = 0x9,
OP_EXEC_RECV = 0x11,
// OP_EXEC_RECV = 0x11,
};
typedef struct SOperatorFpSet {
......@@ -502,32 +502,12 @@ typedef struct STableCountScanSupp {
char stbNameFilter[TSDB_TABLE_NAME_LEN];
} STableCountScanSupp;
typedef struct STableCountScanOperatorInfo {
SReadHandle readHandle;
SSDataBlock* pRes;
STableCountScanSupp supp;
int32_t currGrpIdx;
SArray* stbUidList; // when group by db_name and/or stable_name
} STableCountScanOperatorInfo;
typedef struct SOptrBasicInfo {
SResultRowInfo resultRowInfo;
SSDataBlock* pRes;
bool mergeResultBlock;
} SOptrBasicInfo;
typedef struct SAggOperatorInfo {
SOptrBasicInfo binfo;
SAggSupporter aggSup;
STableQueryInfo* current;
uint64_t groupId;
SGroupResInfo groupResInfo;
SExprSupp scalarExprSup;
bool groupKeyOptimized;
} SAggOperatorInfo;
typedef struct SIntervalAggOperatorInfo {
SOptrBasicInfo binfo; // basic info
SAggSupporter aggSup; // aggregate supporter
......@@ -852,7 +832,7 @@ SArray* getTableListInfo(const SExecTaskInfo* pTaskInfo);
int32_t createExecTaskInfo(SSubplan* pPlan, SExecTaskInfo** pTaskInfo, SReadHandle* pHandle, uint64_t taskId,
int32_t vgId, char* sql, EOPTR_EXEC_MODEL model);
int32_t createDataSinkParam(SDataSinkNode* pNode, void** pParam, STableListInfo* pTableListInfo, SReadHandle* readHandle);
int32_t createDataSinkParam(SDataSinkNode* pNode, void** pParam, SExecTaskInfo* pTask, SReadHandle* readHandle);
int32_t getOperatorExplainExecInfo(SOperatorInfo* operatorInfo, SArray* pExecInfoList);
STimeWindow getActiveTimeWindow(SDiskbasedBuf* pBuf, SResultRowInfo* pResultRowInfo, int64_t ts, SInterval* pInterval,
......
......@@ -40,6 +40,16 @@ typedef struct {
int32_t startOffset;
} SFunctionCtxStatus;
typedef struct SAggOperatorInfo {
SOptrBasicInfo binfo;
SAggSupporter aggSup;
STableQueryInfo* current;
uint64_t groupId;
SGroupResInfo groupResInfo;
SExprSupp scalarExprSup;
bool groupKeyOptimized;
} SAggOperatorInfo;
static void destroyAggOperatorInfo(void* param);
static void setExecutionContext(SOperatorInfo* pOperator, int32_t numOfOutput, uint64_t groupId);
......
......@@ -127,12 +127,10 @@ static int32_t doSetStreamBlock(SOperatorInfo* pOperator, void* input, size_t nu
pOperator->status = OP_NOT_OPENED;
SStreamScanInfo* pInfo = pOperator->info;
qDebug("stream set total blocks:%d, task id:%s" PRIx64, (int32_t)numOfBlocks, id);
ASSERT(pInfo->validBlockIndex == 0);
ASSERT(taosArrayGetSize(pInfo->pBlockLists) == 0);
qDebug("task stream set total blocks:%d %s", (int32_t)numOfBlocks, id);
ASSERT(pInfo->validBlockIndex == 0 && taosArrayGetSize(pInfo->pBlockLists) == 0);
if (type == STREAM_INPUT__MERGED_SUBMIT) {
// ASSERT(numOfBlocks > 1);
for (int32_t i = 0; i < numOfBlocks; i++) {
SPackedData* pReq = POINTER_SHIFT(input, i * sizeof(SPackedData));
taosArrayPush(pInfo->pBlockLists, pReq);
......@@ -505,12 +503,7 @@ int32_t qCreateExecTask(SReadHandle* readHandle, int32_t vgId, uint64_t taskId,
if (handle) {
void* pSinkParam = NULL;
SArray* pInfoList = getTableListInfo(*pTask);
STableListInfo* pTableListInfo = taosArrayGetP(pInfoList, 0);
taosArrayDestroy(pInfoList);
code = createDataSinkParam(pSubplan->pDataSink, &pSinkParam, pTableListInfo, readHandle);
code = createDataSinkParam(pSubplan->pDataSink, &pSinkParam, (*pTask), readHandle);
if (code != TSDB_CODE_SUCCESS) {
qError("failed to createDataSinkParam, vgId:%d, code:%s, %s", vgId, tstrerror(code), (*pTask)->id.str);
goto _error;
......@@ -1029,15 +1022,9 @@ SMqMetaRsp* qStreamExtractMetaMsg(qTaskInfo_t tinfo) {
return &pTaskInfo->streamInfo.metaRsp;
}
int64_t qStreamExtractPrepareUid(qTaskInfo_t tinfo) {
void qStreamExtractOffset(qTaskInfo_t tinfo, STqOffsetVal* pOffset) {
SExecTaskInfo* pTaskInfo = (SExecTaskInfo*)tinfo;
return pTaskInfo->streamInfo.prepareStatus.uid;
}
int32_t qStreamExtractOffset(qTaskInfo_t tinfo, STqOffsetVal* pOffset) {
SExecTaskInfo* pTaskInfo = (SExecTaskInfo*)tinfo;
memcpy(pOffset, &pTaskInfo->streamInfo.lastStatus, sizeof(STqOffsetVal));
return 0;
memcpy(pOffset, &pTaskInfo->streamInfo.currentOffset, sizeof(STqOffsetVal));
}
int32_t initQueryTableDataCondForTmq(SQueryTableDataCond* pCond, SSnapContext* sContext, SMetaTableInfo* pMtInfo) {
......@@ -1083,33 +1070,38 @@ int32_t qStreamSetScanMemData(qTaskInfo_t tinfo, SPackedData submit) {
return 0;
}
void qStreamSetOpen(qTaskInfo_t tinfo) {
SExecTaskInfo* pTaskInfo = (SExecTaskInfo*)tinfo;
SOperatorInfo* pOperator = pTaskInfo->pRoot;
pOperator->status = OP_NOT_OPENED;
}
int32_t qStreamPrepareScan(qTaskInfo_t tinfo, STqOffsetVal* pOffset, int8_t subType) {
SExecTaskInfo* pTaskInfo = (SExecTaskInfo*)tinfo;
SOperatorInfo* pOperator = pTaskInfo->pRoot;
const char* id = GET_TASKID(pTaskInfo);
pTaskInfo->streamInfo.prepareStatus = *pOffset;
pTaskInfo->streamInfo.returned = 0;
if (tOffsetEqual(pOffset, &pTaskInfo->streamInfo.lastStatus)) {
// if pOffset equals the current offset, the consumer continues consuming from here
if (tOffsetEqual(pOffset, &pTaskInfo->streamInfo.currentOffset)) {
return 0;
}
if (subType == TOPIC_SUB_TYPE__COLUMN) {
pOperator->status = OP_OPENED;
pOperator = extractOperatorInTree(pOperator, QUERY_NODE_PHYSICAL_PLAN_STREAM_SCAN, id);
if (pOperator == NULL) {
return -1;
}
SStreamScanInfo* pInfo = pOperator->info;
STableScanInfo* pScanInfo = pInfo->pTableScanOp->info;
STableScanBase* pScanBaseInfo = &pScanInfo->base;
STableListInfo* pTableListInfo = pScanBaseInfo->pTableListInfo;
STableListInfo* pTableListInfo = pScanBaseInfo->pTableListInfo;
if (pOffset->type == TMQ_OFFSET__LOG) {
tsdbReaderClose(pScanBaseInfo->dataReader);
pScanBaseInfo->dataReader = NULL;
// let's seek to the next version in wal file
if (tqSeekVer(pInfo->tqReader, pOffset->version + 1, pTaskInfo->id.str) < 0) {
if (tqSeekVer(pInfo->tqReader, pOffset->version + 1, id) < 0) {
qError("tqSeekVer failed ver:%"PRId64", %s", pOffset->version + 1, id);
return -1;
}
......@@ -1137,6 +1129,7 @@ int32_t qStreamPrepareScan(qTaskInfo_t tinfo, STqOffsetVal* pOffset, int8_t subT
}
}
qDebug("switch to table uid:%" PRId64 " ts:%" PRId64 "% "PRId64 " rows returned", uid, ts, pInfo->pTableScanOp->resultInfo.totalRows);
pInfo->pTableScanOp->resultInfo.totalRows = 0;
// start from current accessed position
......@@ -1241,6 +1234,7 @@ int32_t qStreamPrepareScan(qTaskInfo_t tinfo, STqOffsetVal* pOffset, int8_t subT
qDebug("tmqsnap qStreamPrepareScan snapshot log, %s", id);
}
}
pTaskInfo->streamInfo.currentOffset = *pOffset;
return 0;
}
......
......@@ -1586,7 +1586,7 @@ int32_t extractTableScanNode(SPhysiNode* pNode, STableScanPhysiNode** ppNode) {
return -1;
}
int32_t createDataSinkParam(SDataSinkNode* pNode, void** pParam, STableListInfo* pTableListInfo, SReadHandle* readHandle) {
int32_t createDataSinkParam(SDataSinkNode* pNode, void** pParam, SExecTaskInfo* pTask, SReadHandle* readHandle) {
switch (pNode->type) {
case QUERY_NODE_PHYSICAL_PLAN_QUERY_INSERT: {
SInserterParam* pInserterParam = taosMemoryCalloc(1, sizeof(SInserterParam));
......@@ -1604,23 +1604,26 @@ int32_t createDataSinkParam(SDataSinkNode* pNode, void** pParam, STableListInfo*
return TSDB_CODE_OUT_OF_MEMORY;
}
int32_t tbNum = tableListGetSize(pTableListInfo);
SArray* pInfoList = getTableListInfo(pTask);
STableListInfo* pTableListInfo = taosArrayGetP(pInfoList, 0);
taosArrayDestroy(pInfoList);
pDeleterParam->suid = tableListGetSuid(pTableListInfo);
// TODO extract uid list
pDeleterParam->pUidList = taosArrayInit(tbNum, sizeof(uint64_t));
int32_t numOfTables = tableListGetSize(pTableListInfo);
pDeleterParam->pUidList = taosArrayInit(numOfTables, sizeof(uint64_t));
if (NULL == pDeleterParam->pUidList) {
taosMemoryFree(pDeleterParam);
return TSDB_CODE_OUT_OF_MEMORY;
}
for (int32_t i = 0; i < tbNum; ++i) {
for (int32_t i = 0; i < numOfTables; ++i) {
STableKeyInfo* pTable = tableListGetInfo(pTableListInfo, i);
taosArrayPush(pDeleterParam->pUidList, &pTable->uid);
}
*pParam = pDeleterParam;
break;
}
default:
......@@ -1967,11 +1970,11 @@ void qStreamCloseTsdbReader(void* task) {
SExecTaskInfo* pTaskInfo = (SExecTaskInfo*)task;
SOperatorInfo* pOp = pTaskInfo->pRoot;
qDebug("stream close tsdb reader, reset status uid:%" PRId64 " ts:%" PRId64, pTaskInfo->streamInfo.lastStatus.uid,
pTaskInfo->streamInfo.lastStatus.ts);
qDebug("stream close tsdb reader, reset status uid:%" PRId64 " ts:%" PRId64, pTaskInfo->streamInfo.currentOffset.uid,
pTaskInfo->streamInfo.currentOffset.ts);
// todo refactor, other thread may already use this read to extract data.
pTaskInfo->streamInfo.lastStatus = (STqOffsetVal){0};
pTaskInfo->streamInfo.currentOffset = (STqOffsetVal){0};
while (pOp->numOfDownstream == 1 && pOp->pDownstream[0]) {
SOperatorInfo* pDownstreamOp = pOp->pDownstream[0];
if (pDownstreamOp->operatorType == QUERY_NODE_PHYSICAL_PLAN_STREAM_SCAN) {
......
......@@ -227,17 +227,8 @@ SSDataBlock* doProjectOperation(SOperatorInfo* pOperator) {
blockDataCleanup(pFinalRes);
SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo;
if (pTaskInfo->streamInfo.submit.msgStr) {
pOperator->status = OP_OPENED;
}
if (pOperator->status == OP_EXEC_DONE) {
if (pTaskInfo->execModel == OPTR_EXEC_MODEL_QUEUE) {
pOperator->status = OP_OPENED;
qDebug("projection in queue model, set status open and return null");
return NULL;
}
return NULL;
}
......@@ -263,23 +254,14 @@ SSDataBlock* doProjectOperation(SOperatorInfo* pOperator) {
// The downstream exec may change the value of the newgroup, so use a local variable instead.
SSDataBlock* pBlock = downstream->fpSet.getNextFn(downstream);
if (pBlock == NULL) {
if (pTaskInfo->execModel == OPTR_EXEC_MODEL_QUEUE && pFinalRes->info.rows == 0) {
pOperator->status = OP_OPENED;
if (pOperator->status == OP_EXEC_RECV) {
continue;
} else {
return NULL;
}
}
qDebug("set op close, exec %d, status %d rows %" PRId64 , pTaskInfo->execModel, pOperator->status,
pFinalRes->info.rows);
qDebug("set op close, exec %d, status %d rows %" PRId64 , pTaskInfo->execModel, pOperator->status, pFinalRes->info.rows);
setOperatorCompleted(pOperator);
break;
}
if (pTaskInfo->execModel == OPTR_EXEC_MODEL_QUEUE) {
qDebug("set status recv");
pOperator->status = OP_EXEC_RECV;
}
// if (pTaskInfo->execModel == OPTR_EXEC_MODEL_QUEUE) {
// qDebug("set status recv");
// pOperator->status = OP_EXEC_RECV;
// }
// for stream interval
if (pBlock->info.type == STREAM_RETRIEVE || pBlock->info.type == STREAM_DELETE_RESULT ||
......
......@@ -33,7 +33,6 @@
int32_t scanDebug = 0;
#define MULTI_READER_MAX_TABLE_NUM 5000
#define SET_REVERSE_SCAN_FLAG(_info) ((_info)->scanFlag = REVERSE_SCAN)
#define SWITCH_ORDER(n) (((n) = ((n) == TSDB_ORDER_ASC) ? TSDB_ORDER_DESC : TSDB_ORDER_ASC))
......@@ -52,6 +51,16 @@ typedef struct STableMergeScanSortSourceParam {
STsdbReader* dataReader;
} STableMergeScanSortSourceParam;
typedef struct STableCountScanOperatorInfo {
SReadHandle readHandle;
SSDataBlock* pRes;
STableCountScanSupp supp;
int32_t currGrpIdx;
SArray* stbUidList; // when group by db_name and/or stable_name
} STableCountScanOperatorInfo;
static bool processBlockWithProbability(const SSampleExecInfo* pInfo);
bool processBlockWithProbability(const SSampleExecInfo* pInfo) {
......@@ -698,9 +707,9 @@ static SSDataBlock* doTableScanImpl(SOperatorInfo* pOperator) {
// todo refactor
/*pTableScanInfo->lastStatus.uid = pBlock->info.id.uid;*/
/*pTableScanInfo->lastStatus.ts = pBlock->info.window.ekey;*/
pTaskInfo->streamInfo.lastStatus.type = TMQ_OFFSET__SNAPSHOT_DATA;
pTaskInfo->streamInfo.lastStatus.uid = pBlock->info.id.uid;
pTaskInfo->streamInfo.lastStatus.ts = pBlock->info.window.ekey;
// pTaskInfo->streamInfo.lastStatus.type = TMQ_OFFSET__SNAPSHOT_DATA;
// pTaskInfo->streamInfo.lastStatus.uid = pBlock->info.id.uid;
// pTaskInfo->streamInfo.lastStatus.ts = pBlock->info.window.ekey;
return pBlock;
}
......@@ -890,9 +899,11 @@ static void destroyTableScanOperatorInfo(void* param) {
SOperatorInfo* createTableScanOperatorInfo(STableScanPhysiNode* pTableScanNode, SReadHandle* readHandle,
STableListInfo* pTableListInfo, SExecTaskInfo* pTaskInfo) {
int32_t code = 0;
STableScanInfo* pInfo = taosMemoryCalloc(1, sizeof(STableScanInfo));
SOperatorInfo* pOperator = taosMemoryCalloc(1, sizeof(SOperatorInfo));
if (pInfo == NULL || pOperator == NULL) {
code = TSDB_CODE_OUT_OF_MEMORY;
goto _error;
}
......@@ -900,8 +911,7 @@ SOperatorInfo* createTableScanOperatorInfo(STableScanPhysiNode* pTableScanNode,
SDataBlockDescNode* pDescNode = pScanNode->node.pOutputDataBlockDesc;
int32_t numOfCols = 0;
int32_t code =
extractColMatchInfo(pScanNode->pScanCols, pDescNode, &numOfCols, COL_MATCH_FROM_COL_ID, &pInfo->base.matchInfo);
code = extractColMatchInfo(pScanNode->pScanCols, pDescNode, &numOfCols, COL_MATCH_FROM_COL_ID, &pInfo->base.matchInfo);
if (code != TSDB_CODE_SUCCESS) {
goto _error;
}
......@@ -1623,14 +1633,11 @@ static SSDataBlock* doQueueScan(SOperatorInfo* pOperator) {
qDebug("start to exec queue scan, %s", id);
if (pTaskInfo->streamInfo.submit.msgStr != NULL) {
if (pInfo->tqReader->msg2.msgStr == NULL) {
SPackedData submit = pTaskInfo->streamInfo.submit;
if (tqReaderSetSubmitReq2(pInfo->tqReader, submit.msgStr, submit.msgLen, submit.ver) < 0) {
qError("submit msg messed up when initing stream submit block %p, %s", submit.msgStr, id);
pInfo->tqReader->msg2 = (SPackedData){0};
pInfo->tqReader->setMsg = 0;
ASSERT(0);
qError("submit msg messed up when initing stream submit block %p", submit.msgStr);
return NULL;
}
}
......@@ -1653,73 +1660,49 @@ static SSDataBlock* doQueueScan(SOperatorInfo* pOperator) {
}
pInfo->tqReader->msg2 = (SPackedData){0};
pInfo->tqReader->setMsg = 0;
pTaskInfo->streamInfo.submit = (SPackedData){0};
return NULL;
}
if (pTaskInfo->streamInfo.prepareStatus.type == TMQ_OFFSET__SNAPSHOT_DATA) {
if (pTaskInfo->streamInfo.currentOffset.type == TMQ_OFFSET__SNAPSHOT_DATA) {
SSDataBlock* pResult = doTableScan(pInfo->pTableScanOp);
if (pResult && pResult->info.rows > 0) {
qDebug("queue scan tsdb return %" PRId64 " rows min:%" PRId64 " max:%" PRId64 " wal curVersion:%" PRId64" %s", pResult->info.rows,
pResult->info.window.skey, pResult->info.window.ekey, pInfo->tqReader->pWalReader->curVersion, id);
pTaskInfo->streamInfo.returned = 1;
qDebug("queue scan tsdb return %"PRId64" rows min:%" PRId64 " max:%" PRId64 " wal curVersion:%" PRId64, pResult->info.rows,
pResult->info.window.skey, pResult->info.window.ekey, pInfo->tqReader->pWalReader->curVersion);
tqOffsetResetToData(&pTaskInfo->streamInfo.currentOffset, pResult->info.id.uid, pResult->info.window.ekey);
return pResult;
} else {
// no data has return already, try to extract data in the WAL
if (!pTaskInfo->streamInfo.returned) {
STableScanInfo* pTSInfo = pInfo->pTableScanOp->info;
tsdbReaderClose(pTSInfo->base.dataReader);
pTSInfo->base.dataReader = NULL;
tqOffsetResetToLog(&pTaskInfo->streamInfo.prepareStatus, pTaskInfo->streamInfo.snapshotVer);
qDebug("queue scan tsdb over, switch to wal ver:%" PRId64 " %s", pTaskInfo->streamInfo.snapshotVer + 1, id);
if (tqSeekVer(pInfo->tqReader, pTaskInfo->streamInfo.snapshotVer + 1, pTaskInfo->id.str) < 0) {
tqOffsetResetToLog(&pTaskInfo->streamInfo.lastStatus, pTaskInfo->streamInfo.snapshotVer);
return NULL;
}
} else {
return NULL;
}
}
STableScanInfo* pTSInfo = pInfo->pTableScanOp->info;
tsdbReaderClose(pTSInfo->base.dataReader);
pTSInfo->base.dataReader = NULL;
qDebug("queue scan tsdb over, switch to wal ver %" PRId64 "", pTaskInfo->streamInfo.snapshotVer + 1);
if (tqSeekVer(pInfo->tqReader, pTaskInfo->streamInfo.snapshotVer + 1, pTaskInfo->id.str) < 0) {
return NULL;
}
tqOffsetResetToLog(&pTaskInfo->streamInfo.currentOffset, pTaskInfo->streamInfo.snapshotVer);
}
if (pTaskInfo->streamInfo.prepareStatus.type == TMQ_OFFSET__LOG) {
if (pTaskInfo->streamInfo.currentOffset.type == TMQ_OFFSET__LOG) {
while (1) {
SFetchRet ret = {0};
terrno = 0;
if (tqNextBlock(pInfo->tqReader, &ret) < 0) {
// if the end is reached, terrno is 0
if (terrno != 0) {
qError("failed to get next log block since %s, %s", terrstr(), id);
}
}
tqNextBlock(pInfo->tqReader, &ret);
tqOffsetResetToLog(&pTaskInfo->streamInfo.currentOffset, pInfo->tqReader->pWalReader->curVersion - 1); //curVersion move to next, so currentOffset = curVersion - 1
if (ret.fetchType == FETCH_TYPE__DATA) {
qDebug("doQueueScan get data from log %"PRId64" rows, version:%" PRId64, ret.data.info.rows, pTaskInfo->streamInfo.currentOffset.version);
blockDataCleanup(pInfo->pRes);
setBlockIntoRes(pInfo, &ret.data, true);
if (pInfo->pRes->info.rows > 0) {
pOperator->status = OP_EXEC_RECV;
qDebug("queue scan log return %" PRId64 " rows", pInfo->pRes->info.rows);
qDebug("doQueueScan get data from log %"PRId64" rows, return, version:%" PRId64, pInfo->pRes->info.rows, pTaskInfo->streamInfo.currentOffset.version);
return pInfo->pRes;
}
} else if (ret.fetchType == FETCH_TYPE__META) {
qError("unexpected ret.fetchType:%d", ret.fetchType);
continue;
} else if (ret.fetchType == FETCH_TYPE__NONE ||
(ret.fetchType == FETCH_TYPE__SEP && pOperator->status == OP_EXEC_RECV)) {
pTaskInfo->streamInfo.lastStatus = ret.offset;
char formatBuf[80];
tFormatOffset(formatBuf, 80, &ret.offset);
qDebug("queue scan log return null, offset %s", formatBuf);
pOperator->status = OP_OPENED;
} else if (ret.fetchType == FETCH_TYPE__NONE) {
qDebug("doQueueScan get none from log, return, version:%" PRId64, pTaskInfo->streamInfo.currentOffset.version);
return NULL;
}
}
} else {
qError("unexpected streamInfo prepare type: %d %s", pTaskInfo->streamInfo.prepareStatus.type, id);
qError("unexpected streamInfo prepare type: %d", pTaskInfo->streamInfo.currentOffset.type);
return NULL;
}
}
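/*
 * Editorial summary of the flow above (not part of this change): doQueueScan first
 * drains the tsdb snapshot while currentOffset is TMQ_OFFSET__SNAPSHOT_DATA; once the
 * snapshot is exhausted it closes the tsdb reader, seeks the WAL reader to
 * snapshotVer + 1 and resets currentOffset to TMQ_OFFSET__LOG, after which blocks are
 * pulled from the log until FETCH_TYPE__NONE is returned.
 */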
......@@ -2074,7 +2057,6 @@ FETCH_NEXT_BLOCK:
int32_t current = pInfo->validBlockIndex++;
SPackedData* pSubmit = taosArrayGet(pInfo->pBlockLists, current);
/*if (tqReaderSetDataMsg(pInfo->tqReader, pSubmit, 0) < 0) {*/
if (tqReaderSetSubmitReq2(pInfo->tqReader, pSubmit->msgStr, pSubmit->msgLen, pSubmit->ver) < 0) {
qError("submit msg messed up when initing stream submit block %p, current %d, total %d", pSubmit, current,
totBlockNum);
......@@ -2166,7 +2148,7 @@ static SSDataBlock* doRawScan(SOperatorInfo* pOperator) {
pTaskInfo->streamInfo.metaRsp.metaRsp = NULL;
qDebug("tmqsnap doRawScan called");
if (pTaskInfo->streamInfo.prepareStatus.type == TMQ_OFFSET__SNAPSHOT_DATA) {
if (pTaskInfo->streamInfo.currentOffset.type == TMQ_OFFSET__SNAPSHOT_DATA) {
bool hasNext = false;
if (pInfo->dataReader) {
code = tsdbNextDataBlock(pInfo->dataReader, &hasNext);
......@@ -2188,28 +2170,23 @@ static SSDataBlock* doRawScan(SOperatorInfo* pOperator) {
}
qDebug("tmqsnap doRawScan get data uid:%" PRId64 "", pBlock->info.id.uid);
pTaskInfo->streamInfo.lastStatus.type = TMQ_OFFSET__SNAPSHOT_DATA;
pTaskInfo->streamInfo.lastStatus.uid = pBlock->info.id.uid;
pTaskInfo->streamInfo.lastStatus.ts = pBlock->info.window.ekey;
tqOffsetResetToData(&pTaskInfo->streamInfo.currentOffset, pBlock->info.id.uid, pBlock->info.window.ekey);
return pBlock;
}
SMetaTableInfo mtInfo = getUidfromSnapShot(pInfo->sContext);
STqOffsetVal offset = {0};
if (mtInfo.uid == 0) { // read snapshot done, change to get data from wal
qDebug("tmqsnap read snapshot done, change to get data from wal");
pTaskInfo->streamInfo.prepareStatus.uid = mtInfo.uid;
pTaskInfo->streamInfo.lastStatus.type = TMQ_OFFSET__LOG;
pTaskInfo->streamInfo.lastStatus.version = pInfo->sContext->snapVersion;
tqOffsetResetToLog(&offset, pInfo->sContext->snapVersion);
} else {
pTaskInfo->streamInfo.prepareStatus.uid = mtInfo.uid;
pTaskInfo->streamInfo.prepareStatus.ts = INT64_MIN;
tqOffsetResetToData(&offset, mtInfo.uid, INT64_MIN);
qDebug("tmqsnap change get data uid:%" PRId64 "", mtInfo.uid);
qStreamPrepareScan(pTaskInfo, &pTaskInfo->streamInfo.prepareStatus, pInfo->sContext->subType);
}
qStreamPrepareScan(pTaskInfo, &offset, pInfo->sContext->subType);
tDeleteSSchemaWrapper(mtInfo.schema);
qDebug("tmqsnap stream scan tsdb return null");
return NULL;
} else if (pTaskInfo->streamInfo.prepareStatus.type == TMQ_OFFSET__SNAPSHOT_META) {
} else if (pTaskInfo->streamInfo.currentOffset.type == TMQ_OFFSET__SNAPSHOT_META) {
SSnapContext* sContext = pInfo->sContext;
void* data = NULL;
int32_t dataLen = 0;
......@@ -2221,16 +2198,12 @@ static SSDataBlock* doRawScan(SOperatorInfo* pOperator) {
return NULL;
}
if (!sContext->queryMetaOrData) { // switch to fetching data on the next poll request
pTaskInfo->streamInfo.lastStatus.type = TMQ_OFFSET__SNAPSHOT_META;
pTaskInfo->streamInfo.lastStatus.uid = uid;
pTaskInfo->streamInfo.metaRsp.rspOffset.type = TMQ_OFFSET__SNAPSHOT_DATA;
pTaskInfo->streamInfo.metaRsp.rspOffset.uid = 0;
pTaskInfo->streamInfo.metaRsp.rspOffset.ts = INT64_MIN;
if (!sContext->queryMeta) { // switch to fetching data on the next poll request
STqOffsetVal offset = {0};
tqOffsetResetToData(&offset, 0, INT64_MIN);
qStreamPrepareScan(pTaskInfo, &offset, pInfo->sContext->subType);
} else {
pTaskInfo->streamInfo.lastStatus.type = TMQ_OFFSET__SNAPSHOT_META;
pTaskInfo->streamInfo.lastStatus.uid = uid;
pTaskInfo->streamInfo.metaRsp.rspOffset = pTaskInfo->streamInfo.lastStatus;
tqOffsetResetToMeta(&pTaskInfo->streamInfo.currentOffset, uid);
pTaskInfo->streamInfo.metaRsp.resMsgType = type;
pTaskInfo->streamInfo.metaRsp.metaRspLen = dataLen;
pTaskInfo->streamInfo.metaRsp.metaRsp = data;
......@@ -2444,7 +2417,6 @@ SOperatorInfo* createStreamScanOperatorInfo(SReadHandle* pHandle, STableScanPhys
if (pHandle->initTableReader) {
pTSInfo->scanMode = TABLE_SCAN__TABLE_ORDER;
pTSInfo->base.dataReader = NULL;
pTaskInfo->streamInfo.lastStatus.uid = -1;
}
if (pHandle->initTqReader) {
......
......@@ -4758,6 +4758,7 @@ static SSDataBlock* doStreamIntervalAgg(SOperatorInfo* pOperator) {
if (!pInfo->pUpdated) {
pInfo->pUpdated = taosArrayInit(4, sizeof(SWinKey));
}
if (!pInfo->pUpdatedMap) {
_hash_fn_t hashFn = taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY);
pInfo->pUpdatedMap = tSimpleHashInit(1024, hashFn);
......
......@@ -330,7 +330,7 @@ column_def_list(A) ::= column_def(B).
column_def_list(A) ::= column_def_list(B) NK_COMMA column_def(C). { A = addNodeToList(pCxt, B, C); }
column_def(A) ::= column_name(B) type_name(C). { A = createColumnDefNode(pCxt, &B, C, NULL); }
column_def(A) ::= column_name(B) type_name(C) COMMENT NK_STRING(D). { A = createColumnDefNode(pCxt, &B, C, &D); }
//column_def(A) ::= column_name(B) type_name(C) COMMENT NK_STRING(D). { A = createColumnDefNode(pCxt, &B, C, &D); }
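// Editorial note: with the rule above commented out, a per-column COMMENT clause
// (e.g. "c1 INT COMMENT 'xxx'") is no longer accepted in column definitions; only the
// table-level COMMENT option remains (inferred from this change and the parser tests below).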
%type type_name { SDataType }
%destructor type_name { }
......
......@@ -470,11 +470,11 @@ int32_t insMergeTableDataCxt(SHashObj* pTableHash, SArray** pVgDataBlocks) {
if (TSDB_CODE_SUCCESS == code) {
SVgroupDataCxt* pVgCxt = NULL;
int32_t vgId = pTableCxt->pMeta->vgId;
void** p = taosHashGet(pVgroupHash, &vgId, sizeof(vgId));
if (NULL == p) {
void** pp = taosHashGet(pVgroupHash, &vgId, sizeof(vgId));
if (NULL == pp) {
code = createVgroupDataCxt(pTableCxt, pVgroupHash, pVgroupList, &pVgCxt);
} else {
pVgCxt = *(SVgroupDataCxt**)p;
pVgCxt = *(SVgroupDataCxt**)pp;
}
if (TSDB_CODE_SUCCESS == code) {
code = fillVgroupDataCxt(pTableCxt, pVgCxt);
......
This diff has been collapsed.
......@@ -742,10 +742,10 @@ TEST_F(ParserInitialCTest, createStable) {
addFieldToCreateStbReq(false, "a15", TSDB_DATA_TYPE_VARCHAR, 50 + VARSTR_HEADER_SIZE);
run("CREATE STABLE IF NOT EXISTS rollup_db.t1("
"ts TIMESTAMP, c1 INT, c2 INT UNSIGNED, c3 BIGINT, c4 BIGINT UNSIGNED, c5 FLOAT, c6 DOUBLE, c7 BINARY(20), "
"c8 SMALLINT, c9 SMALLINT UNSIGNED COMMENT 'test column comment', c10 TINYINT, c11 TINYINT UNSIGNED, c12 BOOL, "
"c8 SMALLINT, c9 SMALLINT UNSIGNED, c10 TINYINT, c11 TINYINT UNSIGNED, c12 BOOL, "
"c13 NCHAR(30), c14 VARCHAR(50)) "
"TAGS (a1 TIMESTAMP, a2 INT, a3 INT UNSIGNED, a4 BIGINT, a5 BIGINT UNSIGNED, a6 FLOAT, a7 DOUBLE, "
"a8 BINARY(20), a9 SMALLINT, a10 SMALLINT UNSIGNED COMMENT 'test column comment', a11 TINYINT, "
"a8 BINARY(20), a9 SMALLINT, a10 SMALLINT UNSIGNED, a11 TINYINT, "
"a12 TINYINT UNSIGNED, a13 BOOL, a14 NCHAR(30), a15 VARCHAR(50)) "
"TTL 100 COMMENT 'test create table' SMA(c1, c2, c3) ROLLUP (MIN) MAX_DELAY 100s,10m WATERMARK 10a,1m "
"DELETE_MARK 1000s,200m");
......@@ -1005,16 +1005,16 @@ TEST_F(ParserInitialCTest, createTable) {
run("CREATE TABLE IF NOT EXISTS test.t1("
"ts TIMESTAMP, c1 INT, c2 INT UNSIGNED, c3 BIGINT, c4 BIGINT UNSIGNED, c5 FLOAT, c6 DOUBLE, c7 BINARY(20), "
"c8 SMALLINT, c9 SMALLINT UNSIGNED COMMENT 'test column comment', c10 TINYINT, c11 TINYINT UNSIGNED, c12 BOOL, "
"c8 SMALLINT, c9 SMALLINT UNSIGNED, c10 TINYINT, c11 TINYINT UNSIGNED, c12 BOOL, "
"c13 NCHAR(30), c15 VARCHAR(50)) "
"TTL 100 COMMENT 'test create table' SMA(c1, c2, c3)");
run("CREATE TABLE IF NOT EXISTS rollup_db.t1("
"ts TIMESTAMP, c1 INT, c2 INT UNSIGNED, c3 BIGINT, c4 BIGINT UNSIGNED, c5 FLOAT, c6 DOUBLE, c7 BINARY(20), "
"c8 SMALLINT, c9 SMALLINT UNSIGNED COMMENT 'test column comment', c10 TINYINT, c11 TINYINT UNSIGNED, c12 BOOL, "
"c8 SMALLINT, c9 SMALLINT UNSIGNED, c10 TINYINT, c11 TINYINT UNSIGNED, c12 BOOL, "
"c13 NCHAR(30), c14 VARCHAR(50)) "
"TAGS (a1 TIMESTAMP, a2 INT, a3 INT UNSIGNED, a4 BIGINT, a5 BIGINT UNSIGNED, a6 FLOAT, a7 DOUBLE, a8 BINARY(20), "
"a9 SMALLINT, a10 SMALLINT UNSIGNED COMMENT 'test column comment', a11 TINYINT, a12 TINYINT UNSIGNED, a13 BOOL, "
"a9 SMALLINT, a10 SMALLINT UNSIGNED, a11 TINYINT, a12 TINYINT UNSIGNED, a13 BOOL, "
"a14 NCHAR(30), a15 VARCHAR(50)) "
"TTL 100 COMMENT 'test create table' SMA(c1, c2, c3) ROLLUP (MIN)");
......
......@@ -68,7 +68,7 @@ void streamSchedByTimer(void* param, void* tmrId) {
atomic_store_8(&pTask->triggerStatus, TASK_TRIGGER_STATUS__INACTIVE);
if (streamTaskInput(pTask, (SStreamQueueItem*)trigger) < 0) {
if (tAppendDataForStream(pTask, (SStreamQueueItem*)trigger) < 0) {
taosFreeQitem(trigger);
taosTmrReset(streamSchedByTimer, (int32_t)pTask->triggerParam, pTask, streamEnv.timer, &pTask->timer);
return;
......@@ -92,22 +92,22 @@ int32_t streamSetupTrigger(SStreamTask* pTask) {
int32_t streamSchedExec(SStreamTask* pTask) {
int8_t schedStatus =
atomic_val_compare_exchange_8(&pTask->schedStatus, TASK_SCHED_STATUS__INACTIVE, TASK_SCHED_STATUS__WAITING);
if (schedStatus == TASK_SCHED_STATUS__INACTIVE) {
SStreamTaskRunReq* pRunReq = rpcMallocCont(sizeof(SStreamTaskRunReq));
if (pRunReq == NULL) {
atomic_store_8(&pTask->schedStatus, TASK_SCHED_STATUS__INACTIVE);
return -1;
}
pRunReq->head.vgId = pTask->nodeId;
pRunReq->streamId = pTask->streamId;
pRunReq->taskId = pTask->taskId;
SRpcMsg msg = {
.msgType = TDMT_STREAM_TASK_RUN,
.pCont = pRunReq,
.contLen = sizeof(SStreamTaskRunReq),
};
SRpcMsg msg = { .msgType = TDMT_STREAM_TASK_RUN, .pCont = pRunReq, .contLen = sizeof(SStreamTaskRunReq) };
tmsgPutToQueue(pTask->pMsgCb, STREAM_QUEUE, &msg);
}
return 0;
}
......@@ -123,7 +123,7 @@ int32_t streamTaskEnqueue(SStreamTask* pTask, const SStreamDispatchReq* pReq, SR
/*pData->blocks = pReq->data;*/
/*pBlock->sourceVer = pReq->sourceVer;*/
streamDispatchReqToData(pReq, pData);
if (streamTaskInput(pTask, (SStreamQueueItem*)pData) == 0) {
if (tAppendDataForStream(pTask, (SStreamQueueItem*)pData) == 0) {
status = TASK_INPUT_STATUS__NORMAL;
} else {
status = TASK_INPUT_STATUS__FAILED;
......@@ -164,7 +164,7 @@ int32_t streamTaskEnqueueRetrieve(SStreamTask* pTask, SStreamRetrieveReq* pReq,
/*pData->blocks = pReq->data;*/
/*pBlock->sourceVer = pReq->sourceVer;*/
streamRetrieveReqToData(pReq, pData);
if (streamTaskInput(pTask, (SStreamQueueItem*)pData) == 0) {
if (tAppendDataForStream(pTask, (SStreamQueueItem*)pData) == 0) {
status = TASK_INPUT_STATUS__NORMAL;
} else {
status = TASK_INPUT_STATUS__FAILED;
......@@ -275,7 +275,57 @@ int32_t streamProcessRetrieveReq(SStreamTask* pTask, SStreamRetrieveReq* pReq, S
return 0;
}
// int32_t streamProcessRetrieveRsp(SStreamTask* pTask, SStreamRetrieveRsp* pRsp) {
// //
// return 0;
// }
int32_t tAppendDataForStream(SStreamTask* pTask, SStreamQueueItem* pItem) {
int8_t type = pItem->type;
if (type == STREAM_INPUT__DATA_SUBMIT) {
SStreamDataSubmit2* pSubmitBlock = streamSubmitBlockClone((SStreamDataSubmit2*)pItem);
if (pSubmitBlock == NULL) {
qDebug("task %d %p submit enqueue failed since out of memory", pTask->taskId, pTask);
terrno = TSDB_CODE_OUT_OF_MEMORY;
atomic_store_8(&pTask->inputStatus, TASK_INPUT_STATUS__FAILED);
return -1;
}
int32_t total = taosQueueItemSize(pTask->inputQueue->queue) + 1;
qDebug("stream task:%d %p submit enqueue %p %p %p msgLen:%d ver:%" PRId64 ", total in queue:%d", pTask->taskId,
pTask, pItem, pSubmitBlock, pSubmitBlock->submit.msgStr, pSubmitBlock->submit.msgLen,
pSubmitBlock->submit.ver, total);
taosWriteQitem(pTask->inputQueue->queue, pSubmitBlock);
} else if (type == STREAM_INPUT__DATA_BLOCK || type == STREAM_INPUT__DATA_RETRIEVE ||
type == STREAM_INPUT__REF_DATA_BLOCK) {
taosWriteQitem(pTask->inputQueue->queue, pItem);
} else if (type == STREAM_INPUT__CHECKPOINT) {
taosWriteQitem(pTask->inputQueue->queue, pItem);
} else if (type == STREAM_INPUT__GET_RES) {
taosWriteQitem(pTask->inputQueue->queue, pItem);
}
if (type != STREAM_INPUT__GET_RES && type != STREAM_INPUT__CHECKPOINT && pTask->triggerParam != 0) {
atomic_val_compare_exchange_8(&pTask->triggerStatus, TASK_TRIGGER_STATUS__INACTIVE, TASK_TRIGGER_STATUS__ACTIVE);
}
#if 0
// TODO: back pressure
atomic_store_8(&pTask->inputStatus, TASK_INPUT_STATUS__NORMAL);
#endif
return 0;
}
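/*
 * Illustrative usage (editorial sketch, not part of this change): a caller such as
 * streamTaskEnqueue converts an incoming dispatch request to a data block and hands it
 * to the task's input queue:
 *
 *   SStreamDataBlock* pData = taosAllocateQitem(sizeof(SStreamDataBlock), DEF_QITEM, 0);
 *   streamDispatchReqToData(pReq, pData);
 *   if (tAppendDataForStream(pTask, (SStreamQueueItem*)pData) < 0) {
 *     // enqueue failed (currently only the submit path can fail, on out-of-memory)
 *   }
 *
 * For STREAM_INPUT__DATA_SUBMIT items the function enqueues a clone of the submit block
 * (bumping its reference count) rather than the caller's copy.
 */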
void* streamQueueNextItem(SStreamQueue* queue) {
int8_t dequeueFlag = atomic_exchange_8(&queue->status, STREAM_QUEUE__PROCESSING);
if (dequeueFlag == STREAM_QUEUE__FAILED) {
ASSERT(queue->qItem != NULL);
return streamQueueCurItem(queue);
} else {
queue->qItem = NULL;
taosGetQitem(queue->qall, &queue->qItem);
if (queue->qItem == NULL) {
taosReadAllQitems(queue->queue, queue->qall);
taosGetQitem(queue->qall, &queue->qItem);
}
return streamQueueCurItem(queue);
}
}
\ No newline at end of file
......@@ -48,10 +48,12 @@ int32_t streamRetrieveReqToData(const SStreamRetrieveReq* pReq, SStreamDataBlock
if (pArray == NULL) {
return -1;
}
taosArrayPush(pArray, &(SSDataBlock){0});
SRetrieveTableRsp* pRetrieve = pReq->pRetrieve;
SSDataBlock* pDataBlock = taosArrayGet(pArray, 0);
blockDecode(pDataBlock, pRetrieve->data);
// TODO: refactor
pDataBlock->info.window.skey = be64toh(pRetrieve->skey);
pDataBlock->info.window.ekey = be64toh(pRetrieve->ekey);
......@@ -68,32 +70,51 @@ int32_t streamRetrieveReqToData(const SStreamRetrieveReq* pReq, SStreamDataBlock
SStreamDataSubmit2* streamDataSubmitNew(SPackedData submit) {
SStreamDataSubmit2* pDataSubmit = (SStreamDataSubmit2*)taosAllocateQitem(sizeof(SStreamDataSubmit2), DEF_QITEM, 0);
if (pDataSubmit == NULL) return NULL;
if (pDataSubmit == NULL) {
return NULL;
}
pDataSubmit->dataRef = (int32_t*)taosMemoryMalloc(sizeof(int32_t));
if (pDataSubmit->dataRef == NULL) goto FAIL;
if (pDataSubmit->dataRef == NULL) {
taosFreeQitem(pDataSubmit);
return NULL;
}
pDataSubmit->submit = submit;
*pDataSubmit->dataRef = 1;
*pDataSubmit->dataRef = 1; // initialize the reference count to 1
pDataSubmit->type = STREAM_INPUT__DATA_SUBMIT;
return pDataSubmit;
FAIL:
taosFreeQitem(pDataSubmit);
return NULL;
}
void streamDataSubmitDestroy(SStreamDataSubmit2* pDataSubmit) {
int32_t ref = atomic_sub_fetch_32(pDataSubmit->dataRef, 1);
ASSERT(ref >= 0 && pDataSubmit->type == STREAM_INPUT__DATA_SUBMIT);
if (ref == 0) {
taosMemoryFree(pDataSubmit->submit.msgStr);
taosMemoryFree(pDataSubmit->dataRef);
}
}
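/*
 * Lifecycle note (editorial): the submit payload is shared through *dataRef.
 * streamDataSubmitNew() starts the count at 1, streamSubmitBlockClone() below adds one
 * reference for each enqueued copy, and streamDataSubmitDestroy() drops a reference,
 * freeing msgStr and the counter itself only when the last reference goes away.
 */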
SStreamMergedSubmit2* streamMergedSubmitNew() {
SStreamMergedSubmit2* pMerged = (SStreamMergedSubmit2*)taosAllocateQitem(sizeof(SStreamMergedSubmit2), DEF_QITEM, 0);
if (pMerged == NULL) {
return NULL;
}
if (pMerged == NULL) return NULL;
pMerged->submits = taosArrayInit(0, sizeof(SPackedData));
pMerged->dataRefs = taosArrayInit(0, sizeof(void*));
if (pMerged->dataRefs == NULL || pMerged->submits == NULL) goto FAIL;
if (pMerged->dataRefs == NULL || pMerged->submits == NULL) {
taosArrayDestroy(pMerged->submits);
taosArrayDestroy(pMerged->dataRefs);
taosFreeQitem(pMerged);
return NULL;
}
pMerged->type = STREAM_INPUT__MERGED_SUBMIT;
return pMerged;
FAIL:
if (pMerged->submits) taosArrayDestroy(pMerged->submits);
if (pMerged->dataRefs) taosArrayDestroy(pMerged->dataRefs);
taosFreeQitem(pMerged);
return NULL;
}
int32_t streamMergeSubmit(SStreamMergedSubmit2* pMerged, SStreamDataSubmit2* pSubmit) {
......@@ -107,26 +128,17 @@ static FORCE_INLINE void streamDataSubmitRefInc(SStreamDataSubmit2* pDataSubmit)
atomic_add_fetch_32(pDataSubmit->dataRef, 1);
}
SStreamDataSubmit2* streamSubmitRefClone(SStreamDataSubmit2* pSubmit) {
SStreamDataSubmit2* streamSubmitBlockClone(SStreamDataSubmit2* pSubmit) {
SStreamDataSubmit2* pSubmitClone = taosAllocateQitem(sizeof(SStreamDataSubmit2), DEF_QITEM, 0);
if (pSubmitClone == NULL) {
return NULL;
}
streamDataSubmitRefInc(pSubmit);
memcpy(pSubmitClone, pSubmit, sizeof(SStreamDataSubmit2));
return pSubmitClone;
}
void streamDataSubmitRefDec(SStreamDataSubmit2* pDataSubmit) {
int32_t ref = atomic_sub_fetch_32(pDataSubmit->dataRef, 1);
ASSERT(ref >= 0);
if (ref == 0) {
taosMemoryFree(pDataSubmit->submit.msgStr);
taosMemoryFree(pDataSubmit->dataRef);
}
}
SStreamQueueItem* streamMergeQueueItem(SStreamQueueItem* dst, SStreamQueueItem* elem) {
ASSERT(elem);
if (dst->type == STREAM_INPUT__DATA_BLOCK && elem->type == STREAM_INPUT__DATA_BLOCK) {
......@@ -164,7 +176,7 @@ void streamFreeQitem(SStreamQueueItem* data) {
taosArrayDestroyEx(((SStreamDataBlock*)data)->blocks, (FDelete)blockDataFreeRes);
taosFreeQitem(data);
} else if (type == STREAM_INPUT__DATA_SUBMIT) {
streamDataSubmitRefDec((SStreamDataSubmit2*)data);
streamDataSubmitDestroy((SStreamDataSubmit2*)data);
taosFreeQitem(data);
} else if (type == STREAM_INPUT__MERGED_SUBMIT) {
SStreamMergedSubmit2* pMerge = (SStreamMergedSubmit2*)data;
......
......@@ -34,7 +34,7 @@ static int32_t streamTaskExecImpl(SStreamTask* pTask, const void* data, SArray*
} else if (pItem->type == STREAM_INPUT__DATA_SUBMIT) {
ASSERT(pTask->taskLevel == TASK_LEVEL__SOURCE);
const SStreamDataSubmit2* pSubmit = (const SStreamDataSubmit2*)data;
qDebug("task %d %p set submit input %p %p %d %" PRId64, pTask->taskId, pTask, pSubmit, pSubmit->submit.msgStr,
qDebug("stream task:%d %p set submit input %p %p %d %" PRId64, pTask->taskId, pTask, pSubmit, pSubmit->submit.msgStr,
pSubmit->submit.msgLen, pSubmit->submit.ver);
qSetMultiStreamInput(exec, &pSubmit->submit, 1, STREAM_INPUT__DATA_SUBMIT);
} else if (pItem->type == STREAM_INPUT__DATA_BLOCK || pItem->type == STREAM_INPUT__DATA_RETRIEVE) {
......@@ -268,9 +268,10 @@ int32_t streamExecForAll(SStreamTask* pTask) {
SArray* pRes = taosArrayInit(0, sizeof(SSDataBlock));
qDebug("stream task %d exec begin, msg batch: %d", pTask->taskId, batchCnt);
qDebug("stream task:%d exec begin, msg batch: %d", pTask->taskId, batchCnt);
streamTaskExecImpl(pTask, input, pRes);
qDebug("stream task %d exec end", pTask->taskId);
qDebug("stream task:%d exec end", pTask->taskId);
if (taosArrayGetSize(pRes) != 0) {
SStreamDataBlock* qRes = taosAllocateQitem(sizeof(SStreamDataBlock), DEF_QITEM, 0);
......
......@@ -33,7 +33,6 @@ SWalReader *walOpenReader(SWal *pWal, SWalFilterCond *cond) {
pReader->pLogFile = NULL;
pReader->curVersion = -1;
pReader->curFileFirstVer = -1;
pReader->curInvalid = 1;
pReader->capacity = 0;
if (cond) {
pReader->cond = *cond;
......@@ -81,7 +80,6 @@ int32_t walNextValidMsg(SWalReader *pReader) {
wDebug("vgId:%d, wal start to fetch, index:%" PRId64 ", last index:%" PRId64 " commit index:%" PRId64
", applied index:%" PRId64 ", end index:%" PRId64,
pReader->pWal->cfg.vgId, fetchVer, lastVer, committedVer, appliedVer, endVer);
pReader->curStopped = 0;
while (fetchVer <= endVer) {
if (walFetchHeadNew(pReader, fetchVer) < 0) {
return -1;
......@@ -99,7 +97,6 @@ int32_t walNextValidMsg(SWalReader *pReader) {
fetchVer = pReader->curVersion;
}
}
pReader->curStopped = 1;
return -1;
}
......@@ -196,17 +193,16 @@ int32_t walReadSeekVerImpl(SWalReader *pReader, int64_t ver) {
return -1;
}
wDebug("vgId:%d, wal version reset from %" PRId64 "(invalid:%d) to %" PRId64, pReader->pWal->cfg.vgId,
pReader->curVersion, pReader->curInvalid, ver);
wDebug("vgId:%d, wal version reset from %" PRId64 " to %" PRId64, pReader->pWal->cfg.vgId,
pReader->curVersion, ver);
pReader->curVersion = ver;
pReader->curInvalid = 0;
return 0;
}
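/*
 * Editorial note: with curInvalid removed, reader positioning is tracked solely by
 * curVersion; walReadSeekVer() below treats a matching curVersion as "already
 * positioned" and otherwise performs a real seek through walReadSeekVerImpl().
 */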
int32_t walReadSeekVer(SWalReader *pReader, int64_t ver) {
SWal *pWal = pReader->pWal;
if (!pReader->curInvalid && ver == pReader->curVersion) {
if (ver == pReader->curVersion) {
wDebug("vgId:%d, wal index:%" PRId64 " match, no need to reset", pReader->pWal->cfg.vgId, ver);
return 0;
}
......@@ -238,7 +234,7 @@ static int32_t walFetchHeadNew(SWalReader *pRead, int64_t fetchVer) {
wDebug("vgId:%d, wal starts to fetch head, index:%" PRId64, pRead->pWal->cfg.vgId, fetchVer);
if (pRead->curInvalid || pRead->curVersion != fetchVer) {
if (pRead->curVersion != fetchVer) {
if (walReadSeekVer(pRead, fetchVer) < 0) {
// pRead->curVersion = fetchVer;
// pRead->curInvalid = 1;
......@@ -344,7 +340,7 @@ int32_t walFetchHead(SWalReader *pRead, int64_t ver, SWalCkHead *pHead) {
return -1;
}
if (pRead->curInvalid || pRead->curVersion != ver) {
if (pRead->curVersion != ver) {
code = walReadSeekVer(pRead, ver);
if (code < 0) {
// pRead->curVersion = ver;
......@@ -479,7 +475,7 @@ int32_t walReadVer(SWalReader *pReader, int64_t ver) {
taosThreadMutexLock(&pReader->mutex);
if (pReader->curInvalid || pReader->curVersion != ver) {
if (pReader->curVersion != ver) {
if (walReadSeekVer(pReader, ver) < 0) {
wError("vgId:%d, unexpected wal log, index:%" PRId64 ", since %s", pReader->pWal->cfg.vgId, ver, terrstr());
taosThreadMutexUnlock(&pReader->mutex);
......@@ -575,7 +571,6 @@ void walReadReset(SWalReader *pReader) {
taosThreadMutexLock(&pReader->mutex);
taosCloseFile(&pReader->pIdxFile);
taosCloseFile(&pReader->pLogFile);
pReader->curInvalid = 1;
pReader->curFileFirstVer = -1;
pReader->curVersion = -1;
taosThreadMutexUnlock(&pReader->mutex);
......
......@@ -628,6 +628,7 @@ TAOS_DEFINE_ERROR(TSDB_CODE_INDEX_INVALID_FILE, "Index file is inval
TAOS_DEFINE_ERROR(TSDB_CODE_TMQ_INVALID_MSG, "Invalid message")
TAOS_DEFINE_ERROR(TSDB_CODE_TMQ_CONSUMER_MISMATCH, "Consumer mismatch")
TAOS_DEFINE_ERROR(TSDB_CODE_TMQ_CONSUMER_CLOSED, "Consumer closed")
TAOS_DEFINE_ERROR(TSDB_CODE_TMQ_CONSUMER_ERROR,      "Consumer error, check the log for details")
// stream
TAOS_DEFINE_ERROR(TSDB_CODE_STREAM_TASK_NOT_EXIST, "Stream task not exist")
......
......@@ -94,7 +94,7 @@
,,y,system-test,./pytest.sh python3 ./test.py -f 7-tmq/tmqDropStbCtb.py
,,y,system-test,./pytest.sh python3 ./test.py -f 7-tmq/tmqDropNtb-snapshot0.py
,,y,system-test,./pytest.sh python3 ./test.py -f 7-tmq/tmqDropNtb-snapshot1.py
,,y,system-test,./pytest.sh python3 ./test.py -f 7-tmq/tmqUdf.py
#,,y,system-test,./pytest.sh python3 ./test.py -f 7-tmq/tmqUdf.py
,,y,system-test,./pytest.sh python3 ./test.py -f 7-tmq/tmqUdf-multCtb-snapshot0.py
,,y,system-test,./pytest.sh python3 ./test.py -f 7-tmq/tmqUdf-multCtb-snapshot1.py
,,y,system-test,./pytest.sh python3 ./test.py -f 7-tmq/stbTagFilter-1ctb.py
......
......@@ -111,8 +111,8 @@ endi
$consumerId = 0
$totalMsgOfStb = $ctbNum * $rowsPerCtb
$expectmsgcnt = $totalMsgOfStb
sql insert into consumeinfo values (now , $consumerId , $topicList , $keyList , $expectmsgcnt , $ifcheckdata , $ifmanualcommit )
$expectmsgcnt = 1
sql insert into consumeinfo values (now , $consumerId , $topicList , $keyList , $totalMsgOfStb , $ifcheckdata , $ifmanualcommit )
print == start consumer to pull msgs from stb
print == tsim/tmq/consume.sh -d $dbName -y $pullDelay -g $showMsg -r $showRow -w $cdbName -s start
......@@ -134,7 +134,7 @@ if $data[0][2] != $expectmsgcnt then
print expect $expectmsgcnt , actual $data02
return -1
endi
if $data[0][3] != $expectmsgcnt then
if $data[0][3] != $totalMsgOfStb then
return -1
endi
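# editorial note (assumption inferred from these changes): column 2 of the queried
# consume result holds the number of poll messages consumed and column 3 the number of
# rows, so the expected message count becomes a small fixed value while row totals are
# still checked against $totalMsgOfStb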
$loop_cnt = $loop_cnt + 1
......@@ -183,8 +183,8 @@ endi
$consumerId = 0
$totalMsgOfCtb = $rowsPerCtb
$expectmsgcnt = $totalMsgOfCtb
sql insert into consumeinfo values (now , $consumerId , $topicList , $keyList , $expectmsgcnt , $ifcheckdata , $ifmanualcommit )
$expectmsgcnt = 1
sql insert into consumeinfo values (now , $consumerId , $topicList , $keyList , $totalMsgOfCtb , $ifcheckdata , $ifmanualcommit )
print == start consumer to pull msgs from ctb
print == tsim/tmq/consume.sh -d $dbName -y $pullDelay -g $showMsg -r $showRow -s start
......@@ -202,7 +202,7 @@ endi
if $data[0][1] != $consumerId then
return -1
endi
if $data[0][2] != $totalMsgOfCtb then
if $data[0][2] != $expectmsgcnt then
return -1
endi
if $data[0][3] != $totalMsgOfCtb then
......@@ -254,8 +254,8 @@ endi
$consumerId = 0
$totalMsgOfNtb = $rowsPerCtb
$expectmsgcnt = $totalMsgOfNtb
sql insert into consumeinfo values (now , $consumerId , $topicList , $keyList , $expectmsgcnt , $ifcheckdata , $ifmanualcommit )
$expectmsgcnt = 1
sql insert into consumeinfo values (now , $consumerId , $topicList , $keyList , $totalMsgOfNtb , $ifcheckdata , $ifmanualcommit )
print == start consumer to pull msgs from ntb
print == tsim/tmq/consume.sh -d $dbName -y $pullDelay -g $showMsg -r $showRow -s start
......@@ -273,7 +273,7 @@ endi
if $data[0][1] != $consumerId then
return -1
endi
if $data[0][2] != $totalMsgOfNtb then
if $data[0][2] != $expectmsgcnt then
return -1
endi
if $data[0][3] != $totalMsgOfNtb then
......
......@@ -111,11 +111,11 @@ endi
$consumerId = 0
$totalMsgOfStb = $ctbNum * $rowsPerCtb
$expectmsgcnt = $totalMsgOfStb
sql insert into consumeinfo values (now , $consumerId , $topicList , $keyList , $expectmsgcnt , $ifcheckdata , $ifmanualcommit )
$expectmsgcnt = 1
sql insert into consumeinfo values (now , $consumerId , $topicList , $keyList , $totalMsgOfStb , $ifcheckdata , $ifmanualcommit )
$consumerId = 1
sql insert into consumeinfo values (now+1s , $consumerId , $topicList , $keyList , $expectmsgcnt , $ifcheckdata , $ifmanualcommit )
sql insert into consumeinfo values (now+1s , $consumerId , $topicList , $keyList , $totalMsgOfStb , $ifcheckdata , $ifmanualcommit )
print == start consumer to pull msgs from stb
print == tsim/tmq/consume.sh -d $dbName -y $pullDelay -g $showMsg -r $showRow -w $cdbName -s start
......@@ -142,14 +142,14 @@ if $data[0][1] == 1 then
endi
endi
# either $data[0][2] == $totalMsgOfStb and $data[1][2] == 0
# or $data[0][2] == 0 and $data[1][2] == $totalMsgOfStb
if $data[0][2] == $totalMsgOfStb then
# either $data[0][2] == $expectmsgcnt and $data[1][2] == 0
# or $data[0][2] == 0 and $data[1][2] == $expectmsgcnt
if $data[0][2] == $expectmsgcnt then
if $data[1][2] == 0 then
goto check_ok_0
endi
elif $data[0][2] == 0 then
if $data[1][2] == $totalMsgOfStb then
if $data[1][2] == $expectmsgcnt then
goto check_ok_0
endi
endi
......@@ -214,10 +214,10 @@ endi
$consumerId = 0
$totalMsgOfCtb = $rowsPerCtb
$expectmsgcnt = $totalMsgOfCtb
sql insert into consumeinfo values (now , $consumerId , $topicList , $keyList , $expectmsgcnt , $ifcheckdata , $ifmanualcommit )
$expectmsgcnt = 1
sql insert into consumeinfo values (now , $consumerId , $topicList , $keyList , $totalMsgOfCtb , $ifcheckdata , $ifmanualcommit )
$consumerId = 1
sql insert into consumeinfo values (now+1s , $consumerId , $topicList , $keyList , $expectmsgcnt , $ifcheckdata , $ifmanualcommit )
sql insert into consumeinfo values (now+1s , $consumerId , $topicList , $keyList , $totalMsgOfCtb , $ifcheckdata , $ifmanualcommit )
print == start consumer to pull msgs from ctb
print == tsim/tmq/consume.sh -d $dbName -y $pullDelay -g $showMsg -r $showRow -s start
......@@ -244,14 +244,14 @@ if $data[0][1] == 1 then
endi
endi
# either $data[0][2] == $totalMsgOfCtb and $data[1][2] == 0
# or $data[0][2] == 0 and $data[1][2] == $totalMsgOfCtb
if $data[0][2] == $totalMsgOfCtb then
# either $data[0][2] == $expectmsgcnt and $data[1][2] == 0
# or $data[0][2] == 0 and $data[1][2] == $expectmsgcnt
if $data[0][2] == $expectmsgcnt then
if $data[1][2] == 0 then
goto check_ok_2
endi
elif $data[0][2] == 0 then
if $data[1][2] == $totalMsgOfCtb then
if $data[1][2] == $expectmsgcnt then
goto check_ok_2
endi
endi
......@@ -316,10 +316,10 @@ endi
$consumerId = 0
$totalMsgOfNtb = $rowsPerCtb
$expectmsgcnt = $totalMsgOfNtb
sql insert into consumeinfo values (now , $consumerId , $topicList , $keyList , $expectmsgcnt , $ifcheckdata , $ifmanualcommit )
$expectmsgcnt = 1
sql insert into consumeinfo values (now , $consumerId , $topicList , $keyList , $totalMsgOfNtb , $ifcheckdata , $ifmanualcommit )
$consumerId = 1
sql insert into consumeinfo values (now+1s , $consumerId , $topicList , $keyList , $expectmsgcnt , $ifcheckdata , $ifmanualcommit )
sql insert into consumeinfo values (now+1s , $consumerId , $topicList , $keyList , $totalMsgOfNtb , $ifcheckdata , $ifmanualcommit )
print == start consumer to pull msgs from ntb
print == tsim/tmq/consume.sh -d $dbName -y $pullDelay -g $showMsg -r $showRow -s start
......@@ -346,14 +346,14 @@ if $data[1][1] == 0 then
endi
endi
# either $data[0][2] == $totalMsgOfNtb and $data[1][2] == 0
# or $data[0][2] == 0 and $data[1][2] == $totalMsgOfNtb
if $data[0][2] == $totalMsgOfNtb then
# either $data[0][2] == $expectmsgcnt and $data[1][2] == 0
# or $data[0][2] == 0 and $data[1][2] == $expectmsgcnt
if $data[0][2] == $expectmsgcnt then
if $data[1][2] == 0 then
goto check_ok_4
endi
elif $data[0][2] == 0 then
if $data[1][2] == $totalMsgOfNtb then
if $data[1][2] == $expectmsgcnt then
goto check_ok_4
endi
endi
......
......@@ -83,8 +83,8 @@ $topicList = $topicList . '
$consumerId = 0
$totalMsgOfStb = $ctbNum * $rowsPerCtb
$totalMsgOfStb = $totalMsgOfStb * $topicNum
$expectmsgcnt = $totalMsgOfStb
sql insert into consumeinfo values (now , $consumerId , $topicList , $keyList , $expectmsgcnt , $ifcheckdata , $ifmanualcommit )
$expectmsgcnt = $topicNum
sql insert into consumeinfo values (now , $consumerId , $topicList , $keyList , $totalMsgOfStb , $ifcheckdata , $ifmanualcommit )
print == start consumer to pull msgs from stb
print == tsim/tmq/consume.sh -d $dbName -y $pullDelay -g $showMsg -r $showRow -w $dbName -s start
......@@ -105,7 +105,7 @@ endi
if $data[0][2] != $expectmsgcnt then
return -1
endi
if $data[0][3] != $expectmsgcnt then
if $data[0][3] != $totalMsgOfStb then
return -1
endi
......@@ -140,8 +140,8 @@ $topicList = $topicList . '
$consumerId = 0
$totalMsgOfCtb = $rowsPerCtb * $topicNum
$expectmsgcnt = $totalMsgOfCtb
sql insert into consumeinfo values (now , $consumerId , $topicList , $keyList , $expectmsgcnt , $ifcheckdata , $ifmanualcommit )
$expectmsgcnt = $topicNum
sql insert into consumeinfo values (now , $consumerId , $topicList , $keyList , $totalMsgOfCtb , $ifcheckdata , $ifmanualcommit )
print == start consumer to pull msgs from ctb
print == tsim/tmq/consume.sh -d $dbName -y $pullDelay -g $showMsg -r $showRow -s start
......@@ -159,7 +159,7 @@ endi
if $data[0][1] != $consumerId then
return -1
endi
if $data[0][2] != $totalMsgOfCtb then
if $data[0][2] != $expectmsgcnt then
return -1
endi
if $data[0][3] != $totalMsgOfCtb then
......@@ -197,8 +197,8 @@ $topicList = $topicList . '
$consumerId = 0
$totalMsgOfNtb = $rowsPerCtb * $topicNum
$expectmsgcnt = $totalMsgOfNtb
sql insert into consumeinfo values (now , $consumerId , $topicList , $keyList , $expectmsgcnt , $ifcheckdata , $ifmanualcommit )
$expectmsgcnt = $topicNum
sql insert into consumeinfo values (now , $consumerId , $topicList , $keyList , $totalMsgOfNtb , $ifcheckdata , $ifmanualcommit )
print == start consumer to pull msgs from ntb
print == tsim/tmq/consume.sh -d $dbName -y $pullDelay -g $showMsg -r $showRow -s start
......@@ -216,7 +216,7 @@ endi
if $data[0][1] != $consumerId then
return -1
endi
if $data[0][2] != $totalMsgOfNtb then
if $data[0][2] != $expectmsgcnt then
return -1
endi
if $data[0][3] != $totalMsgOfNtb then
......
......@@ -82,10 +82,10 @@ $topicList = $topicList . '
$consumerId = 0
$totalMsgOfStb = $ctbNum * $rowsPerCtb
$totalMsgOfStb = $totalMsgOfStb * $topicNum
$expectmsgcnt = $totalMsgOfStb
sql insert into consumeinfo values (now , $consumerId , $topicList , $keyList , $expectmsgcnt , $ifcheckdata , $ifmanualcommit )
$expectmsgcnt = $topicNum
sql insert into consumeinfo values (now , $consumerId , $topicList , $keyList , $totalMsgOfStb , $ifcheckdata , $ifmanualcommit )
$consumerId = 1
sql insert into consumeinfo values (now+1s , $consumerId , $topicList , $keyList , $expectmsgcnt , $ifcheckdata , $ifmanualcommit )
sql insert into consumeinfo values (now+1s , $consumerId , $topicList , $keyList , $totalMsgOfStb , $ifcheckdata , $ifmanualcommit )
print == start consumer to pull msgs from stb
print == tsim/tmq/consume.sh -d $dbName -y $pullDelay -g $showMsg -r $showRow -w $dbName -s start
......@@ -112,14 +112,14 @@ if $data[0][1] == 1 then
endi
endi
# either $data[0][2] == $totalMsgOfStb and $data[1][2] == 0
# or $data[0][2] == 0 and $data[1][2] == $totalMsgOfStb
if $data[0][2] == $totalMsgOfStb then
# either $data[0][2] == $expectmsgcnt and $data[1][2] == 0
# or $data[0][2] == 0 and $data[1][2] == $expectmsgcnt
if $data[0][2] == $expectmsgcnt then
if $data[1][2] == 0 then
goto check_ok_0
endi
elif $data[0][2] == 0 then
if $data[1][2] == $totalMsgOfStb then
if $data[1][2] == $expectmsgcnt then
goto check_ok_0
endi
endi
......@@ -169,10 +169,10 @@ $topicList = $topicList . '
$consumerId = 0
$totalMsgOfCtb = $rowsPerCtb * $topicNum
$expectmsgcnt = $totalMsgOfCtb
sql insert into consumeinfo values (now , $consumerId , $topicList , $keyList , $expectmsgcnt , $ifcheckdata , $ifmanualcommit )
$expectmsgcnt = $topicNum
sql insert into consumeinfo values (now , $consumerId , $topicList , $keyList , $totalMsgOfCtb , $ifcheckdata , $ifmanualcommit )
$consumerId = 1
sql insert into consumeinfo values (now+1s , $consumerId , $topicList , $keyList , $expectmsgcnt , $ifcheckdata , $ifmanualcommit )
sql insert into consumeinfo values (now+1s , $consumerId , $topicList , $keyList , $totalMsgOfCtb , $ifcheckdata , $ifmanualcommit )
print == start consumer to pull msgs from ctb
print == tsim/tmq/consume.sh -d $dbName -y $pullDelay -g $showMsg -r $showRow -s start
......@@ -199,14 +199,14 @@ if $data[0][1] == 1 then
endi
endi
# either $data[0][2] == $totalMsgOfCtb and $data[1][2] == 0
# or $data[0][2] == 0 and $data[1][2] == $totalMsgOfCtb
if $data[0][2] == $totalMsgOfCtb then
# either $data[0][2] == $expectmsgcnt and $data[1][2] == 0
# or $data[0][2] == 0 and $data[1][2] == $expectmsgcnt
if $data[0][2] == $expectmsgcnt then
if $data[1][2] == 0 then
goto check_ok_2
endi
elif $data[0][2] == 0 then
if $data[1][2] == $totalMsgOfCtb then
if $data[1][2] == $expectmsgcnt then
goto check_ok_2
endi
endi
......@@ -256,10 +256,10 @@ $topicList = $topicList . '
$consumerId = 0
$totalMsgOfNtb = $rowsPerCtb * $topicNum
$expectmsgcnt = $totalMsgOfNtb
sql insert into consumeinfo values (now , $consumerId , $topicList , $keyList , $expectmsgcnt , $ifcheckdata , $ifmanualcommit )
$expectmsgcnt = $topicNum
sql insert into consumeinfo values (now , $consumerId , $topicList , $keyList , $totalMsgOfNtb , $ifcheckdata , $ifmanualcommit )
$consumerId = 1
sql insert into consumeinfo values (now+1s , $consumerId , $topicList , $keyList , $expectmsgcnt , $ifcheckdata , $ifmanualcommit )
sql insert into consumeinfo values (now+1s , $consumerId , $topicList , $keyList , $totalMsgOfNtb , $ifcheckdata , $ifmanualcommit )
print == start consumer to pull msgs from ntb
print == tsim/tmq/consume.sh -d $dbName -y $pullDelay -g $showMsg -r $showRow -s start
......@@ -286,14 +286,14 @@ if $data[1][1] == 0 then
endi
endi
# either $data[0][2] == $totalMsgOfNtb and $data[1][2] == 0
# or $data[0][2] == 0 and $data[1][2] == $totalMsgOfNtb
if $data[0][2] == $totalMsgOfNtb then
# either $data[0][2] == $expectmsgcnt and $data[1][2] == 0
# or $data[0][2] == 0 and $data[1][2] == $expectmsgcnt
if $data[0][2] == $expectmsgcnt then
if $data[1][2] == 0 then
goto check_ok_4
endi
elif $data[0][2] == 0 then
if $data[1][2] == $totalMsgOfNtb then
if $data[1][2] == $expectmsgcnt then
goto check_ok_4
endi
endi
......
......@@ -80,8 +80,8 @@ $topicList = $topicList . '
$consumerId = 0
$totalMsgOfOneTopic = $ctbNum * $rowsPerCtb
$totalMsgOfStb = $totalMsgOfOneTopic * $topicNum
$expectmsgcnt = $totalMsgOfStb
sql insert into consumeinfo values (now , $consumerId , $topicList , $keyList , $expectmsgcnt , $ifcheckdata , $ifmanualcommit )
$expectmsgcnt = 3
sql insert into consumeinfo values (now , $consumerId , $topicList , $keyList , $totalMsgOfStb , $ifcheckdata , $ifmanualcommit )
$topicList = ' . topic_stb_all
......@@ -89,7 +89,7 @@ $topicList = $topicList . ,
$topicList = $topicList . topic_stb_function
$topicList = $topicList . '
$consumerId = 1
sql insert into consumeinfo values (now +1s , $consumerId , $topicList , $keyList , $expectmsgcnt , $ifcheckdata , $ifmanualcommit )
sql insert into consumeinfo values (now +1s , $consumerId , $topicList , $keyList , $totalMsgOfStb , $ifcheckdata , $ifmanualcommit )
print == start consumer to pull msgs from stb
print == tsim/tmq/consume.sh -d $dbName -y $pullDelay -g $showMsg -r $showRow -w $dbName -s start
......@@ -118,40 +118,40 @@ endi
# $data[0][2]/$data[1][2] should be between $totalMsgOfOneTopic and $totalMsgOfStb.
if $data[0][2] < $totalMsgOfOneTopic then
return -1
endi
if $data[0][2] > $totalMsgOfStb then
return -1
endi
if $data[1][2] < $totalMsgOfOneTopic then
#if $data[0][2] < $totalMsgOfOneTopic then
# return -1
#endi
if $data[0][2] > $expectmsgcnt then
return -1
endi
if $data[1][2] > $totalMsgOfStb then
#if $data[1][2] < $totalMsgOfOneTopic then
# return -1
#endi
if $data[1][2] > $expectmsgcnt then
return -1
endi
$totalMsgCons = $totalMsgOfOneTopic + $totalMsgOfStb
#$totalMsgCons = $totalMsgOfOneTopic + $totalMsgOfStb
$sumOfMsgCnt = $data[0][2] + $data[1][2]
if $sumOfMsgCnt != $totalMsgCons then
print total: $totalMsgCons
if $sumOfMsgCnt != $expectmsgcnt then
print total: $expectmsgcnt
print sum: $sumOfMsgCnt
return -1
endi
# $data[0][3]/$data[1][3] should be between $totalMsgOfOneTopic and $totalMsgOfStb.
if $data[0][3] < $totalMsgOfOneTopic then
return -1
endi
if $data[0][3] > $totalMsgOfStb then
return -1
endi
if $data[1][3] < $totalMsgOfOneTopic then
return -1
endi
if $data[1][3] > $totalMsgOfStb then
return -1
endi
#if $data[0][3] < $totalMsgOfStb then
# return -1
#endi
#if $data[0][3] > $totalMsgOfStb then
# return -1
#endi
#if $data[1][3] < $totalMsgOfStb then
# return -1
#endi
#if $data[1][3] > $totalMsgOfStb then
# return -1
#endi
$totalMsgCons = $totalMsgOfOneTopic + $totalMsgOfStb
$sumOfRows = $data[0][3] + $data[1][3]
......@@ -189,15 +189,15 @@ $consumerId = 0
$totalMsgOfOneTopic = $rowsPerCtb
$totalMsgOfCtb = $totalMsgOfOneTopic * $topicNum
$expectmsgcnt = $totalMsgOfCtb
sql insert into consumeinfo values (now , $consumerId , $topicList , $keyList , $expectmsgcnt , $ifcheckdata , $ifmanualcommit )
$expectmsgcnt = $topicNum
sql insert into consumeinfo values (now , $consumerId , $topicList , $keyList , $totalMsgOfCtb , $ifcheckdata , $ifmanualcommit )
$topicList = ' . topic_ctb_function
$topicList = $topicList . ,
$topicList = $topicList . topic_ctb_all
$topicList = $topicList . '
$consumerId = 1
sql insert into consumeinfo values (now +1s, $consumerId , $topicList , $keyList , $expectmsgcnt , $ifcheckdata , $ifmanualcommit )
sql insert into consumeinfo values (now +1s, $consumerId , $topicList , $keyList , $totalMsgOfCtb , $ifcheckdata , $ifmanualcommit )
print == start consumer to pull msgs from ctb
print == tsim/tmq/consume.sh -d $dbName -y $pullDelay -g $showMsg -r $showRow -w $cdbName -s start
......@@ -226,24 +226,24 @@ endi
# either $data[0][2] $totalMsgOfOneTopic and $data[1][2] == $totalMsgOfCtb
# or $data[0][2] $totalMsgOfCtb and $data[1][2] == $totalMsgOfOneTopic
if $data[0][2] == $totalMsgOfOneTopic then
if $data[1][2] == $totalMsgOfCtb then
if $data[0][2] == $topicNum then
if $data[1][2] == 1 then
goto check_ok_0
endi
elif $data[1][2] == $totalMsgOfOneTopic then
if $data[0][2] == $totalMsgOfCtb then
elif $data[0][2] == 1 then
if $data[1][2] == $topicNum then
goto check_ok_0
endi
endi
return -1
check_ok_0:
if $data[0][3] == $totalMsgOfOneTopic then
if $data[1][3] == $totalMsgOfCtb then
if $data[0][3] == $totalMsgOfCtb then
if $data[1][3] == $totalMsgOfOneTopic then
goto check_ok_1
endi
elif $data[1][3] == $totalMsgOfOneTopic then
if $data[0][3] == $totalMsgOfCtb then
elif $data[0][3] == $totalMsgOfOneTopic then
if $data[1][3] == $totalMsgOfCtb then
goto check_ok_1
endi
endi
......@@ -280,8 +280,8 @@ $topicList = $topicList . '
$consumerId = 0
$totalMsgOfOneTopic = $rowsPerCtb
$totalMsgOfNtb = $totalMsgOfOneTopic * $topicNum
$expectmsgcnt = $totalMsgOfNtb
sql insert into consumeinfo values (now , $consumerId , $topicList , $keyList , $expectmsgcnt , $ifcheckdata , $ifmanualcommit )
$expectmsgcnt = $topicNum
sql insert into consumeinfo values (now , $consumerId , $topicList , $keyList , $totalMsgOfNtb , $ifcheckdata , $ifmanualcommit )
$topicList = ' . topic_ntb_function
......@@ -289,7 +289,7 @@ $topicList = $topicList . ,
$topicList = $topicList . topic_ntb_all
$topicList = $topicList . '
$consumerId = 1
sql insert into consumeinfo values (now+1s , $consumerId , $topicList , $keyList , $expectmsgcnt , $ifcheckdata , $ifmanualcommit )
sql insert into consumeinfo values (now+1s , $consumerId , $topicList , $keyList , $totalMsgOfNtb , $ifcheckdata , $ifmanualcommit )
print == start consumer to pull msgs from ntb
print == tsim/tmq/consume.sh -d $dbName -y $pullDelay -g $showMsg -r $showRow -s start
......@@ -318,12 +318,12 @@ endi
# either $data[0][2] $totalMsgOfOneTopic and $data[1][2] == $totalMsgOfNtb
# or $data[0][2] $totalMsgOfNtb and $data[1][2] == $totalMsgOfOneTopic
if $data[0][2] == $totalMsgOfOneTopic then
if $data[1][2] == $totalMsgOfNtb then
if $data[0][2] == $expectmsgcnt then
if $data[1][2] == 1 then
goto check_ok_2
endi
elif $data[1][2] == $totalMsgOfOneTopic then
if $data[0][2] == $totalMsgOfNtb then
elif $data[0][2] == 1 then
if $data[1][2] == $expectmsgcnt then
goto check_ok_2
endi
endi
......@@ -334,8 +334,8 @@ if $data[0][3] == $totalMsgOfOneTopic then
if $data[1][3] == $totalMsgOfNtb then
goto check_ok_3
endi
elif $data[1][3] == $totalMsgOfOneTopic then
if $data[0][3] == $totalMsgOfNtb then
elif $data[0][3] == $totalMsgOfNtb then
if $data[1][3] == $totalMsgOfOneTopic then
goto check_ok_3
endi
endi
......
......@@ -111,8 +111,8 @@ endi
$consumerId = 0
$totalMsgOfStb = $ctbNum * $rowsPerCtb
$expectmsgcnt = $totalMsgOfStb
sql insert into consumeinfo values (now , $consumerId , $topicList , $keyList , $expectmsgcnt , $ifcheckdata , $ifmanualcommit )
$expectmsgcnt = 1
sql insert into consumeinfo values (now , $consumerId , $topicList , $keyList , $totalMsgOfStb , $ifcheckdata , $ifmanualcommit )
print == start consumer to pull msgs from stb
print == tsim/tmq/consume.sh -d $dbName -y $pullDelay -g $showMsg -r $showRow -w $cdbName -s start
......@@ -130,10 +130,10 @@ endi
if $data[0][1] != $consumerId then
return -1
endi
if $data[0][2] != $expectmsgcnt then
return -1
endi
if $data[0][3] != $expectmsgcnt then
#if $data[0][2] != $expectmsgcnt then
# return -1
#endi
if $data[0][3] != $totalMsgOfStb then
return -1
endi
$loop_cnt = $loop_cnt + 1
......@@ -182,8 +182,8 @@ endi
$consumerId = 0
$totalMsgOfCtb = $rowsPerCtb
$expectmsgcnt = $totalMsgOfCtb
sql insert into consumeinfo values (now , $consumerId , $topicList , $keyList , $expectmsgcnt , $ifcheckdata , $ifmanualcommit )
$expectmsgcnt = 1
sql insert into consumeinfo values (now , $consumerId , $topicList , $keyList , $totalMsgOfCtb , $ifcheckdata , $ifmanualcommit )
print == start consumer to pull msgs from ctb
print == tsim/tmq/consume.sh -d $dbName -y $pullDelay -g $showMsg -r $showRow -s start
......@@ -201,7 +201,7 @@ endi
if $data[0][1] != $consumerId then
return -1
endi
if $data[0][2] != $totalMsgOfCtb then
if $data[0][2] != $expectmsgcnt then
return -1
endi
if $data[0][3] != $totalMsgOfCtb then
......@@ -253,8 +253,8 @@ endi
$consumerId = 0
$totalMsgOfNtb = $rowsPerCtb
$expectmsgcnt = $totalMsgOfNtb
sql insert into consumeinfo values (now , $consumerId , $topicList , $keyList , $expectmsgcnt , $ifcheckdata , $ifmanualcommit )
$expectmsgcnt = 1
sql insert into consumeinfo values (now , $consumerId , $topicList , $keyList , $totalMsgOfNtb , $ifcheckdata , $ifmanualcommit )
print == start consumer to pull msgs from ntb
print == tsim/tmq/consume.sh -d $dbName -y $pullDelay -g $showMsg -r $showRow -s start
......@@ -272,7 +272,7 @@ endi
if $data[0][1] != $consumerId then
return -1
endi
if $data[0][2] != $totalMsgOfNtb then
if $data[0][2] != $expectmsgcnt then
return -1
endi
if $data[0][3] != $totalMsgOfNtb then
......
......@@ -110,10 +110,10 @@ endi
$consumerId = 0
$totalMsgOfStb = $ctbNum * $rowsPerCtb
$expectmsgcnt = $totalMsgOfStb
sql insert into consumeinfo values (now , $consumerId , $topicList , $keyList , $expectmsgcnt , $ifcheckdata , $ifmanualcommit )
$expectmsgcnt = 3
sql insert into consumeinfo values (now , $consumerId , $topicList , $keyList , $totalMsgOfStb , $ifcheckdata , $ifmanualcommit )
$consumerId = 1
sql insert into consumeinfo values (now+1s , $consumerId , $topicList , $keyList , $expectmsgcnt , $ifcheckdata , $ifmanualcommit )
sql insert into consumeinfo values (now+1s , $consumerId , $topicList , $keyList , $totalMsgOfStb , $ifcheckdata , $ifmanualcommit )
print == start consumer to pull msgs from stb
print == tsim/tmq/consume.sh -d $dbName -y $pullDelay -g $showMsg -r $showRow -w $cdbName -s start
......@@ -163,19 +163,19 @@ endi
if $data[0][3] <= 0 then
return -1
endi
if $data[0][3] >= $expectmsgcnt then
if $data[0][3] >= $totalMsgOfStb then
return -1
endi
if $data[1][3] <= 0 then
return -1
endi
if $data[1][3] >= $expectmsgcnt then
if $data[1][3] >= $totalMsgOfStb then
return -1
endi
$sumOfMsgRows = $data[0][3] + $data[1][3]
if $sumOfMsgRows != $expectmsgcnt then
if $sumOfMsgRows != $totalMsgOfStb then
return -1
endi
......@@ -225,10 +225,10 @@ endi
$consumerId = 0
$totalMsgOfCtb = $rowsPerCtb
$expectmsgcnt = $totalMsgOfCtb
sql insert into consumeinfo values (now , $consumerId , $topicList , $keyList , $expectmsgcnt , $ifcheckdata , $ifmanualcommit )
$expectmsgcnt = 1
sql insert into consumeinfo values (now , $consumerId , $topicList , $keyList , $totalMsgOfCtb , $ifcheckdata , $ifmanualcommit )
$consumerId = 1
sql insert into consumeinfo values (now+1s , $consumerId , $topicList , $keyList , $expectmsgcnt , $ifcheckdata , $ifmanualcommit )
sql insert into consumeinfo values (now+1s , $consumerId , $topicList , $keyList , $totalMsgOfCtb , $ifcheckdata , $ifmanualcommit )
print == start consumer to pull msgs from ctb
print == tsim/tmq/consume.sh -d $dbName -y $pullDelay -g $showMsg -r $showRow -s start
......@@ -255,13 +255,13 @@ if $data[0][1] == 1 then
endi
endi
# either $data[0][2] == $totalMsgOfCtb and $data[1][2] == 0
# or $data[0][2] == 0 and $data[1][2] == $totalMsgOfCtb
if $data[0][2] == $totalMsgOfCtb then
# either $data[0][2] == $expectmsgcnt and $data[1][2] == 0
# or $data[0][2] == 0 and $data[1][2] == $expectmsgcnt
if $data[0][2] == $expectmsgcnt then
if $data[1][2] == 0 then
goto check_ok_0
endi
elif $data[1][2] == $totalMsgOfCtb then
elif $data[1][2] == $expectmsgcnt then
if $data[0][2] == 0 then
goto check_ok_0
endi
......@@ -327,10 +327,10 @@ endi
$consumerId = 0
$totalMsgOfNtb = $rowsPerCtb
$expectmsgcnt = $totalMsgOfNtb
sql insert into consumeinfo values (now , $consumerId , $topicList , $keyList , $expectmsgcnt , $ifcheckdata , $ifmanualcommit )
$expectmsgcnt = 1
sql insert into consumeinfo values (now , $consumerId , $topicList , $keyList , $totalMsgOfNtb , $ifcheckdata , $ifmanualcommit )
$consumerId = 1
sql insert into consumeinfo values (now+1s , $consumerId , $topicList , $keyList , $expectmsgcnt , $ifcheckdata , $ifmanualcommit )
sql insert into consumeinfo values (now+1s , $consumerId , $topicList , $keyList , $totalMsgOfNtb , $ifcheckdata , $ifmanualcommit )
print == start consumer to pull msgs from ntb
print == tsim/tmq/consume.sh -d $dbName -y $pullDelay -g $showMsg -r $showRow -s start
......@@ -357,13 +357,13 @@ if $data[1][1] == 0 then
endi
endi
# either $data[0][2] == $totalMsgOfNtb and $data[1][2] == 0
# or $data[0][2] == 0 and $data[1][2] == $totalMsgOfNtb
if $data[0][2] == $totalMsgOfNtb then
# either $data[0][2] == $expectmsgcnt and $data[1][2] == 0
# or $data[0][2] == 0 and $data[1][2] == $expectmsgcnt
if $data[0][2] == $expectmsgcnt then
if $data[1][2] == 0 then
goto check_ok_2
endi
elif $data[1][2] == $totalMsgOfNtb then
elif $data[1][2] == $expectmsgcnt then
if $data[0][2] == 0 then
goto check_ok_2
endi
......
......@@ -80,8 +80,8 @@ $topicList = $topicList . '
$consumerId = 0
$totalMsgOfStb = $ctbNum * $rowsPerCtb
$totalMsgOfStb = $totalMsgOfStb * $topicNum
$expectmsgcnt = $totalMsgOfStb
sql insert into consumeinfo values (now , $consumerId , $topicList , $keyList , $expectmsgcnt , $ifcheckdata , $ifmanualcommit )
$expectmsgcnt = 9
sql insert into consumeinfo values (now , $consumerId , $topicList , $keyList , $totalMsgOfStb , $ifcheckdata , $ifmanualcommit )
print == start consumer to pull msgs from stb
print == tsim/tmq/consume.sh -d $dbName -y $pullDelay -g $showMsg -r $showRow -w $dbName -s start
......@@ -102,7 +102,7 @@ endi
if $data[0][2] != $expectmsgcnt then
return -1
endi
if $data[0][3] != $expectmsgcnt then
if $data[0][3] != $totalMsgOfStb then
return -1
endi
......@@ -137,8 +137,8 @@ $topicList = $topicList . '
$consumerId = 0
$totalMsgOfCtb = $rowsPerCtb * $topicNum
$expectmsgcnt = $totalMsgOfCtb
sql insert into consumeinfo values (now , $consumerId , $topicList , $keyList , $expectmsgcnt , $ifcheckdata , $ifmanualcommit )
$expectmsgcnt = $topicNum
sql insert into consumeinfo values (now , $consumerId , $topicList , $keyList , $totalMsgOfCtb , $ifcheckdata , $ifmanualcommit )
print == start consumer to pull msgs from ctb
print == tsim/tmq/consume.sh -d $dbName -y $pullDelay -g $showMsg -r $showRow -s start
......@@ -156,7 +156,7 @@ endi
if $data[0][1] != $consumerId then
return -1
endi
if $data[0][2] != $totalMsgOfCtb then
if $data[0][2] != $expectmsgcnt then
return -1
endi
if $data[0][3] != $totalMsgOfCtb then
......@@ -194,8 +194,8 @@ $topicList = $topicList . '
$consumerId = 0
$totalMsgOfNtb = $rowsPerCtb * $topicNum
$expectmsgcnt = $totalMsgOfNtb
sql insert into consumeinfo values (now , $consumerId , $topicList , $keyList , $expectmsgcnt , $ifcheckdata , $ifmanualcommit )
$expectmsgcnt = $topicNum
sql insert into consumeinfo values (now , $consumerId , $topicList , $keyList , $totalMsgOfNtb , $ifcheckdata , $ifmanualcommit )
print == start consumer to pull msgs from ntb
print == tsim/tmq/consume.sh -d $dbName -y $pullDelay -g $showMsg -r $showRow -s start
......@@ -213,7 +213,7 @@ endi
if $data[0][1] != $consumerId then
return -1
endi
if $data[0][2] != $totalMsgOfNtb then
if $data[0][2] != $expectmsgcnt then
return -1
endi
if $data[0][3] != $totalMsgOfNtb then
......
......@@ -79,10 +79,10 @@ $topicList = $topicList . '
$consumerId = 0
$totalMsgOfStb = $ctbNum * $rowsPerCtb
$totalMsgOfStb = $totalMsgOfStb * $topicNum
$expectmsgcnt = $totalMsgOfStb
sql insert into consumeinfo values (now , $consumerId , $topicList , $keyList , $expectmsgcnt , $ifcheckdata , $ifmanualcommit )
$expectmsgcnt = 9
sql insert into consumeinfo values (now , $consumerId , $topicList , $keyList , $totalMsgOfStb , $ifcheckdata , $ifmanualcommit )
$consumerId = 1
sql insert into consumeinfo values (now+1s , $consumerId , $topicList , $keyList , $expectmsgcnt , $ifcheckdata , $ifmanualcommit )
sql insert into consumeinfo values (now+1s , $consumerId , $topicList , $keyList , $totalMsgOfStb , $ifcheckdata , $ifmanualcommit )
print == start consumer to pull msgs from stb
print == tsim/tmq/consume.sh -d $dbName -y $pullDelay -g $showMsg -r $showRow -w $dbName -s start
......@@ -131,19 +131,19 @@ endi
if $data[0][3] <= 0 then
return -1
endi
if $data[0][3] >= $expectmsgcnt then
if $data[0][3] >= $totalMsgOfStb then
return -1
endi
if $data[1][3] <= 0 then
return -1
endi
if $data[1][3] >= $expectmsgcnt then
if $data[1][3] >= $totalMsgOfStb then
return -1
endi
$sumOfConsRow = $data[0][3] + $data[1][3]
if $sumOfConsRow != $expectmsgcnt then
if $sumOfConsRow != $totalMsgOfStb then
return -1
endi
......@@ -178,10 +178,10 @@ $topicList = $topicList . '
$consumerId = 0
$totalMsgOfCtb = $rowsPerCtb * $topicNum
$expectmsgcnt = $totalMsgOfCtb
sql insert into consumeinfo values (now , $consumerId , $topicList , $keyList , $expectmsgcnt , $ifcheckdata , $ifmanualcommit )
$expectmsgcnt = $topicNum
sql insert into consumeinfo values (now , $consumerId , $topicList , $keyList , $totalMsgOfCtb , $ifcheckdata , $ifmanualcommit )
$consumerId = 1
sql insert into consumeinfo values (now+1s , $consumerId , $topicList , $keyList , $expectmsgcnt , $ifcheckdata , $ifmanualcommit )
sql insert into consumeinfo values (now+1s , $consumerId , $topicList , $keyList , $totalMsgOfCtb , $ifcheckdata , $ifmanualcommit )
print == start consumer to pull msgs from ctb
print == tsim/tmq/consume.sh -d $dbName -y $pullDelay -g $showMsg -r $showRow -s start
......@@ -208,14 +208,14 @@ if $data[0][1] == 1 then
endi
endi
# either $data[0][2] == $totalMsgOfCtb and $data[1][2] == 0
# or $data[0][2] == 0 and $data[1][2] == $totalMsgOfCtb
if $data[0][2] == $totalMsgOfCtb then
# either $data[0][2] == $expectmsgcnt and $data[1][2] == 0
# or $data[0][2] == 0 and $data[1][2] == $expectmsgcnt
if $data[0][2] == $expectmsgcnt then
if $data[1][2] == 0 then
goto check_ok_0
endi
elif $data[0][2] == 0 then
if $data[1][2] == $totalMsgOfCtb then
if $data[1][2] == $expectmsgcnt then
goto check_ok_0
endi
endi
......@@ -266,10 +266,10 @@ $topicList = $topicList . '
$consumerId = 0
$totalMsgOfNtb = $rowsPerCtb * $topicNum
$expectmsgcnt = $totalMsgOfNtb
sql insert into consumeinfo values (now , $consumerId , $topicList , $keyList , $expectmsgcnt , $ifcheckdata , $ifmanualcommit )
$expectmsgcnt = $topicNum
sql insert into consumeinfo values (now , $consumerId , $topicList , $keyList , $totalMsgOfNtb , $ifcheckdata , $ifmanualcommit )
$consumerId = 1
sql insert into consumeinfo values (now+1s , $consumerId , $topicList , $keyList , $expectmsgcnt , $ifcheckdata , $ifmanualcommit )
sql insert into consumeinfo values (now+1s , $consumerId , $topicList , $keyList , $totalMsgOfNtb , $ifcheckdata , $ifmanualcommit )
print == start consumer to pull msgs from ntb
print == tsim/tmq/consume.sh -d $dbName -y $pullDelay -g $showMsg -r $showRow -s start
......@@ -296,14 +296,14 @@ if $data[1][1] == 0 then
endi
endi
# either $data[0][2] == $totalMsgOfNtb and $data[1][2] == 0
# or $data[0][2] == 0 and $data[1][2] == $totalMsgOfNtb
if $data[0][2] == $totalMsgOfNtb then
# either $data[0][2] == $expectmsgcnt and $data[1][2] == 0
# or $data[0][2] == 0 and $data[1][2] == $expectmsgcnt
if $data[0][2] == $expectmsgcnt then
if $data[1][2] == 0 then
goto check_ok_2
endi
elif $data[0][2] == 0 then
if $data[1][2] == $totalMsgOfNtb then
if $data[1][2] == $expectmsgcnt then
goto check_ok_2
endi
endi
......
......@@ -226,7 +226,7 @@ class TDTestCase:
self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)
tdLog.info("start consume processor")
pollDelay = 100
pollDelay = 5
showMsg = 1
showRow = 1
self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow)
......@@ -307,7 +307,7 @@ class TDTestCase:
self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)
tdLog.info("start consume processor")
pollDelay = 100
pollDelay = 5
showMsg = 1
showRow = 1
self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow)
......
......@@ -228,7 +228,7 @@ class TDTestCase:
self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)
tdLog.info("start consume processor")
pollDelay = 100
pollDelay = 5
showMsg = 1
showRow = 1
self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow)
......@@ -303,7 +303,7 @@ class TDTestCase:
self.insertConsumerInfo(consumerId, expectrowcnt/4,topicList,keyList,ifcheckdata,ifManualCommit)
tdLog.info("start consume processor")
pollDelay = 100
pollDelay = 5
showMsg = 1
showRow = 1
self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow)
......@@ -315,7 +315,7 @@ class TDTestCase:
for i in range(expectRows):
totalConsumeRows += resultList[i]
if totalConsumeRows != expectrowcnt/4:
if totalConsumeRows < expectrowcnt/4:
tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt/4))
tdLog.exit("tmq consume rows error!")
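# editorial note: the exact-match check above was relaxed to a lower bound; the
# assumption inferred from this change is that the consumer may legitimately pull more
# rows than the quarter expected at this point.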
......@@ -333,7 +333,7 @@ class TDTestCase:
for i in range(expectRows):
totalConsumeRows += resultList[i]
if totalConsumeRows != expectrowcnt:
if totalConsumeRows < expectrowcnt:
tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt))
tdLog.exit("tmq consume rows error!")
......@@ -386,7 +386,7 @@ class TDTestCase:
self.insertConsumerInfo(consumerId, expectrowcnt/4,topicList,keyList,ifcheckdata,ifManualCommit)
tdLog.info("start consume processor")
pollDelay = 100
pollDelay = 5
showMsg = 1
showRow = 1
self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow)
......@@ -398,7 +398,7 @@ class TDTestCase:
for i in range(expectRows):
totalConsumeRows += resultList[i]
if totalConsumeRows != expectrowcnt/4:
if totalConsumeRows < expectrowcnt/4:
tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt/4))
tdLog.exit("tmq consume rows error!")
......@@ -416,7 +416,7 @@ class TDTestCase:
for i in range(expectRows):
totalConsumeRows += resultList[i]
if totalConsumeRows != (expectrowcnt * (1 + 1/4)):
if totalConsumeRows < (expectrowcnt * (1 + 1/4)):
tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt))
tdLog.exit("tmq consume rows error!")
......
......@@ -233,7 +233,7 @@ class TDTestCase:
self.insertConsumerInfo(consumerId, expectrowcnt/4,topicList,keyList,ifcheckdata,ifManualCommit)
tdLog.info("start consume processor")
pollDelay = 100
pollDelay = 5
showMsg = 1
showRow = 1
self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow)
......@@ -320,7 +320,7 @@ class TDTestCase:
self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)
tdLog.info("start consume processor")
pollDelay = 100
pollDelay = 5
showMsg = 1
showRow = 1
self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow)
......
......@@ -233,7 +233,7 @@ class TDTestCase:
self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)
tdLog.info("start consume 0 processor")
pollDelay = 100
pollDelay = 10
showMsg = 1
showRow = 1
self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow)
......@@ -251,6 +251,7 @@ class TDTestCase:
tdLog.info("start consume 1 processor")
self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow)
tdLog.sleep(2)
tdLog.info("start one new thread to insert data")
parameterDict['actionType'] = actionType.INSERT_DATA
......@@ -271,6 +272,7 @@ class TDTestCase:
tdLog.info("start consume 2 processor")
self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow)
tdLog.sleep(2)
tdLog.info("start one new thread to insert data")
parameterDict['actionType'] = actionType.INSERT_DATA
......@@ -338,7 +340,7 @@ class TDTestCase:
self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)
tdLog.info("start consume 0 processor")
pollDelay = 100
pollDelay = 20
showMsg = 1
showRow = 1
self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow)
......@@ -380,6 +382,7 @@ class TDTestCase:
tdLog.info("start consume 2 processor")
self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow)
tdLog.sleep(2)
tdLog.info("start one new thread to insert data")
parameterDict['actionType'] = actionType.INSERT_DATA
......@@ -394,7 +397,7 @@ class TDTestCase:
for i in range(expectRows):
totalConsumeRows += resultList[i]
if totalConsumeRows != expectrowcnt*2:
if totalConsumeRows < expectrowcnt*2:
tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt*2))
tdLog.exit("tmq consume rows error!")
......
......@@ -215,7 +215,8 @@ class TDTestCase:
parameterDict["stbName"],\
parameterDict["ctbNum"],\
parameterDict["rowsPerTbl"],\
parameterDict["batchNum"])
parameterDict["batchNum"],\
parameterDict["startTs"])
tdLog.info("create topics from stb1")
topicFromStb1 = 'topic_stb1'
......@@ -233,7 +234,7 @@ class TDTestCase:
self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)
tdLog.info("start consume 0 processor")
pollDelay = 100
pollDelay = 5
showMsg = 1
showRow = 1
self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow)
......@@ -269,7 +270,7 @@ class TDTestCase:
for i in range(expectRows):
totalConsumeRows += resultList[i]
if totalConsumeRows != expectrowcnt-10000:
if totalConsumeRows < expectrowcnt-10000:
tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt-10000))
tdLog.exit("tmq consume rows error!")
......@@ -328,7 +329,8 @@ class TDTestCase:
parameterDict["stbName"],\
parameterDict["ctbNum"],\
parameterDict["rowsPerTbl"],\
parameterDict["batchNum"])
parameterDict["batchNum"],\
parameterDict["startTs"])
tdLog.info("create topics from stb1")
topicFromStb1 = 'topic_stb1'
......@@ -346,7 +348,7 @@ class TDTestCase:
self.insertConsumerInfo(consumerId, expectrowcnt/4,topicList,keyList,ifcheckdata,ifManualCommit)
tdLog.info("start consume processor")
pollDelay = 100
pollDelay = 5
showMsg = 1
showRow = 1
self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow)
......@@ -415,7 +417,8 @@ class TDTestCase:
parameterDict["stbName"],\
parameterDict["ctbNum"],\
parameterDict["rowsPerTbl"],\
parameterDict["batchNum"])
parameterDict["batchNum"],\
parameterDict["startTs"])
tdLog.info("create topics from stb1")
topicFromStb1 = 'topic_stb1'
......@@ -433,7 +436,7 @@ class TDTestCase:
self.insertConsumerInfo(consumerId, expectrowcnt/4,topicList,keyList,ifcheckdata,ifManualCommit)
tdLog.info("start consume processor")
pollDelay = 100
pollDelay = 5
showMsg = 1
showRow = 1
self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow)
......@@ -445,7 +448,7 @@ class TDTestCase:
for i in range(expectRows):
totalConsumeRows += resultList[i]
if totalConsumeRows != expectrowcnt/4:
if totalConsumeRows < expectrowcnt/4:
tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt/4))
tdLog.exit("tmq consume rows error!")
......@@ -467,7 +470,7 @@ class TDTestCase:
for i in range(expectRows):
totalConsumeRows += resultList[i]
if totalConsumeRows != expectrowcnt/4:
if totalConsumeRows < expectrowcnt/4:
tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt/4))
tdLog.exit("tmq consume rows error!")
......@@ -502,7 +505,8 @@ class TDTestCase:
parameterDict["stbName"],\
parameterDict["ctbNum"],\
parameterDict["rowsPerTbl"],\
parameterDict["batchNum"])
parameterDict["batchNum"],\
parameterDict["startTs"])
tdLog.info("create topics from stb1")
topicFromStb1 = 'topic_stb1'
......@@ -520,7 +524,7 @@ class TDTestCase:
self.insertConsumerInfo(consumerId, expectrowcnt/4,topicList,keyList,ifcheckdata,ifManualCommit)
tdLog.info("start consume processor")
pollDelay = 100
pollDelay = 5
showMsg = 1
showRow = 1
self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow)
......@@ -532,7 +536,7 @@ class TDTestCase:
for i in range(expectRows):
totalConsumeRows += resultList[i]
if totalConsumeRows != expectrowcnt/4:
if totalConsumeRows < expectrowcnt/4:
tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt/4))
tdLog.exit("tmq consume rows error!")
......@@ -555,7 +559,7 @@ class TDTestCase:
for i in range(expectRows):
totalConsumeRows += resultList[i]
if totalConsumeRows != expectrowcnt*(1/2+1/4):
if totalConsumeRows < expectrowcnt*(1/2+1/4):
tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt*(1/2+1/4)))
tdLog.exit("tmq consume rows error!")
......
......@@ -231,7 +231,7 @@ class TDTestCase:
self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)
tdLog.info("start consume processor")
pollDelay = 100
pollDelay = 5
showMsg = 1
showRow = 1
self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow)
......@@ -305,7 +305,7 @@ class TDTestCase:
self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)
tdLog.info("start consume processor")
pollDelay = 100
pollDelay = 5
showMsg = 1
showRow = 1
self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow)
......
......@@ -53,7 +53,7 @@ class TDTestCase:
paraDict['rowsPerTbl'] = self.rowsPerTbl
tmqCom.initConsumerTable()
tdCom.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], vgroups=paraDict["vgroups"],replica=1,wal_retention_size=-1, wal_retention_period=-1)
tdCom.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], vgroups=paraDict["vgroups"],replica=1)
tdLog.info("create stb")
tmqCom.create_stable(tdSql, dbName=paraDict["dbName"],stbName=paraDict["stbName"])
tdLog.info("create ctb")
......@@ -237,10 +237,10 @@ class TDTestCase:
if self.snapshot == 0:
consumerId = 2
expectrowcnt = int(paraDict["rowsPerTbl"] * paraDict["ctbNum"] * (1 + 1/4 + 3/4))
expectrowcnt = int(paraDict["rowsPerTbl"] * paraDict["ctbNum"])
elif self.snapshot == 1:
consumerId = 3
expectrowcnt = int(paraDict["rowsPerTbl"] * paraDict["ctbNum"] * (1 - 1/4 + 1/4 + 3/4))
expectrowcnt = int(paraDict["rowsPerTbl"] * paraDict["ctbNum"] * (1 + 1/4))
topicList = topicFromStb1
ifcheckdata = 1
......@@ -270,7 +270,7 @@ class TDTestCase:
if totalConsumeRows != expectrowcnt:
tdLog.exit("tmq consume rows error with snapshot = 0!")
elif self.snapshot == 1:
if totalConsumeRows != totalRowsFromQuery:
if totalConsumeRows != expectrowcnt:
tdLog.exit("tmq consume rows error with snapshot = 1!")
# tmqCom.checkFileContent(consumerId, queryString)
......@@ -323,7 +323,7 @@ class TDTestCase:
if self.snapshot == 0:
consumerId = 4
expectrowcnt = int(paraDict["rowsPerTbl"] * paraDict["ctbNum"] * (1/4 + 3/4))
expectrowcnt = int(paraDict["rowsPerTbl"] * paraDict["ctbNum"])
elif self.snapshot == 1:
consumerId = 5
expectrowcnt = int(paraDict["rowsPerTbl"] * paraDict["ctbNum"] * (1 - 1/4 + 1/4 + 3/4))
......@@ -369,11 +369,7 @@ class TDTestCase:
tdLog.info("act consume rows: %d, act query rows: %d, expect consume rows: %d, "%(totalConsumeRows, totalRowsFromQuery, expectrowcnt))
if self.snapshot == 0:
# If data writing is completed before consumer get snapshot, will consume 7500 from wal;
# If data writing has not started before consumer get snapshot, will consume 10000 from wal;
minRows = int(expectrowcnt * (1 - 1/4)) # 7500
tdLog.info("consume rows should be between %d and %d, "%(minRows, expectrowcnt))
if not ((totalConsumeRows >= minRows) and (totalConsumeRows <= expectrowcnt)):
if (totalConsumeRows != expectrowcnt):
tdLog.exit("tmq consume rows error with snapshot = 0!")
elif self.snapshot == 1:
tdLog.info("consume rows should be between %d and %d, "%(totalRowsFromQuery, expectrowcnt))
......@@ -494,7 +490,7 @@ class TDTestCase:
tdLog.printNoPrefix("======== test case 4 end ...... ")
def run(self):
# tdSql.prepare()
tdSql.prepare()
tdLog.printNoPrefix("=============================================")
tdLog.printNoPrefix("======== snapshot is 0: only consume from wal")
self.snapshot = 0
......@@ -520,11 +516,11 @@ class TDTestCase:
self.prepareTestEnv()
self.tmqCase3()
tdLog.printNoPrefix("=============================================")
tdLog.printNoPrefix("======== snapshot is 0: only consume from wal")
self.snapshot = 0
self.prepareTestEnv()
self.tmqCase4()
# tdLog.printNoPrefix("=============================================")
# tdLog.printNoPrefix("======== snapshot is 0: only consume from wal")
# self.snapshot = 0
# self.prepareTestEnv()
# self.tmqCase4()
tdLog.printNoPrefix("====================================================================")
tdLog.printNoPrefix("======== snapshot is 1: firstly consume from tsbs, and then from wal")
self.snapshot = 1
......