Commit 850fa1cd authored by: D dapan1121

Merge branch 'develop' into hotfix/TD-3030

......@@ -46,6 +46,7 @@ def pre_test(){
git fetch origin +refs/pull/${CHANGE_ID}/merge
git checkout -qf FETCH_HEAD
git --no-pager diff --name-only FETCH_HEAD $(git merge-base FETCH_HEAD develop)|grep -v -E '.*md|//src//connector|Jenkinsfile' || exit 0
find ${WKC}/tests/pytest -name \'*\'.sql -exec rm -rf {} \\;
cd ${WK}
git reset --hard HEAD~10
git checkout develop
......@@ -115,7 +116,6 @@ pipeline {
sh '''
date
cd ${WKC}/tests
find pytest -name '*'sql|xargs rm -rf
./test-all.sh p1
date'''
}
......@@ -131,7 +131,6 @@ pipeline {
sh '''
date
cd ${WKC}/tests
find pytest -name '*'sql|xargs rm -rf
./test-all.sh p2
date'''
}
......
......@@ -123,6 +123,7 @@ int32_t tscGetDataBlockFromList(SHashObj* pHashList, int64_t id, int32_t size, i
bool tscIsPointInterpQuery(SQueryInfo* pQueryInfo);
bool tscIsTWAQuery(SQueryInfo* pQueryInfo);
bool tscIsSecondStageQuery(SQueryInfo* pQueryInfo);
bool tscGroupbyColumn(SQueryInfo* pQueryInfo);
bool tscNonOrderedProjectionQueryOnSTable(SQueryInfo *pQueryInfo, int32_t tableIndex);
bool tscOrderedProjectionQueryOnSTable(SQueryInfo* pQueryInfo, int32_t tableIndex);
......@@ -133,6 +134,7 @@ bool tscIsProjectionQuery(SQueryInfo* pQueryInfo);
bool tscIsTwoStageSTableQuery(SQueryInfo* pQueryInfo, int32_t tableIndex);
bool tscQueryTags(SQueryInfo* pQueryInfo);
bool tscMultiRoundQuery(SQueryInfo* pQueryInfo, int32_t tableIndex);
bool tscQueryBlockInfo(SQueryInfo* pQueryInfo);
SSqlExpr* tscAddFuncInSelectClause(SQueryInfo* pQueryInfo, int32_t outputColIndex, int16_t functionId,
SColumnIndex* pIndex, SSchema* pColSchema, int16_t colType);
......@@ -152,7 +154,6 @@ SInternalField* tscFieldInfoInsert(SFieldInfo* pFieldInfo, int32_t index, TAOS_F
SInternalField* tscFieldInfoGetInternalField(SFieldInfo* pFieldInfo, int32_t index);
TAOS_FIELD* tscFieldInfoGetField(SFieldInfo* pFieldInfo, int32_t index);
void tscFieldInfoUpdateOffset(SQueryInfo* pQueryInfo);
void tscFieldInfoUpdateOffset(SQueryInfo* pQueryInfo);
int16_t tscFieldInfoGetOffset(SQueryInfo* pQueryInfo, int32_t index);
......
......@@ -198,9 +198,10 @@ typedef struct STableDataBlocks {
typedef struct SQueryInfo {
int16_t command; // the command may be different for each subclause, so keep it separately.
uint32_t type; // query/insert type
STimeWindow window; // the whole query time window
STimeWindow window; // query time window
SInterval interval;
SInterval interval; // tumble time window
SSessionWindow sessionWindow; // session time window
SSqlGroupbyExpr groupbyExpr; // group by tags info
SArray * colList; // SArray<SColumn*>
......@@ -232,6 +233,7 @@ typedef struct SQueryInfo {
typedef struct {
int command;
uint8_t msgType;
char reserve1[3]; // fix bus error on arm32
bool autoCreated; // create table if it does not exist when retrieving table meta from mnode
union {
......@@ -244,8 +246,10 @@ typedef struct {
char * curSql; // current sql, resume position of sql after parsing paused
int8_t parseFinished;
char reserve2[3]; // fix bus error on arm32
int16_t numOfCols;
char reserve3[2]; // fix bus error on arm32
uint32_t allocSize;
char * payload;
int32_t payloadLen;
......@@ -255,7 +259,9 @@ typedef struct {
int32_t numOfParams;
int8_t dataSourceType; // load data from file or not
char reserve4[3]; // fix bus error on arm32
int8_t submitSchema; // submit block is built with table schema
char reserve5[3]; // fix bus error on arm32
STagData tagData; // NOTE: pTagData->data is used as a variant length array
SName **pTableNameList; // all involved tableMeta list of current insert sql statement.
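
The `reserveN[...]` members added above appear intended to keep the multi-byte fields that follow `int8_t` members on naturally aligned offsets; on arm32, a multi-byte load from a misaligned address can raise SIGBUS ("bus error"). A minimal, purely illustrative sketch of that concern, using a hypothetical packed struct rather than the actual SSqlCmd layout:

```c
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical illustration (not the real SSqlCmd): in a byte-packed layout a
 * multi-byte member that follows an int8_t can land on an odd offset, and a
 * direct load through such a pointer may fault on arm32. Explicit reserve
 * bytes keep every member on an offset that is a multiple of its size. */
#pragma pack(push, 1)
typedef struct {
  int8_t   parseFinished;  /* offset 0 */
  char     reserve2[3];    /* offsets 1-3: pad to a 4-byte boundary */
  int16_t  numOfCols;      /* offsets 4-5 */
  char     reserve3[2];    /* offsets 6-7: pad to a 4-byte boundary */
  uint32_t allocSize;      /* offsets 8-11: 4-byte aligned */
} SPackedExample;
#pragma pack(pop)

int main(void) {
  printf("numOfCols at offset %zu, allocSize at offset %zu\n",
         offsetof(SPackedExample, numOfCols), offsetof(SPackedExample, allocSize));
  return 0;
}
```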
......@@ -397,7 +403,6 @@ typedef struct SSqlStream {
void tscSetStreamDestTable(SSqlStream* pStream, const char* dstTable);
int tscAcquireRpc(const char *key, const char *user, const char *secret,void **pRpcObj);
void tscReleaseRpc(void *param);
void tscInitMsgsFp();
......
......@@ -100,6 +100,10 @@ static void tscInitSqlContext(SSqlCmd *pCmd, SLocalMerger *pReducer, tOrderDescr
} else if (functionId == TSDB_FUNC_APERCT) {
pCtx->param[0].i64 = pExpr->param[0].i64;
pCtx->param[0].nType = pExpr->param[0].nType;
} else if (functionId == TSDB_FUNC_BLKINFO) {
pCtx->param[0].i64 = pExpr->param[0].i64;
pCtx->param[0].nType = pExpr->param[0].nType;
pCtx->numOfParams = 1;
}
pCtx->interBufBytes = pExpr->interBytes;
......@@ -951,10 +955,10 @@ static void doFillResult(SSqlObj *pSql, SLocalMerger *pLocalMerge, bool doneOutp
// todo extract function
int64_t actualETime = (pQueryInfo->order.order == TSDB_ORDER_ASC)? pQueryInfo->window.ekey: pQueryInfo->window.skey;
tFilePage **pResPages = malloc(POINTER_BYTES * pQueryInfo->fieldsInfo.numOfOutput);
void** pResPages = malloc(POINTER_BYTES * pQueryInfo->fieldsInfo.numOfOutput);
for (int32_t i = 0; i < pQueryInfo->fieldsInfo.numOfOutput; ++i) {
TAOS_FIELD *pField = tscFieldInfoGetField(&pQueryInfo->fieldsInfo, i);
pResPages[i] = calloc(1, sizeof(tFilePage) + pField->bytes * pLocalMerge->resColModel->capacity);
pResPages[i] = calloc(1, pField->bytes * pLocalMerge->resColModel->capacity);
}
while (1) {
......@@ -966,7 +970,7 @@ static void doFillResult(SSqlObj *pSql, SLocalMerger *pLocalMerge, bool doneOutp
if (pQueryInfo->limit.offset > 0) {
for (int32_t i = 0; i < pQueryInfo->fieldsInfo.numOfOutput; ++i) {
TAOS_FIELD *pField = tscFieldInfoGetField(&pQueryInfo->fieldsInfo, i);
memmove(pResPages[i]->data, pResPages[i]->data + pField->bytes * pQueryInfo->limit.offset,
memmove(pResPages[i], ((char*)pResPages[i]) + pField->bytes * pQueryInfo->limit.offset,
(size_t)(newRows * pField->bytes));
}
}
......@@ -1010,7 +1014,7 @@ static void doFillResult(SSqlObj *pSql, SLocalMerger *pLocalMerge, bool doneOutp
int32_t offset = 0;
for (int32_t i = 0; i < pQueryInfo->fieldsInfo.numOfOutput; ++i) {
TAOS_FIELD *pField = tscFieldInfoGetField(&pQueryInfo->fieldsInfo, i);
memcpy(pRes->data + offset * pRes->numOfRows, pResPages[i]->data, (size_t)(pField->bytes * pRes->numOfRows));
memcpy(pRes->data + offset * pRes->numOfRows, pResPages[i], (size_t)(pField->bytes * pRes->numOfRows));
offset += pField->bytes;
}
......
......@@ -307,7 +307,8 @@ int32_t tsParseOneColumnData(SSchema *pSchema, SStrToken *pToken, char *payload,
return tscInvalidSQLErrMsg(msg, "illegal float data", pToken->z);
}
*((float *)payload) = (float)dv;
// *((float *)payload) = (float)dv;
SET_FLOAT_VAL(payload, dv);
}
break;
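
The replacement of the direct `*((float *)payload) = (float)dv` store with `SET_FLOAT_VAL(payload, dv)` avoids writing a float through a possibly unaligned pointer, which can trigger a bus error on arm32. A minimal sketch of how such a macro can be written, assuming a hypothetical definition (the real TDengine SET_FLOAT_VAL may differ):

```c
#include <stdio.h>
#include <string.h>

/* Hypothetical unaligned-safe float store: memcpy lets the compiler emit
 * byte-wise or properly aligned stores, so it is safe even when the target
 * buffer is not 4-byte aligned (a direct *(float*) store can SIGBUS on arm32). */
#define SET_FLOAT_VAL_SKETCH(payload, dv)        \
  do {                                           \
    float _tmp = (float)(dv);                    \
    memcpy((payload), &_tmp, sizeof(float));     \
  } while (0)

int main(void) {
  char buf[8];
  SET_FLOAT_VAL_SKETCH(buf + 1, 3.14);  /* buf + 1 is deliberately misaligned */
  float v;
  memcpy(&v, buf + 1, sizeof(float));
  printf("%f\n", v);
  return 0;
}
```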
......@@ -1359,7 +1360,7 @@ int tsParseSql(SSqlObj *pSql, bool initial) {
}
}
} else {
SSqlInfo SQLInfo = qSQLParse(pSql->sqlstr);
SSqlInfo SQLInfo = qSqlParse(pSql->sqlstr);
ret = tscToSQLCmd(pSql, &SQLInfo);
if (ret == TSDB_CODE_TSC_INVALID_SQL && pSql->parseRetry == 0 && SQLInfo.type == TSDB_SQL_NULL) {
tscResetSqlCmd(pCmd, true);
......
This diff is collapsed.
......@@ -497,8 +497,6 @@ int tscProcessSql(SSqlObj *pSql) {
return pSql->res.code;
}
} else if (pCmd->command >= TSDB_SQL_LOCAL) {
//pSql->epSet = tscMgmtEpSet;
// } else { // local handler
return (*tscProcessMsgRsp[pCmd->command])(pSql);
}
......@@ -645,7 +643,6 @@ static char *doSerializeTableInfo(SQueryTableMsg* pQueryMsg, SSqlObj *pSql, char
}
pSql->epSet.inUse = rand()%pSql->epSet.numOfEps;
pQueryMsg->head.vgId = htonl(vgId);
STableIdInfo *pTableIdInfo = (STableIdInfo *)pMsg;
......@@ -660,8 +657,6 @@ static char *doSerializeTableInfo(SQueryTableMsg* pQueryMsg, SSqlObj *pSql, char
int32_t numOfVgroups = (int32_t)taosArrayGetSize(pTableMetaInfo->pVgroupTables);
assert(index >= 0 && index < numOfVgroups);
tscDebug("%p query on stable, vgIndex:%d, numOfVgroups:%d", pSql, index, numOfVgroups);
SVgroupTableInfo* pTableIdList = taosArrayGet(pTableMetaInfo->pVgroupTables, index);
// set the vgroup info
......@@ -670,7 +665,10 @@ static char *doSerializeTableInfo(SQueryTableMsg* pQueryMsg, SSqlObj *pSql, char
int32_t numOfTables = (int32_t)taosArrayGetSize(pTableIdList->itemList);
pQueryMsg->numOfTables = htonl(numOfTables); // set the number of tables
tscDebug("%p query on stable, vgId:%d, numOfTables:%d, vgIndex:%d, numOfVgroups:%d", pSql,
pTableIdList->vgInfo.vgId, numOfTables, index, numOfVgroups);
// serialize each table id info
for(int32_t i = 0; i < numOfTables; ++i) {
STableIdInfo* pItem = taosArrayGet(pTableIdList->itemList, i);
......@@ -705,7 +703,7 @@ int tscBuildQueryMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
STableMeta * pTableMeta = pTableMetaInfo->pTableMeta;
size_t numOfSrcCols = taosArrayGetSize(pQueryInfo->colList);
if (numOfSrcCols <= 0 && !tscQueryTags(pQueryInfo)) {
if (numOfSrcCols <= 0 && !tscQueryTags(pQueryInfo) && !tscQueryBlockInfo(pQueryInfo)) {
tscError("%p illegal value of numOfCols in query msg: %" PRIu64 ", table cols:%d", pSql, (uint64_t)numOfSrcCols,
tscGetNumOfColumns(pTableMeta));
......@@ -756,6 +754,8 @@ int tscBuildQueryMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
pQueryMsg->vgroupLimit = htobe64(pQueryInfo->vgroupLimit);
pQueryMsg->sqlstrLen = htonl(sqlLen);
pQueryMsg->prevResultLen = htonl(pQueryInfo->bufLen);
pQueryMsg->sw.gap = htobe64(pQueryInfo->sessionWindow.gap);
pQueryMsg->sw.primaryColId = htonl(PRIMARYKEY_TIMESTAMP_COL_INDEX);
size_t numOfOutput = tscSqlExprNumOfExprs(pQueryInfo);
pQueryMsg->numOfOutput = htons((int16_t)numOfOutput); // this is the stage one output column number
......@@ -835,13 +835,31 @@ int tscBuildQueryMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
pSqlFuncExpr->colInfo.colIndex = htons(pExpr->colInfo.colIndex);
pSqlFuncExpr->colInfo.flag = htons(pExpr->colInfo.flag);
if (TSDB_COL_IS_UD_COL(pExpr->colInfo.flag)) {
pSqlFuncExpr->colType = htons(pExpr->resType);
pSqlFuncExpr->colBytes = htons(pExpr->resBytes);
} else if (pExpr->colInfo.colId == TSDB_TBNAME_COLUMN_INDEX) {
SSchema *s = tGetTbnameColumnSchema();
pSqlFuncExpr->colType = htons(s->type);
pSqlFuncExpr->colBytes = htons(s->bytes);
} else if (pExpr->colInfo.colId == TSDB_BLOCK_DIST_COLUMN_INDEX) {
SSchema s = tGetBlockDistColumnSchema();
pSqlFuncExpr->colType = htons(s.type);
pSqlFuncExpr->colBytes = htons(s.bytes);
} else {
SSchema* s = tscGetColumnSchemaById(pTableMeta, pExpr->colInfo.colId);
pSqlFuncExpr->colType = htons(s->type);
pSqlFuncExpr->colBytes = htons(s->bytes);
}
pSqlFuncExpr->functionId = htons(pExpr->functionId);
pSqlFuncExpr->numOfParams = htons(pExpr->numOfParams);
pSqlFuncExpr->resColId = htons(pExpr->resColId);
pMsg += sizeof(SSqlFuncMsg);
for (int32_t j = 0; j < pExpr->numOfParams; ++j) {
// todo add log
for (int32_t j = 0; j < pExpr->numOfParams; ++j) { // todo add log
pSqlFuncExpr->arg[j].argType = htons((uint16_t)pExpr->param[j].nType);
pSqlFuncExpr->arg[j].argBytes = htons(pExpr->param[j].nLen);
......@@ -866,6 +884,8 @@ int tscBuildQueryMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
for (int32_t i = 0; i < output; ++i) {
SInternalField* pField = tscFieldInfoGetInternalField(&pQueryInfo->fieldsInfo, i);
SSqlExpr *pExpr = pField->pSqlExpr;
// this should be switched to projection query
if (pExpr != NULL) {
// the queried table has been removed and a new table with the same name has already been created
// return error msg
......@@ -879,33 +899,31 @@ int tscBuildQueryMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
return TSDB_CODE_TSC_INVALID_SQL;
}
pSqlFuncExpr1->colInfo.colId = htons(pExpr->colInfo.colId);
pSqlFuncExpr1->colInfo.colIndex = htons(pExpr->colInfo.colIndex);
pSqlFuncExpr1->colInfo.flag = htons(pExpr->colInfo.flag);
pSqlFuncExpr1->functionId = htons(pExpr->functionId);
pSqlFuncExpr1->numOfParams = htons(pExpr->numOfParams);
pMsg += sizeof(SSqlFuncMsg);
for (int32_t j = 0; j < pExpr->numOfParams; ++j) {
// todo add log
pSqlFuncExpr1->arg[j].argType = htons((uint16_t)pExpr->param[j].nType);
pSqlFuncExpr1->arg[j].argBytes = htons(pExpr->param[j].nLen);
if (pExpr->param[j].nType == TSDB_DATA_TYPE_BINARY) {
memcpy(pMsg, pExpr->param[j].pz, pExpr->param[j].nLen);
pMsg += pExpr->param[j].nLen;
} else {
pSqlFuncExpr1->arg[j].argValue.i64 = htobe64(pExpr->param[j].i64);
pSqlFuncExpr1->numOfParams = 0; // no params for projection query
pSqlFuncExpr1->functionId = htons(TSDB_FUNC_PRJ);
pSqlFuncExpr1->colInfo.colId = htons(pExpr->resColId);
pSqlFuncExpr1->colInfo.flag = htons(TSDB_COL_NORMAL);
bool assign = false;
for (int32_t f = 0; f < tscSqlExprNumOfExprs(pQueryInfo); ++f) {
SSqlExpr *pe = tscSqlExprGet(pQueryInfo, f);
if (pe == pExpr) {
pSqlFuncExpr1->colInfo.colIndex = htons(f);
pSqlFuncExpr1->colType = htons(pe->resType);
pSqlFuncExpr1->colBytes = htons(pe->resBytes);
assign = true;
break;
}
}
assert(assign);
pMsg += sizeof(SSqlFuncMsg);
pSqlFuncExpr1 = (SSqlFuncMsg *)pMsg;
} else {
assert(pField->pArithExprInfo != NULL);
SExprInfo* pExprInfo = pField->pArithExprInfo;
pSqlFuncExpr1->colInfo.colId = htons(pExprInfo->base.colInfo.colId);
pSqlFuncExpr1->colInfo.colId = htons(pExprInfo->base.colInfo.colId);
pSqlFuncExpr1->functionId = htons(pExprInfo->base.functionId);
pSqlFuncExpr1->numOfParams = htons(pExprInfo->base.numOfParams);
pMsg += sizeof(SSqlFuncMsg);
......@@ -1332,7 +1350,7 @@ int tscEstimateCreateTableMsgLength(SSqlObj *pSql, SSqlInfo *pInfo) {
SSqlCmd *pCmd = &(pSql->cmd);
int32_t size = minMsgSize() + sizeof(SCMCreateTableMsg) + sizeof(SCreateTableMsg);
SCreateTableSQL *pCreateTableInfo = pInfo->pCreateTableInfo;
SCreateTableSql *pCreateTableInfo = pInfo->pCreateTableInfo;
if (pCreateTableInfo->type == TSQL_CREATE_TABLE_FROM_STABLE) {
int32_t numOfTables = (int32_t)taosArrayGetSize(pInfo->pCreateTableInfo->childTableInfo);
size += numOfTables * (sizeof(SCreateTableMsg) + TSDB_MAX_TAGS_LEN);
......@@ -1341,7 +1359,7 @@ int tscEstimateCreateTableMsgLength(SSqlObj *pSql, SSqlInfo *pInfo) {
}
if (pCreateTableInfo->pSelect != NULL) {
size += (pCreateTableInfo->pSelect->selectToken.n + 1);
size += (pCreateTableInfo->pSelect->sqlstr.n + 1);
}
return size + TSDB_EXTRA_PAYLOAD_SIZE;
......@@ -1399,7 +1417,7 @@ int tscBuildCreateTableMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
int32_t code = tNameExtractFullName(&pTableMetaInfo->name, pCreateMsg->tableName);
assert(code == 0);
SCreateTableSQL *pCreateTable = pInfo->pCreateTableInfo;
SCreateTableSql *pCreateTable = pInfo->pCreateTableInfo;
pCreateMsg->igExists = pCreateTable->existCheck ? 1 : 0;
pCreateMsg->numOfColumns = htons(pCmd->numOfCols);
......@@ -1422,11 +1440,11 @@ int tscBuildCreateTableMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
pMsg = (char *)pSchema;
if (type == TSQL_CREATE_STREAM) { // check if it is a stream sql
SQuerySQL *pQuerySql = pInfo->pCreateTableInfo->pSelect;
SQuerySqlNode *pQuerySql = pInfo->pCreateTableInfo->pSelect;
strncpy(pMsg, pQuerySql->selectToken.z, pQuerySql->selectToken.n + 1);
pCreateMsg->sqlLen = htons(pQuerySql->selectToken.n + 1);
pMsg += pQuerySql->selectToken.n + 1;
strncpy(pMsg, pQuerySql->sqlstr.z, pQuerySql->sqlstr.n + 1);
pCreateMsg->sqlLen = htons(pQuerySql->sqlstr.n + 1);
pMsg += pQuerySql->sqlstr.n + 1;
}
}
......
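
The message builders above convert every multi-byte field to network byte order before the message is sent to the vnode: `htons` for 16-bit fields, `htonl` for 32-bit fields, and `htobe64` for 64-bit fields such as the session gap. A minimal round-trip sketch, assuming a glibc/BSD environment where `htobe64`/`be64toh` are available in `<endian.h>`:

```c
#include <arpa/inet.h>   /* htons, ntohs */
#include <endian.h>      /* htobe64, be64toh (glibc/BSD; assumption) */
#include <inttypes.h>
#include <stdio.h>

int main(void) {
  int16_t functionId = 37;        /* example 16-bit field */
  int64_t gap        = 10000000;  /* example 64-bit field (session gap, us) */

  /* sender side: convert to big-endian before copying into the wire message */
  uint16_t wireFunc = htons((uint16_t)functionId);
  uint64_t wireGap  = htobe64((uint64_t)gap);

  /* receiver side: convert back to host byte order */
  printf("functionId=%d gap=%" PRId64 "\n",
         (int16_t)ntohs(wireFunc), (int64_t)be64toh(wireGap));
  return 0;
}
```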
......@@ -503,9 +503,19 @@ TAOS_RES *taos_consume(TAOS_SUB *tsub) {
SSqlCmd *pCmd = &pSql->cmd;
STableMetaInfo *pTableMetaInfo = tscGetTableMetaInfoFromCmd(pCmd, pCmd->clauseIndex, 0);
SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, 0);
if (taosArrayGetSize(pSub->progress) > 0) { // fix crash in single tabel subscription
pQueryInfo->window.skey = ((SSubscriptionProgress*)taosArrayGet(pSub->progress, 0))->key;
tscDebug("subscribe:%s set subscribe skey:%"PRId64, pSub->topic, pQueryInfo->window.skey);
if (taosArrayGetSize(pSub->progress) > 0) { // fix crash in single table subscription
size_t size = taosArrayGetSize(pSub->progress);
TSKEY s = INT64_MAX;
for(int32_t i = 0; i < size; ++i) {
TSKEY k = ((SSubscriptionProgress*)taosArrayGet(pSub->progress, i))->key;
if (s > k) {
s = k;
}
}
pQueryInfo->window.skey = s;
tscDebug("subscribe:%s set next round subscribe skey:%"PRId64, pSub->topic, pQueryInfo->window.skey);
}
if (pSub->pTimer == NULL) {
......
......@@ -74,14 +74,14 @@ static bool allSubqueryDone(SSqlObj *pParentSql) {
SSubqueryState *subState = &pParentSql->subState;
//lock in caller
tscDebug("%p total subqueries: %d", pParentSql, subState->numOfSub);
for (int i = 0; i < subState->numOfSub; i++) {
if (0 == subState->states[i]) {
tscDebug("%p subquery:%p,%d is NOT finished, total:%d", pParentSql, pParentSql->pSubs[i], i, subState->numOfSub);
tscDebug("%p subquery:%p, index: %d NOT finished, abort query completion check", pParentSql, pParentSql->pSubs[i], i);
done = false;
break;
} else {
tscDebug("%p subquery:%p,%d is finished, total:%d", pParentSql, pParentSql->pSubs[i], i, subState->numOfSub);
tscDebug("%p subquery:%p, index: %d finished", pParentSql, pParentSql->pSubs[i], i);
}
}
......@@ -453,7 +453,7 @@ static int32_t tscLaunchRealSubqueries(SSqlObj* pSql) {
pSubQueryInfo->tsBuf = NULL;
// free result for async object will also free sqlObj
assert(tscSqlExprNumOfExprs(pSubQueryInfo) == 1); // ts_comp query only requires one resutl columns
assert(tscSqlExprNumOfExprs(pSubQueryInfo) == 1); // ts_comp query only requires one result columns
taos_free_result(pPrevSub);
SSqlObj *pNew = createSubqueryObj(pSql, (int16_t) i, tscJoinQueryCallback, pSupporter, TSDB_SQL_SELECT, NULL);
......@@ -507,6 +507,7 @@ static int32_t tscLaunchRealSubqueries(SSqlObj* pSql) {
SSqlExpr* pExpr = tscSqlExprGet(pQueryInfo, 0);
int16_t funcId = pExpr->functionId;
// add the invisible timestamp column
if ((pExpr->colInfo.colId != PRIMARYKEY_TIMESTAMP_COL_INDEX) ||
(funcId != TSDB_FUNC_TS && funcId != TSDB_FUNC_TS_DUMMY && funcId != TSDB_FUNC_PRJ)) {
......@@ -847,6 +848,8 @@ static void tidTagRetrieveCallback(void* param, TAOS_RES* tres, int32_t numOfRow
SSqlRes* pRes = &pSql->res;
SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex);
// todo, the type may not include TSDB_QUERY_TYPE_TAG_FILTER_QUERY
assert(TSDB_QUERY_HAS_TYPE(pQueryInfo->type, TSDB_QUERY_TYPE_TAG_FILTER_QUERY));
if (pParentSql->res.code != TSDB_CODE_SUCCESS) {
......@@ -1059,7 +1062,6 @@ static void tsCompRetrieveCallback(void* param, TAOS_RES* tres, int32_t numOfRow
tscError("%p invalid ts comp file from vnode, abort subquery, file size:%d", pSql, numOfRows);
pParentSql->res.code = TAOS_SYSTEM_ERROR(errno);
if (quitAllSubquery(pSql, pParentSql, pSupporter)){
return;
}
......@@ -1880,6 +1882,13 @@ void doAppendData(SInterResult* pInterResult, TAOS_ROW row, int32_t numOfCols, S
}
}
if (p && taosArrayGetSize(p) > 0) {
SResPair *l = taosArrayGetLast(p);
if (l->key == key && key == INT64_MIN) {
continue;
}
}
//append a new column
if (p == NULL) {
SStddevInterResult t = {.colId = id, .pResult = taosArrayInit(10, sizeof(SResPair)),};
......@@ -2643,12 +2652,17 @@ static SSqlObj *tscCreateSTableSubquery(SSqlObj *pSql, SRetrieveSupport *trsuppo
SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(&pNew->cmd, 0);
pQueryInfo->type |= TSDB_QUERY_TYPE_STABLE_SUBQUERY;
// clear the limit/offset info, since it should not be sent to vnode to be executed.
pQueryInfo->limit.limit = -1;
pQueryInfo->limit.offset = 0;
assert(pQueryInfo->numOfTables == 1 && pNew->cmd.numOfClause == 1 && trsupport->subqueryIndex < pSql->subState.numOfSub);
// launch subquery for each vnode, so the subquery index equals to the vgroupIndex.
STableMetaInfo *pTableMetaInfo = tscGetMetaInfo(pQueryInfo, table_index);
pTableMetaInfo->vgroupIndex = trsupport->subqueryIndex;
pSql->pSubs[trsupport->subqueryIndex] = pNew;
}
......@@ -3102,30 +3116,6 @@ void tscBuildResFromSubqueries(SSqlObj *pSql) {
}
}
static UNUSED_FUNC void transferNcharData(SSqlObj *pSql, int32_t columnIndex, TAOS_FIELD *pField) {
SSqlRes *pRes = &pSql->res;
if (pRes->tsrow[columnIndex] != NULL && pField->type == TSDB_DATA_TYPE_NCHAR) {
// convert unicode to native code in a temporary buffer extra one byte for terminated symbol
if (pRes->buffer[columnIndex] == NULL) {
pRes->buffer[columnIndex] = malloc(pField->bytes + TSDB_NCHAR_SIZE);
}
/* string terminated char for binary data*/
memset(pRes->buffer[columnIndex], 0, pField->bytes + TSDB_NCHAR_SIZE);
int32_t length = taosUcs4ToMbs(pRes->tsrow[columnIndex], pRes->length[columnIndex], pRes->buffer[columnIndex]);
if ( length >= 0 ) {
pRes->tsrow[columnIndex] = (unsigned char*)pRes->buffer[columnIndex];
pRes->length[columnIndex] = length;
} else {
tscError("%p charset:%s to %s. val:%s convert failed.", pSql, DEFAULT_UNICODE_ENCODEC, tsCharset, (char*)pRes->tsrow[columnIndex]);
pRes->tsrow[columnIndex] = NULL;
pRes->length[columnIndex] = 0;
}
}
}
char *getArithmeticInputSrc(void *param, const char *name, int32_t colId) {
SArithmeticSupport *pSupport = (SArithmeticSupport *) param;
......
......@@ -97,6 +97,22 @@ bool tscQueryTags(SQueryInfo* pQueryInfo) {
return true;
}
bool tscQueryBlockInfo(SQueryInfo* pQueryInfo) {
int32_t numOfCols = (int32_t) tscSqlExprNumOfExprs(pQueryInfo);
for (int32_t i = 0; i < numOfCols; ++i) {
SSqlExpr* pExpr = tscSqlExprGet(pQueryInfo, i);
int32_t functId = pExpr->functionId;
// "select count(tbname)" query
if (functId == TSDB_FUNC_BLKINFO) {
return true;
}
}
return false;
}
bool tscIsTwoStageSTableQuery(SQueryInfo* pQueryInfo, int32_t tableIndex) {
if (pQueryInfo == NULL) {
return false;
......@@ -223,6 +239,21 @@ bool tscIsSecondStageQuery(SQueryInfo* pQueryInfo) {
return false;
}
bool tscGroupbyColumn(SQueryInfo* pQueryInfo) {
STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
int32_t numOfCols = tscGetNumOfColumns(pTableMetaInfo->pTableMeta);
SSqlGroupbyExpr* pGroupbyExpr = &pQueryInfo->groupbyExpr;
for (int32_t k = 0; k < pGroupbyExpr->numOfGroupCols; ++k) {
SColIndex* pIndex = taosArrayGet(pGroupbyExpr->columnInfo, k);
if (!TSDB_COL_IS_TAG(pIndex->flag) && pIndex->colIndex < numOfCols) { // group by normal columns
return true;
}
}
return false;
}
bool tscIsTWAQuery(SQueryInfo* pQueryInfo) {
size_t numOfExprs = tscSqlExprNumOfExprs(pQueryInfo);
for (int32_t i = 0; i < numOfExprs; ++i) {
......@@ -1722,10 +1753,15 @@ void tscInitQueryInfo(SQueryInfo* pQueryInfo) {
pQueryInfo->fieldsInfo.internalField = taosArrayInit(4, sizeof(SInternalField));
assert(pQueryInfo->exprList == NULL);
pQueryInfo->exprList = taosArrayInit(4, POINTER_BYTES);
pQueryInfo->colList = taosArrayInit(4, POINTER_BYTES);
pQueryInfo->udColumnId = TSDB_UD_COLUMN_INDEX;
pQueryInfo->resColumnId= -1000;
pQueryInfo->exprList = taosArrayInit(4, POINTER_BYTES);
pQueryInfo->colList = taosArrayInit(4, POINTER_BYTES);
pQueryInfo->udColumnId = TSDB_UD_COLUMN_INDEX;
pQueryInfo->resColumnId = -1000;
pQueryInfo->limit.limit = -1;
pQueryInfo->limit.offset = 0;
pQueryInfo->slimit.limit = -1;
pQueryInfo->slimit.offset = 0;
}
int32_t tscAddSubqueryInfo(SSqlCmd* pCmd) {
......
......@@ -283,12 +283,37 @@ typedef struct {
#define keyCol(pCols) (&((pCols)->cols[0])) // Key column
#define dataColsTKeyAt(pCols, idx) ((TKEY *)(keyCol(pCols)->pData))[(idx)]
#define dataColsKeyAt(pCols, idx) tdGetKey(dataColsTKeyAt(pCols, idx))
#define dataColsTKeyFirst(pCols) (((pCols)->numOfRows == 0) ? TKEY_INVALID : dataColsTKeyAt(pCols, 0))
#define dataColsKeyFirst(pCols) (((pCols)->numOfRows == 0) ? TSDB_DATA_TIMESTAMP_NULL : dataColsKeyAt(pCols, 0))
#define dataColsTKeyLast(pCols) \
(((pCols)->numOfRows == 0) ? TKEY_INVALID : dataColsTKeyAt(pCols, (pCols)->numOfRows - 1))
#define dataColsKeyLast(pCols) \
(((pCols)->numOfRows == 0) ? TSDB_DATA_TIMESTAMP_NULL : dataColsKeyAt(pCols, (pCols)->numOfRows - 1))
static FORCE_INLINE TKEY dataColsTKeyFirst(SDataCols *pCols) {
if (pCols->numOfRows) {
return dataColsTKeyAt(pCols, 0);
} else {
return TKEY_INVALID;
}
}
static FORCE_INLINE TSKEY dataColsKeyFirst(SDataCols *pCols) {
if (pCols->numOfRows) {
return dataColsKeyAt(pCols, 0);
} else {
return TSDB_DATA_TIMESTAMP_NULL;
}
}
static FORCE_INLINE TKEY dataColsTKeyLast(SDataCols *pCols) {
if (pCols->numOfRows) {
return dataColsTKeyAt(pCols, pCols->numOfRows - 1);
} else {
return TKEY_INVALID;
}
}
static FORCE_INLINE TSKEY dataColsKeyLast(SDataCols *pCols) {
if (pCols->numOfRows) {
return dataColsKeyAt(pCols, pCols->numOfRows - 1);
} else {
return TSDB_DATA_TIMESTAMP_NULL;
}
}
SDataCols *tdNewDataCols(int maxRowSize, int maxCols, int maxRows);
void tdResetDataCols(SDataCols *pCols);
......
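
Replacing the `dataColsTKeyFirst`/`dataColsKeyLast` macros with `static FORCE_INLINE` functions keeps the same behavior while adding type checking and evaluating the argument exactly once. A small self-contained sketch of the macro pitfall the function form avoids, using hypothetical names (`FIRST_OR_MINUS1`, `Cols`, `next` are illustrative only):

```c
#include <stdio.h>

/* Hypothetical sketch: a function-like macro expands its argument at every
 * use site, so an argument with side effects is evaluated more than once. */
#define FIRST_OR_MINUS1(p) (((p)->n == 0) ? -1 : (p)->vals[0])

typedef struct { int n; int vals[4]; } Cols;

static Cols g = { 2, {10, 20, 0, 0} };
static int  calls = 0;

static Cols *next(void) { ++calls; return &g; }

int main(void) {
  int v = FIRST_OR_MINUS1(next());  /* next() runs twice through the macro */
  printf("value=%d, next() called %d times\n", v, calls);  /* prints 2 calls */
  return 0;
}
```

A static inline function taking `SDataCols *` evaluates its argument once and lets the compiler check the pointer type, at no runtime cost.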
......@@ -33,7 +33,7 @@ typedef struct SDataStatis {
typedef struct SColumnInfoData {
SColumnInfo info;
void* pData; // the corresponding block data in memory
char* pData; // the corresponding block data in memory
} SColumnInfoData;
typedef struct SResPair {
......
......@@ -5,7 +5,7 @@ with open("README.md", "r") as fh:
setuptools.setup(
name="taos",
version="2.0.6",
version="2.0.7",
author="Taosdata Inc.",
author_email="support@taosdata.com",
description="TDengine python client package",
......
......@@ -22,10 +22,10 @@ def _crow_timestamp_to_python(data, num_of_rows, nbytes=None, micro=False):
if num_of_rows > 0:
return list(map(_timestamp_converter, ctypes.cast(
data, ctypes.POINTER(ctypes.c_long))[:abs(num_of_rows)]))
data, ctypes.POINTER(ctypes.c_int64))[:abs(num_of_rows)]))
else:
return list(map(_timestamp_converter, ctypes.cast(
data, ctypes.POINTER(ctypes.c_long))[:abs(num_of_rows)]))
data, ctypes.POINTER(ctypes.c_int64))[:abs(num_of_rows)]))
def _crow_bool_to_python(data, num_of_rows, nbytes=None, micro=False):
......@@ -145,10 +145,10 @@ def _crow_bigint_to_python(data, num_of_rows, nbytes=None, micro=False):
"""
if num_of_rows > 0:
return [None if ele == FieldType.C_BIGINT_NULL else ele for ele in ctypes.cast(
data, ctypes.POINTER(ctypes.c_long))[:abs(num_of_rows)]]
data, ctypes.POINTER(ctypes.c_int64))[:abs(num_of_rows)]]
else:
return [None if ele == FieldType.C_BIGINT_NULL else ele for ele in ctypes.cast(
data, ctypes.POINTER(ctypes.c_long))[:abs(num_of_rows)]]
data, ctypes.POINTER(ctypes.c_int64))[:abs(num_of_rows)]]
def _crow_bigint_unsigned_to_python(
......@@ -162,13 +162,13 @@ def _crow_bigint_unsigned_to_python(
return [
None if ele == FieldType.C_BIGINT_UNSIGNED_NULL else ele for ele in ctypes.cast(
data, ctypes.POINTER(
ctypes.c_ulong))[
ctypes.c_uint64))[
:abs(num_of_rows)]]
else:
return [
None if ele == FieldType.C_BIGINT_UNSIGNED_NULL else ele for ele in ctypes.cast(
data, ctypes.POINTER(
ctypes.c_ulong))[
ctypes.c_uint64))[
:abs(num_of_rows)]]
......@@ -600,7 +600,7 @@ class CTaosInterface(object):
# elif (dtype == CTaosInterface.TSDB_DATA_TYPE_INT):
# return ctypes.cast(data, ctypes.POINTER(ctypes.c_int))[0]
# elif (dtype == CTaosInterface.TSDB_DATA_TYPE_BIGINT):
# return ctypes.cast(data, ctypes.POINTER(ctypes.c_long))[0]
# return ctypes.cast(data, ctypes.POINTER(ctypes.c_int64))[0]
# elif (dtype == CTaosInterface.TSDB_DATA_TYPE_FLOAT):
# return ctypes.cast(data, ctypes.POINTER(ctypes.c_float))[0]
# elif (dtype == CTaosInterface.TSDB_DATA_TYPE_DOUBLE):
......@@ -608,7 +608,7 @@ class CTaosInterface(object):
# elif (dtype == CTaosInterface.TSDB_DATA_TYPE_BINARY):
# return (ctypes.cast(data, ctypes.POINTER(ctypes.c_char))[0:byte]).rstrip('\x00')
# elif (dtype == CTaosInterface.TSDB_DATA_TYPE_TIMESTAMP):
# return ctypes.cast(data, ctypes.POINTER(ctypes.c_long))[0]
# return ctypes.cast(data, ctypes.POINTER(ctypes.c_int64))[0]
# elif (dtype == CTaosInterface.TSDB_DATA_TYPE_NCHAR):
# return (ctypes.cast(data, ctypes.c_char_p).value).rstrip('\x00')
......
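
The connector change above from `ctypes.c_long` to `ctypes.c_int64` (and `c_ulong` to `c_uint64`) matters because the width of C `long` is platform-dependent, while BIGINT and timestamp columns are always 64-bit. A purely illustrative C sketch of the width difference:

```c
#include <stdint.h>
#include <stdio.h>

int main(void) {
  /* On LLP64 platforms (e.g. 64-bit Windows) and on 32-bit targets,
   * sizeof(long) is 4, so reading a 64-bit column through `long`
   * truncates the value; int64_t is always 8 bytes. */
  printf("sizeof(long)    = %zu\n", sizeof(long));
  printf("sizeof(int64_t) = %zu\n", sizeof(int64_t));
  return 0;
}
```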
......@@ -5,7 +5,7 @@ with open("README.md", "r") as fh:
setuptools.setup(
name="taos",
version="2.0.5",
version="2.0.7",
author="Taosdata Inc.",
author_email="support@taosdata.com",
description="TDengine python client package",
......
......@@ -22,10 +22,10 @@ def _crow_timestamp_to_python(data, num_of_rows, nbytes=None, micro=False):
if num_of_rows > 0:
return list(map(_timestamp_converter, ctypes.cast(
data, ctypes.POINTER(ctypes.c_long))[:abs(num_of_rows)]))
data, ctypes.POINTER(ctypes.c_int64))[:abs(num_of_rows)]))
else:
return list(map(_timestamp_converter, ctypes.cast(
data, ctypes.POINTER(ctypes.c_long))[:abs(num_of_rows)]))
data, ctypes.POINTER(ctypes.c_int64))[:abs(num_of_rows)]))
def _crow_bool_to_python(data, num_of_rows, nbytes=None, micro=False):
......@@ -145,10 +145,10 @@ def _crow_bigint_to_python(data, num_of_rows, nbytes=None, micro=False):
"""
if num_of_rows > 0:
return [None if ele == FieldType.C_BIGINT_NULL else ele for ele in ctypes.cast(
data, ctypes.POINTER(ctypes.c_long))[:abs(num_of_rows)]]
data, ctypes.POINTER(ctypes.c_int64))[:abs(num_of_rows)]]
else:
return [None if ele == FieldType.C_BIGINT_NULL else ele for ele in ctypes.cast(
data, ctypes.POINTER(ctypes.c_long))[:abs(num_of_rows)]]
data, ctypes.POINTER(ctypes.c_int64))[:abs(num_of_rows)]]
def _crow_bigint_unsigned_to_python(
......@@ -162,13 +162,13 @@ def _crow_bigint_unsigned_to_python(
return [
None if ele == FieldType.C_BIGINT_UNSIGNED_NULL else ele for ele in ctypes.cast(
data, ctypes.POINTER(
ctypes.c_ulong))[
ctypes.c_uint64))[
:abs(num_of_rows)]]
else:
return [
None if ele == FieldType.C_BIGINT_UNSIGNED_NULL else ele for ele in ctypes.cast(
data, ctypes.POINTER(
ctypes.c_ulong))[
ctypes.c_uint64))[
:abs(num_of_rows)]]
......@@ -600,7 +600,7 @@ class CTaosInterface(object):
# elif (dtype == CTaosInterface.TSDB_DATA_TYPE_INT):
# return ctypes.cast(data, ctypes.POINTER(ctypes.c_int))[0]
# elif (dtype == CTaosInterface.TSDB_DATA_TYPE_BIGINT):
# return ctypes.cast(data, ctypes.POINTER(ctypes.c_long))[0]
# return ctypes.cast(data, ctypes.POINTER(ctypes.c_int64))[0]
# elif (dtype == CTaosInterface.TSDB_DATA_TYPE_FLOAT):
# return ctypes.cast(data, ctypes.POINTER(ctypes.c_float))[0]
# elif (dtype == CTaosInterface.TSDB_DATA_TYPE_DOUBLE):
......@@ -608,7 +608,7 @@ class CTaosInterface(object):
# elif (dtype == CTaosInterface.TSDB_DATA_TYPE_BINARY):
# return (ctypes.cast(data, ctypes.POINTER(ctypes.c_char))[0:byte]).rstrip('\x00')
# elif (dtype == CTaosInterface.TSDB_DATA_TYPE_TIMESTAMP):
# return ctypes.cast(data, ctypes.POINTER(ctypes.c_long))[0]
# return ctypes.cast(data, ctypes.POINTER(ctypes.c_int64))[0]
# elif (dtype == CTaosInterface.TSDB_DATA_TYPE_NCHAR):
# return (ctypes.cast(data, ctypes.c_char_p).value).rstrip('\x00')
......
from .cinterface import CTaosInterface
from .error import *
from .constants import FieldType
import threading
# querySeqNum = 0
......@@ -38,7 +37,6 @@ class TDengineCursor(object):
self._block_iter = 0
self._affected_rows = 0
self._logfile = ""
self._threadId = threading.get_ident()
if connection is not None:
self._connection = connection
......@@ -105,12 +103,6 @@ class TDengineCursor(object):
def execute(self, operation, params=None):
"""Prepare and execute a database operation (query or command).
"""
# if threading.get_ident() != self._threadId:
# info ="Cursor execute:Thread ID not match,creater:"+str(self._threadId)+" caller:"+str(threading.get_ident())
# raise OperationalError(info)
# print(info)
# return None
if not operation:
return None
......@@ -280,12 +272,6 @@ class TDengineCursor(object):
def _handle_result(self):
"""Handle the return result from query.
"""
# if threading.get_ident() != self._threadId:
# info = "Cursor handleresult:Thread ID not match,creater:"+str(self._threadId)+" caller:"+str(threading.get_ident())
# raise OperationalError(info)
# print(info)
# return None
self._description = []
for ele in self._fields:
self._description.append(
......
......@@ -5,7 +5,7 @@ with open("README.md", "r") as fh:
setuptools.setup(
name="taos",
version="2.0.5",
version="2.0.7",
author="Taosdata Inc.",
author_email="support@taosdata.com",
description="TDengine python client package",
......
......@@ -22,10 +22,10 @@ def _crow_timestamp_to_python(data, num_of_rows, nbytes=None, micro=False):
if num_of_rows > 0:
return list(map(_timestamp_converter, ctypes.cast(
data, ctypes.POINTER(ctypes.c_long))[:abs(num_of_rows)]))
data, ctypes.POINTER(ctypes.c_int64))[:abs(num_of_rows)]))
else:
return list(map(_timestamp_converter, ctypes.cast(
data, ctypes.POINTER(ctypes.c_long))[:abs(num_of_rows)]))
data, ctypes.POINTER(ctypes.c_int64))[:abs(num_of_rows)]))
def _crow_bool_to_python(data, num_of_rows, nbytes=None, micro=False):
......@@ -145,10 +145,10 @@ def _crow_bigint_to_python(data, num_of_rows, nbytes=None, micro=False):
"""
if num_of_rows > 0:
return [None if ele == FieldType.C_BIGINT_NULL else ele for ele in ctypes.cast(
data, ctypes.POINTER(ctypes.c_long))[:abs(num_of_rows)]]
data, ctypes.POINTER(ctypes.c_int64))[:abs(num_of_rows)]]
else:
return [None if ele == FieldType.C_BIGINT_NULL else ele for ele in ctypes.cast(
data, ctypes.POINTER(ctypes.c_long))[:abs(num_of_rows)]]
data, ctypes.POINTER(ctypes.c_int64))[:abs(num_of_rows)]]
def _crow_bigint_unsigned_to_python(
......@@ -162,13 +162,13 @@ def _crow_bigint_unsigned_to_python(
return [
None if ele == FieldType.C_BIGINT_UNSIGNED_NULL else ele for ele in ctypes.cast(
data, ctypes.POINTER(
ctypes.c_ulong))[
ctypes.c_uint64))[
:abs(num_of_rows)]]
else:
return [
None if ele == FieldType.C_BIGINT_UNSIGNED_NULL else ele for ele in ctypes.cast(
data, ctypes.POINTER(
ctypes.c_ulong))[
ctypes.c_uint64))[
:abs(num_of_rows)]]
......@@ -600,7 +600,7 @@ class CTaosInterface(object):
# elif (dtype == CTaosInterface.TSDB_DATA_TYPE_INT):
# return ctypes.cast(data, ctypes.POINTER(ctypes.c_int))[0]
# elif (dtype == CTaosInterface.TSDB_DATA_TYPE_BIGINT):
# return ctypes.cast(data, ctypes.POINTER(ctypes.c_long))[0]
# return ctypes.cast(data, ctypes.POINTER(ctypes.c_int64))[0]
# elif (dtype == CTaosInterface.TSDB_DATA_TYPE_FLOAT):
# return ctypes.cast(data, ctypes.POINTER(ctypes.c_float))[0]
# elif (dtype == CTaosInterface.TSDB_DATA_TYPE_DOUBLE):
......@@ -608,7 +608,7 @@ class CTaosInterface(object):
# elif (dtype == CTaosInterface.TSDB_DATA_TYPE_BINARY):
# return (ctypes.cast(data, ctypes.POINTER(ctypes.c_char))[0:byte]).rstrip('\x00')
# elif (dtype == CTaosInterface.TSDB_DATA_TYPE_TIMESTAMP):
# return ctypes.cast(data, ctypes.POINTER(ctypes.c_long))[0]
# return ctypes.cast(data, ctypes.POINTER(ctypes.c_int64))[0]
# elif (dtype == CTaosInterface.TSDB_DATA_TYPE_NCHAR):
# return (ctypes.cast(data, ctypes.c_char_p).value).rstrip('\x00')
......
from .cinterface import CTaosInterface
from .error import *
from .constants import FieldType
import threading
# querySeqNum = 0
......@@ -38,7 +37,6 @@ class TDengineCursor(object):
self._block_iter = 0
self._affected_rows = 0
self._logfile = ""
self._threadId = threading.get_ident()
if connection is not None:
self._connection = connection
......@@ -105,12 +103,6 @@ class TDengineCursor(object):
def execute(self, operation, params=None):
"""Prepare and execute a database operation (query or command).
"""
# if threading.get_ident() != self._threadId:
# info ="Cursor execute:Thread ID not match,creater:"+str(self._threadId)+" caller:"+str(threading.get_ident())
# raise OperationalError(info)
# print(info)
# return None
if not operation:
return None
......@@ -280,12 +272,6 @@ class TDengineCursor(object):
def _handle_result(self):
"""Handle the return result from query.
"""
# if threading.get_ident() != self._threadId:
# info = "Cursor handleresult:Thread ID not match,creater:"+str(self._threadId)+" caller:"+str(threading.get_ident())
# raise OperationalError(info)
# print(info)
# return None
self._description = []
for ele in self._fields:
self._description.append(
......
......@@ -5,7 +5,7 @@ with open("README.md", "r") as fh:
setuptools.setup(
name="taos",
version="2.0.4",
version="2.0.7",
author="Taosdata Inc.",
author_email="support@taosdata.com",
description="TDengine python client package",
......
......@@ -22,10 +22,10 @@ def _crow_timestamp_to_python(data, num_of_rows, nbytes=None, micro=False):
if num_of_rows > 0:
return list(map(_timestamp_converter, ctypes.cast(
data, ctypes.POINTER(ctypes.c_long))[:abs(num_of_rows)]))
data, ctypes.POINTER(ctypes.c_int64))[:abs(num_of_rows)]))
else:
return list(map(_timestamp_converter, ctypes.cast(
data, ctypes.POINTER(ctypes.c_long))[:abs(num_of_rows)]))
data, ctypes.POINTER(ctypes.c_int64))[:abs(num_of_rows)]))
def _crow_bool_to_python(data, num_of_rows, nbytes=None, micro=False):
......@@ -145,10 +145,10 @@ def _crow_bigint_to_python(data, num_of_rows, nbytes=None, micro=False):
"""
if num_of_rows > 0:
return [None if ele == FieldType.C_BIGINT_NULL else ele for ele in ctypes.cast(
data, ctypes.POINTER(ctypes.c_long))[:abs(num_of_rows)]]
data, ctypes.POINTER(ctypes.c_int64))[:abs(num_of_rows)]]
else:
return [None if ele == FieldType.C_BIGINT_NULL else ele for ele in ctypes.cast(
data, ctypes.POINTER(ctypes.c_long))[:abs(num_of_rows)]]
data, ctypes.POINTER(ctypes.c_int64))[:abs(num_of_rows)]]
def _crow_bigint_unsigned_to_python(
......@@ -162,13 +162,13 @@ def _crow_bigint_unsigned_to_python(
return [
None if ele == FieldType.C_BIGINT_UNSIGNED_NULL else ele for ele in ctypes.cast(
data, ctypes.POINTER(
ctypes.c_ulong))[
ctypes.c_uint64))[
:abs(num_of_rows)]]
else:
return [
None if ele == FieldType.C_BIGINT_UNSIGNED_NULL else ele for ele in ctypes.cast(
data, ctypes.POINTER(
ctypes.c_ulong))[
ctypes.c_uint64))[
:abs(num_of_rows)]]
......@@ -600,7 +600,7 @@ class CTaosInterface(object):
# elif (dtype == CTaosInterface.TSDB_DATA_TYPE_INT):
# return ctypes.cast(data, ctypes.POINTER(ctypes.c_int))[0]
# elif (dtype == CTaosInterface.TSDB_DATA_TYPE_BIGINT):
# return ctypes.cast(data, ctypes.POINTER(ctypes.c_long))[0]
# return ctypes.cast(data, ctypes.POINTER(ctypes.c_int64))[0]
# elif (dtype == CTaosInterface.TSDB_DATA_TYPE_FLOAT):
# return ctypes.cast(data, ctypes.POINTER(ctypes.c_float))[0]
# elif (dtype == CTaosInterface.TSDB_DATA_TYPE_DOUBLE):
......@@ -608,7 +608,7 @@ class CTaosInterface(object):
# elif (dtype == CTaosInterface.TSDB_DATA_TYPE_BINARY):
# return (ctypes.cast(data, ctypes.POINTER(ctypes.c_char))[0:byte]).rstrip('\x00')
# elif (dtype == CTaosInterface.TSDB_DATA_TYPE_TIMESTAMP):
# return ctypes.cast(data, ctypes.POINTER(ctypes.c_long))[0]
# return ctypes.cast(data, ctypes.POINTER(ctypes.c_int64))[0]
# elif (dtype == CTaosInterface.TSDB_DATA_TYPE_NCHAR):
# return (ctypes.cast(data, ctypes.c_char_p).value).rstrip('\x00')
......
from .cinterface import CTaosInterface
from .error import *
from .constants import FieldType
import threading
# querySeqNum = 0
......@@ -38,7 +37,6 @@ class TDengineCursor(object):
self._block_iter = 0
self._affected_rows = 0
self._logfile = ""
self._threadId = threading.get_ident()
if connection is not None:
self._connection = connection
......
......@@ -5,7 +5,7 @@ with open("README.md", "r") as fh:
setuptools.setup(
name="taos",
version="2.0.4",
version="2.0.7",
author="Taosdata Inc.",
author_email="support@taosdata.com",
description="TDengine python client package",
......
......@@ -22,10 +22,10 @@ def _crow_timestamp_to_python(data, num_of_rows, nbytes=None, micro=False):
if num_of_rows > 0:
return list(map(_timestamp_converter, ctypes.cast(
data, ctypes.POINTER(ctypes.c_long))[:abs(num_of_rows)]))
data, ctypes.POINTER(ctypes.c_int64))[:abs(num_of_rows)]))
else:
return list(map(_timestamp_converter, ctypes.cast(
data, ctypes.POINTER(ctypes.c_long))[:abs(num_of_rows)]))
data, ctypes.POINTER(ctypes.c_int64))[:abs(num_of_rows)]))
def _crow_bool_to_python(data, num_of_rows, nbytes=None, micro=False):
......@@ -145,10 +145,10 @@ def _crow_bigint_to_python(data, num_of_rows, nbytes=None, micro=False):
"""
if num_of_rows > 0:
return [None if ele == FieldType.C_BIGINT_NULL else ele for ele in ctypes.cast(
data, ctypes.POINTER(ctypes.c_long))[:abs(num_of_rows)]]
data, ctypes.POINTER(ctypes.c_int64))[:abs(num_of_rows)]]
else:
return [None if ele == FieldType.C_BIGINT_NULL else ele for ele in ctypes.cast(
data, ctypes.POINTER(ctypes.c_long))[:abs(num_of_rows)]]
data, ctypes.POINTER(ctypes.c_int64))[:abs(num_of_rows)]]
def _crow_bigint_unsigned_to_python(
......@@ -162,13 +162,13 @@ def _crow_bigint_unsigned_to_python(
return [
None if ele == FieldType.C_BIGINT_UNSIGNED_NULL else ele for ele in ctypes.cast(
data, ctypes.POINTER(
ctypes.c_ulong))[
ctypes.c_uint64))[
:abs(num_of_rows)]]
else:
return [
None if ele == FieldType.C_BIGINT_UNSIGNED_NULL else ele for ele in ctypes.cast(
data, ctypes.POINTER(
ctypes.c_ulong))[
ctypes.c_uint64))[
:abs(num_of_rows)]]
......@@ -600,7 +600,7 @@ class CTaosInterface(object):
# elif (dtype == CTaosInterface.TSDB_DATA_TYPE_INT):
# return ctypes.cast(data, ctypes.POINTER(ctypes.c_int))[0]
# elif (dtype == CTaosInterface.TSDB_DATA_TYPE_BIGINT):
# return ctypes.cast(data, ctypes.POINTER(ctypes.c_long))[0]
# return ctypes.cast(data, ctypes.POINTER(ctypes.c_int64))[0]
# elif (dtype == CTaosInterface.TSDB_DATA_TYPE_FLOAT):
# return ctypes.cast(data, ctypes.POINTER(ctypes.c_float))[0]
# elif (dtype == CTaosInterface.TSDB_DATA_TYPE_DOUBLE):
......@@ -608,7 +608,7 @@ class CTaosInterface(object):
# elif (dtype == CTaosInterface.TSDB_DATA_TYPE_BINARY):
# return (ctypes.cast(data, ctypes.POINTER(ctypes.c_char))[0:byte]).rstrip('\x00')
# elif (dtype == CTaosInterface.TSDB_DATA_TYPE_TIMESTAMP):
# return ctypes.cast(data, ctypes.POINTER(ctypes.c_long))[0]
# return ctypes.cast(data, ctypes.POINTER(ctypes.c_int64))[0]
# elif (dtype == CTaosInterface.TSDB_DATA_TYPE_NCHAR):
# return (ctypes.cast(data, ctypes.c_char_p).value).rstrip('\x00')
......
from .cinterface import CTaosInterface
from .error import *
from .constants import FieldType
import threading
# querySeqNum = 0
......@@ -38,7 +37,6 @@ class TDengineCursor(object):
self._block_iter = 0
self._affected_rows = 0
self._logfile = ""
self._threadId = threading.get_ident()
if connection is not None:
self._connection = connection
......
......@@ -238,6 +238,7 @@ int32_t* taosGetErrno();
#define TSDB_CODE_TDB_IVD_CREATE_TABLE_INFO TAOS_DEF_ERROR_CODE(0, 0x0612) //"Invalid information to create table")
#define TSDB_CODE_TDB_NO_AVAIL_DISK TAOS_DEF_ERROR_CODE(0, 0x0613) //"No available disk")
#define TSDB_CODE_TDB_MESSED_MSG TAOS_DEF_ERROR_CODE(0, 0x0614) //"TSDB messed message")
#define TSDB_CODE_TDB_IVLD_TAG_VAL TAOS_DEF_ERROR_CODE(0, 0x0615) //"TSDB invalid tag value")
// query
#define TSDB_CODE_QRY_INVALID_QHANDLE TAOS_DEF_ERROR_CODE(0, 0x0700) //"Invalid handle")
......@@ -252,7 +253,7 @@ int32_t* taosGetErrno();
#define TSDB_CODE_QRY_IN_EXEC TAOS_DEF_ERROR_CODE(0, 0x0709) //"Multiple retrieval of this query")
#define TSDB_CODE_QRY_TOO_MANY_TIMEWINDOW TAOS_DEF_ERROR_CODE(0, 0x070A) //"Too many time window in query")
#define TSDB_CODE_QRY_NOT_ENOUGH_BUFFER TAOS_DEF_ERROR_CODE(0, 0x070B) //"Query buffer limit has reached")
#define TSDB_CODE_QRY_INCONSISTAN TAOS_DEF_ERROR_CODE(0, 0x070C) //"File inconsistance in replica")
#define TSDB_CODE_QRY_INCONSISTAN TAOS_DEF_ERROR_CODE(0, 0x070C) //"File inconsistency in replica")
// grant
......
......@@ -394,7 +394,7 @@ typedef struct SColIndex {
int16_t colId; // column id
int16_t colIndex; // column index in colList if it is a normal column or index in tagColList if a tag
uint16_t flag; // denote if it is a tag or a normal column
char name[TSDB_COL_NAME_LEN];
char name[TSDB_COL_NAME_LEN]; // TODO remove it
} SColIndex;
/* sql function msg, to describe the message to vnode about sql function
......@@ -402,7 +402,10 @@ typedef struct SColIndex {
typedef struct SSqlFuncMsg {
int16_t functionId;
int16_t numOfParams;
int16_t resColId; // result column id, id of the current output column
int16_t colType;
int16_t colBytes;
SColIndex colInfo;
struct ArgElem {
......@@ -482,12 +485,13 @@ typedef struct {
int16_t orderColId;
int16_t numOfCols; // the number of columns will be load from vnode
SInterval interval;
SSessionWindow sw; // session window
uint16_t tagCondLen; // tag length in current query
uint32_t tbnameCondLen; // table name filter condition string length
int16_t numOfGroupCols; // num of group by columns
int16_t orderByIdx;
int16_t orderType; // used in group by xx order by xxx
int64_t vgroupLimit; // limit the number of rows for each table, used in order by + limit in stable projection query.
int64_t vgroupLimit; // limit the number of rows for each table, used in order by + limit in stable projection query.
int16_t prjOrder; // global order in super table projection query.
int64_t limit;
int64_t offset;
......@@ -625,6 +629,7 @@ typedef struct {
int32_t maxtablesPerVnode;
int32_t maxVgroupsPerDb;
char arbitrator[TSDB_EP_LEN]; // tsArbitrator
char reserve[2]; // to solve arm32 bus error
char timezone[64]; // tsTimezone
int64_t checkTime; // 1970-01-01 00:00:00.000
char locale[TSDB_LOCALE_LEN]; // tsLocale
......
......@@ -158,13 +158,18 @@ int32_t tsdbInsertData(STsdbRepo *repo, SSubmitMsg *pMsg, SShellSubmitRspMsg *pR
typedef void *TsdbQueryHandleT; // Use void to hide implementation details
// query condition to build vnode iterator
#define BLOCK_LOAD_OFFSET_SEQ_ORDER 1
#define BLOCK_LOAD_TABLE_SEQ_ORDER 2
#define BLOCK_LOAD_TABLE_RR_ORDER 3
// query condition to build multi-table data block iterator
typedef struct STsdbQueryCond {
STimeWindow twindow;
int32_t order; // desc|asc order to iterate the data block
int32_t numOfCols;
SColumnInfo *colList;
bool loadExternalRows; // load external rows or not
int32_t type; // data block load type:
} STsdbQueryCond;
typedef struct SMemRef {
......@@ -181,17 +186,31 @@ typedef struct SDataBlockInfo {
int32_t tid;
} SDataBlockInfo;
typedef struct SFileBlockInfo {
int32_t numOfRows;
} SFileBlockInfo;
typedef struct {
void *pTable;
TSKEY lastKey;
} STableKeyInfo;
typedef struct {
size_t numOfTables;
uint32_t numOfTables;
SArray * pGroupList;
SHashObj *map; // speedup acquire the tableQueryInfo by table uid
} STableGroupInfo;
typedef struct {
uint16_t rowSize;
uint16_t numOfFiles;
uint32_t numOfTables;
uint64_t totalSize;
int32_t firstSeekTimeUs;
uint32_t numOfRowsInMemTable;
SArray *dataBlockInfos;
} STableBlockDist;
/**
* Get the data block iterator, starting from position according to the query condition
*
......@@ -252,16 +271,7 @@ int64_t tsdbGetNumOfRowsInMemTable(TsdbQueryHandleT* pHandle);
* @param pQueryHandle
* @return
*/
bool tsdbNextDataBlock(TsdbQueryHandleT *pQueryHandle);
/**
* move to next block if exists but not merge data in memtable
*
* @param pQueryHandle
* @return
*/
bool tsdbNextDataBlockWithoutMerge(TsdbQueryHandleT *pQueryHandle);
SArray* tsdbGetExternalRow(TsdbQueryHandleT *pHandle, SMemRef* pMemRef, int16_t type);
bool tsdbNextDataBlock(TsdbQueryHandleT pQueryHandle);
/**
* Get current data block information
......@@ -306,7 +316,7 @@ int32_t tsdbQuerySTableByTagCond(STsdbRepo *tsdb, uint64_t uid, TSKEY key, const
SColIndex *pColIndex, int32_t numOfCols);
/**
* destory the created table group list, which is generated by tag query
* destroy the created table group list, which is generated by tag query
* @param pGroupList
*/
void tsdbDestroyTableGroup(STableGroupInfo *pGroupList);
......@@ -336,6 +346,12 @@ int32_t tsdbGetTableGroupFromIdList(STsdbRepo *tsdb, SArray *pTableIdList, STabl
*/
void tsdbCleanupQueryHandle(TsdbQueryHandleT queryHandle);
void tsdbResetQueryHandle(TsdbQueryHandleT queryHandle, STsdbQueryCond *pCond);
void tsdbResetQueryHandleForNewTable(TsdbQueryHandleT queryHandle, STsdbQueryCond *pCond, STableGroupInfo* groupList);
int32_t tsdbGetFileBlocksDistInfo(TsdbQueryHandleT* queryHandle, STableBlockDist* pTableBlockInfo);
/**
* get the statistics of repo usage
* @param repo. point to the tsdbrepo
......
......@@ -135,108 +135,75 @@
#define TK_FROM 116
#define TK_VARIABLE 117
#define TK_INTERVAL 118
#define TK_FILL 119
#define TK_SLIDING 120
#define TK_ORDER 121
#define TK_BY 122
#define TK_ASC 123
#define TK_DESC 124
#define TK_GROUP 125
#define TK_HAVING 126
#define TK_LIMIT 127
#define TK_OFFSET 128
#define TK_SLIMIT 129
#define TK_SOFFSET 130
#define TK_WHERE 131
#define TK_NOW 132
#define TK_RESET 133
#define TK_QUERY 134
#define TK_ADD 135
#define TK_COLUMN 136
#define TK_TAG 137
#define TK_CHANGE 138
#define TK_SET 139
#define TK_KILL 140
#define TK_CONNECTION 141
#define TK_STREAM 142
#define TK_COLON 143
#define TK_ABORT 144
#define TK_AFTER 145
#define TK_ATTACH 146
#define TK_BEFORE 147
#define TK_BEGIN 148
#define TK_CASCADE 149
#define TK_CLUSTER 150
#define TK_CONFLICT 151
#define TK_COPY 152
#define TK_DEFERRED 153
#define TK_DELIMITERS 154
#define TK_DETACH 155
#define TK_EACH 156
#define TK_END 157
#define TK_EXPLAIN 158
#define TK_FAIL 159
#define TK_FOR 160
#define TK_IGNORE 161
#define TK_IMMEDIATE 162
#define TK_INITIALLY 163
#define TK_INSTEAD 164
#define TK_MATCH 165
#define TK_KEY 166
#define TK_OF 167
#define TK_RAISE 168
#define TK_REPLACE 169
#define TK_RESTRICT 170
#define TK_ROW 171
#define TK_STATEMENT 172
#define TK_TRIGGER 173
#define TK_VIEW 174
#define TK_COUNT 175
#define TK_SUM 176
#define TK_AVG 177
#define TK_MIN 178
#define TK_MAX 179
#define TK_FIRST 180
#define TK_LAST 181
#define TK_TOP 182
#define TK_BOTTOM 183
#define TK_STDDEV 184
#define TK_PERCENTILE 185
#define TK_APERCENTILE 186
#define TK_LEASTSQUARES 187
#define TK_HISTOGRAM 188
#define TK_DIFF 189
#define TK_SPREAD 190
#define TK_TWA 191
#define TK_INTERP 192
#define TK_LAST_ROW 193
#define TK_RATE 194
#define TK_IRATE 195
#define TK_SUM_RATE 196
#define TK_SUM_IRATE 197
#define TK_AVG_RATE 198
#define TK_AVG_IRATE 199
#define TK_TBID 200
#define TK_SEMI 201
#define TK_NONE 202
#define TK_PREV 203
#define TK_LINEAR 204
#define TK_IMPORT 205
#define TK_METRIC 206
#define TK_TBNAME 207
#define TK_JOIN 208
#define TK_METRICS 209
#define TK_INSERT 210
#define TK_INTO 211
#define TK_VALUES 212
#define TK_SESSION 119
#define TK_FILL 120
#define TK_SLIDING 121
#define TK_ORDER 122
#define TK_BY 123
#define TK_ASC 124
#define TK_DESC 125
#define TK_GROUP 126
#define TK_HAVING 127
#define TK_LIMIT 128
#define TK_OFFSET 129
#define TK_SLIMIT 130
#define TK_SOFFSET 131
#define TK_WHERE 132
#define TK_NOW 133
#define TK_RESET 134
#define TK_QUERY 135
#define TK_ADD 136
#define TK_COLUMN 137
#define TK_TAG 138
#define TK_CHANGE 139
#define TK_SET 140
#define TK_KILL 141
#define TK_CONNECTION 142
#define TK_STREAM 143
#define TK_COLON 144
#define TK_ABORT 145
#define TK_AFTER 146
#define TK_ATTACH 147
#define TK_BEFORE 148
#define TK_BEGIN 149
#define TK_CASCADE 150
#define TK_CLUSTER 151
#define TK_CONFLICT 152
#define TK_COPY 153
#define TK_DEFERRED 154
#define TK_DELIMITERS 155
#define TK_DETACH 156
#define TK_EACH 157
#define TK_END 158
#define TK_EXPLAIN 159
#define TK_FAIL 160
#define TK_FOR 161
#define TK_IGNORE 162
#define TK_IMMEDIATE 163
#define TK_INITIALLY 164
#define TK_INSTEAD 165
#define TK_MATCH 166
#define TK_KEY 167
#define TK_OF 168
#define TK_RAISE 169
#define TK_REPLACE 170
#define TK_RESTRICT 171
#define TK_ROW 172
#define TK_STATEMENT 173
#define TK_TRIGGER 174
#define TK_VIEW 175
#define TK_SEMI 176
#define TK_NONE 177
#define TK_PREV 178
#define TK_LINEAR 179
#define TK_IMPORT 180
#define TK_METRIC 181
#define TK_TBNAME 182
#define TK_JOIN 183
#define TK_METRICS 184
#define TK_INSERT 185
#define TK_INTO 186
#define TK_VALUES 187
#define TK_SPACE 300
......
......@@ -171,10 +171,10 @@ extern tDataTypeDescriptor tDataTypes[15];
bool isValidDataType(int32_t type);
void setVardataNull(char* val, int32_t type);
void setNull(char *val, int32_t type, int32_t bytes);
void setNullN(char *val, int32_t type, int32_t bytes, int32_t numOfElems);
void* getNullValue(int32_t type);
void setVardataNull(char* val, int32_t type);
void setNull(char *val, int32_t type, int32_t bytes);
void setNullN(char *val, int32_t type, int32_t bytes, int32_t numOfElems);
void *getNullValue(int32_t type);
void assignVal(char *val, const char *src, int32_t len, int32_t type);
void tsDataSwap(void *pLeft, void *pRight, int32_t type, int32_t size, void* buf);
......
{
"filetype": "insert",
"cfgdir": "/etc/taos",
"host": "127.0.0.1",
"port": 6030,
"user": "root",
"password": "taosdata",
"thread_count": 4,
"thread_count_create_tbl": 4,
"result_file": "./insert_res.txt",
"filetype": "insert",
"cfgdir": "/etc/taos",
"host": "127.0.0.1",
"port": 6030,
"user": "root",
"password": "taosdata",
"thread_count": 4,
"thread_count_create_tbl": 4,
"result_file": "./insert_res.txt",
"confirm_parameter_prompt": "no",
"insert_interval": 0,
"num_of_records_per_req": 100,
"databases": [{
"dbinfo": {
"name": "db",
"drop": "yes",
"replica": 1,
"days": 10,
"cache": 16,
"blocks": 8,
"precision": "ms",
"keep": 365,
"minRows": 100,
"maxRows": 4096,
"comp":2,
"walLevel":1,
"cachelast":0,
"quorum":1,
"fsync":3000,
"update": 0
},
"super_tables": [{
"name": "stb",
"max_sql_len": 1024000,
"databases": [{
"dbinfo": {
"name": "db",
"drop": "yes",
"replica": 1,
"days": 10,
"cache": 16,
"blocks": 8,
"precision": "ms",
"keep": 365,
"minRows": 100,
"maxRows": 4096,
"comp":2,
"walLevel":1,
"cachelast":0,
"quorum":1,
"fsync":3000,
"update": 0
},
"super_tables": [{
"name": "stb",
"child_table_exists":"no",
"childtable_count": 100,
"childtable_prefix": "stb_",
"auto_create_table": "no",
"data_source": "rand",
"insert_mode": "taosc",
"insert_rows": 100000,
"multi_thread_write_one_tbl": "no",
"number_of_tbl_in_one_sql": 0,
"rows_per_tbl": 100,
"max_sql_len": 1024000,
"disorder_ratio": 0,
"disorder_range": 1000,
"timestamp_step": 1,
"start_timestamp": "2020-10-01 00:00:00.000",
"sample_format": "csv",
"sample_file": "./sample.csv",
"tags_file": "",
"columns": [{"type": "INT"}, {"type": "DOUBLE", "count":10}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}],
"tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}]
}]
}]
"childtable_count": 10000,
"childtable_prefix": "stb_",
"auto_create_table": "no",
"data_source": "rand",
"insert_mode": "taosc",
"insert_rows": 100000,
"multi_thread_write_one_tbl": "no",
"number_of_tbl_in_one_sql": 0,
"rows_per_tbl": 100,
"max_sql_len": 1024000,
"disorder_ratio": 0,
"disorder_range": 1000,
"timestamp_step": 1,
"start_timestamp": "2020-10-01 00:00:00.000",
"sample_format": "csv",
"sample_file": "./sample.csv",
"tags_file": "",
"columns": [{"type": "INT"}, {"type": "DOUBLE", "count":10}, {"type": "BINARY", "len": 16, "count":3}, {"type": "BINARY", "len": 32, "count":6}],
"tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":5}]
}]
}]
}
This diff is collapsed.
......@@ -72,6 +72,11 @@ typedef struct SInterval {
int64_t offset;
} SInterval;
typedef struct SSessionWindow {
int64_t gap; // gap between two session windows (in microseconds)
int32_t primaryColId; // primary timestamp column
} SSessionWindow;
int64_t taosTimeAdd(int64_t t, int64_t duration, char unit, int32_t precision);
int64_t taosTimeTruncate(int64_t t, const SInterval* pInterval, int32_t precision);
int32_t taosTimeCountInterval(int64_t skey, int64_t ekey, int64_t interval, char unit, int32_t precision);
......
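
`SSessionWindow` carries the session gap and the id of the primary timestamp column; conceptually, consecutive rows whose timestamps differ by no more than `gap` fall into the same session window, and a new window starts whenever the gap is exceeded. A minimal sketch of that grouping rule, using hypothetical row data and function names rather than the engine's actual implementation:

```c
#include <stdint.h>
#include <stdio.h>

typedef int64_t TSKEY;

/* Hypothetical sketch: assign a session-window id to each row. A new window
 * starts whenever the distance to the previous timestamp exceeds `gap`. */
static void assignSessionWindows(const TSKEY *ts, int32_t numOfRows, int64_t gap, int32_t *winId) {
  int32_t cur = 0;
  for (int32_t i = 0; i < numOfRows; ++i) {
    if (i > 0 && ts[i] - ts[i - 1] > gap) {
      ++cur;  /* gap exceeded: start a new session window */
    }
    winId[i] = cur;
  }
}

int main(void) {
  TSKEY   ts[] = {0, 5, 8, 30, 32, 100};
  int32_t winId[6];
  assignSessionWindows(ts, 6, 10, winId);
  for (int i = 0; i < 6; ++i) printf("row %d -> window %d\n", i, winId[i]);
  return 0;
}
```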
......@@ -63,9 +63,11 @@ void httpJsonString(JsonBuf* buf, char* sVal, int32_t len);
void httpJsonOriginString(JsonBuf* buf, char* sVal, int32_t len);
void httpJsonStringForTransMean(JsonBuf* buf, char* SVal, int32_t maxLen);
void httpJsonInt64(JsonBuf* buf, int64_t num);
void httpJsonUInt64(JsonBuf* buf, uint64_t num);
void httpJsonTimestamp(JsonBuf* buf, int64_t t, bool us);
void httpJsonUtcTimestamp(JsonBuf* buf, int64_t t, bool us);
void httpJsonInt(JsonBuf* buf, int32_t num);
void httpJsonUInt(JsonBuf* buf, uint32_t num);
void httpJsonFloat(JsonBuf* buf, float num);
void httpJsonDouble(JsonBuf* buf, double num);
void httpJsonNull(JsonBuf* buf);
......
......@@ -256,6 +256,12 @@ void httpJsonInt64(JsonBuf* buf, int64_t num) {
buf->lst += snprintf(buf->lst, MAX_NUM_STR_SZ, "%" PRId64, num);
}
void httpJsonUInt64(JsonBuf* buf, uint64_t num) {
httpJsonItemToken(buf);
httpJsonTestBuf(buf, MAX_NUM_STR_SZ);
buf->lst += snprintf(buf->lst, MAX_NUM_STR_SZ, "%" PRIu64, num);
}
void httpJsonTimestamp(JsonBuf* buf, int64_t t, bool us) {
char ts[35] = {0};
struct tm* ptm;
......@@ -303,6 +309,12 @@ void httpJsonInt(JsonBuf* buf, int32_t num) {
buf->lst += snprintf(buf->lst, MAX_NUM_STR_SZ, "%d", num);
}
void httpJsonUInt(JsonBuf* buf, uint32_t num) {
httpJsonItemToken(buf);
httpJsonTestBuf(buf, MAX_NUM_STR_SZ);
buf->lst += snprintf(buf->lst, MAX_NUM_STR_SZ, "%u", num);
}
void httpJsonFloat(JsonBuf* buf, float num) {
httpJsonItemToken(buf);
httpJsonTestBuf(buf, MAX_NUM_STR_SZ);
......
......@@ -162,6 +162,18 @@ bool restBuildSqlJson(HttpContext *pContext, HttpSqlCmd *cmd, TAOS_RES *result,
case TSDB_DATA_TYPE_BIGINT:
httpJsonInt64(jsonBuf, *((int64_t *)row[i]));
break;
case TSDB_DATA_TYPE_UTINYINT:
httpJsonUInt(jsonBuf, *((uint8_t *)row[i]));
break;
case TSDB_DATA_TYPE_USMALLINT:
httpJsonUInt(jsonBuf, *((uint16_t *)row[i]));
break;
case TSDB_DATA_TYPE_UINT:
httpJsonUInt(jsonBuf, *((uint32_t *)row[i]));
break;
case TSDB_DATA_TYPE_UBIGINT:
httpJsonUInt64(jsonBuf, *((uint64_t *)row[i]));
break;
case TSDB_DATA_TYPE_FLOAT:
httpJsonFloat(jsonBuf, GET_FLOAT_VAL(row[i]));
break;
......
......@@ -26,6 +26,7 @@ extern "C" {
#include "taosdef.h"
#include "trpc.h"
#include "tvariant.h"
#include "tsdb.h"
#define TSDB_FUNC_INVALID_ID -1
#define TSDB_FUNC_COUNT 0
......@@ -70,15 +71,17 @@ extern "C" {
#define TSDB_FUNC_AVG_IRATE 34
#define TSDB_FUNC_TID_TAG 35
#define TSDB_FUNC_HISTOGRAM 36
#define TSDB_FUNC_HLL 37
#define TSDB_FUNC_MODE 38
#define TSDB_FUNC_SAMPLE 39
#define TSDB_FUNC_CEIL 40
#define TSDB_FUNC_FLOOR 41
#define TSDB_FUNC_ROUND 42
#define TSDB_FUNC_MAVG 43
#define TSDB_FUNC_CSUM 44
#define TSDB_FUNC_BLKINFO 36
#define TSDB_FUNC_HISTOGRAM 37
#define TSDB_FUNC_HLL 38
#define TSDB_FUNC_MODE 39
#define TSDB_FUNC_SAMPLE 40
#define TSDB_FUNC_CEIL 41
#define TSDB_FUNC_FLOOR 42
#define TSDB_FUNC_ROUND 43
#define TSDB_FUNC_MAVG 44
#define TSDB_FUNC_CSUM 45
#define TSDB_FUNCSTATE_SO 0x1u // single output
......@@ -214,13 +217,14 @@ typedef struct SAggFunctionInfo {
void (*xFinalize)(SQLFunctionCtx *pCtx);
void (*mergeFunc)(SQLFunctionCtx *pCtx);
int32_t (*dataReqFunc)(SQLFunctionCtx *pCtx, TSKEY start, TSKEY end, int32_t colId);
int32_t (*dataReqFunc)(SQLFunctionCtx *pCtx, STimeWindow* w, int32_t colId);
} SAggFunctionInfo;
#define GET_RES_INFO(ctx) ((ctx)->resultInfo)
int32_t getResultDataInfo(int32_t dataType, int32_t dataBytes, int32_t functionId, int32_t param, int16_t *type,
int16_t *len, int32_t *interBytes, int16_t extLength, bool isSuperTable);
int32_t isValidFunction(const char* name, int32_t len);
#define IS_STREAM_QUERY_VALID(x) (((x)&TSDB_FUNCSTATE_STREAM) != 0)
#define IS_MULTIOUTPUT(x) (((x)&TSDB_FUNCSTATE_MO) != 0)
......@@ -242,12 +246,16 @@ typedef struct STwaInfo {
STimeWindow win;
} STwaInfo;
struct SBufferWriter;
void blockDistInfoToBinary(STableBlockDist* pDist, struct SBufferWriter* bw);
void blockDistInfoFromBinary(const char* data, int32_t len, STableBlockDist* pDist);
/* global sql function array */
extern struct SAggFunctionInfo aAggs[];
extern int32_t functionCompatList[]; // compatible check array list
bool topbot_datablock_filter(SQLFunctionCtx *pCtx, int32_t functionId, const char *minval, const char *maxval);
bool topbot_datablock_filter(SQLFunctionCtx *pCtx, const char *minval, const char *maxval);
/**
* the numOfRes should be kept, since it may be used later
......@@ -258,14 +266,14 @@ bool topbot_datablock_filter(SQLFunctionCtx *pCtx, int32_t functionId, const cha
(_r)->initialized = false; \
} while (0)
static FORCE_INLINE void initResultInfo(SResultRowCellInfo *pResInfo, uint32_t bufLen) {
static FORCE_INLINE void initResultInfo(SResultRowCellInfo *pResInfo, int32_t bufLen) {
pResInfo->initialized = true; // flag that this struct has been initialized
pResInfo->complete = false;
pResInfo->hasResult = false;
pResInfo->numOfRes = 0;
memset(GET_ROWCELL_INTERBUF(pResInfo), 0, (size_t)bufLen);
memset(GET_ROWCELL_INTERBUF(pResInfo), 0, bufLen);
}
#ifdef __cplusplus
......
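aAggs[] above is a dispatch table: each SAggFunctionInfo entry bundles the callbacks for one SQL function, which is why inserting TSDB_FUNC_BLKINFO shifts all later function ids by one. The sketch below only illustrates the table-of-function-pointers pattern; the names and fields are illustrative, not the real SAggFunctionInfo layout:

#include <inttypes.h>
#include <stdio.h>

// Illustrative aggregate registry: each entry owns the callbacks needed to
// run one aggregate function.
typedef struct SDemoAggFn {
  const char *name;
  void    (*init)(int64_t *state);
  void    (*add)(int64_t *state, int64_t val);
  int64_t (*finalize)(const int64_t *state);
} SDemoAggFn;

static void    sumInit(int64_t *s)              { *s = 0; }
static void    sumAdd(int64_t *s, int64_t v)    { *s += v; }
static int64_t sumFinalize(const int64_t *s)    { return *s; }

static const SDemoAggFn demoAggs[] = {
    {"sum", sumInit, sumAdd, sumFinalize},
};

int main(void) {
  int64_t state;
  const SDemoAggFn *fn = &demoAggs[0];
  fn->init(&state);
  for (int64_t v = 1; v <= 4; ++v) fn->add(&state, v);
  printf("%s = %" PRId64 "\n", fn->name, fn->finalize(&state));  // sum = 10
  return 0;
}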
......@@ -12,8 +12,8 @@
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef TDENGINE_QUERYEXECUTOR_H
#define TDENGINE_QUERYEXECUTOR_H
#ifndef TDENGINE_QEXECUTOR_H
#define TDENGINE_QEXECUTOR_H
#include "os.h"
......@@ -37,30 +37,24 @@ typedef int32_t (*__block_search_fn_t)(char* data, int32_t num, int64_t key, int
#define Q_STATUS_EQUAL(p, s) (((p) & (s)) != 0u)
#define QUERY_IS_ASC_QUERY(q) (GET_FORWARD_DIRECTION_FACTOR((q)->order.order) == QUERY_ASC_FORWARD_STEP)
#define SET_STABLE_QUERY_OVER(_q) ((_q)->tableIndex = (int32_t)((_q)->tableqinfoGroupInfo.numOfTables))
#define IS_STASBLE_QUERY_OVER(_q) ((_q)->tableIndex >= (int32_t)((_q)->tableqinfoGroupInfo.numOfTables))
#define GET_TABLEGROUP(q, _index) ((SArray*) taosArrayGetP((q)->tableqinfoGroupInfo.pGroupList, (_index)))
#define GET_NUM_OF_RESULTS(_r) (((_r)->outputBuf) == NULL? 0:((_r)->outputBuf)->info.rows)
enum {
// when query starts to execute, this status will set
QUERY_NOT_COMPLETED = 0x1u,
/* result output buffer is full, current query is paused.
* this status is only exist in group-by clause and diff/add/division/multiply/ query.
*/
QUERY_RESBUF_FULL = 0x2u,
/* query is over
* 1. this status is used in one row result query process, e.g., count/sum/first/last/ avg...etc.
* 2. when all data within queried time window, it is also denoted as query_completed
*/
QUERY_COMPLETED = 0x4u,
QUERY_COMPLETED = 0x2u,
/* when the result is not completed return to client, this status will be
* usually used in case of interval query with interpolation option
*/
QUERY_OVER = 0x8u,
QUERY_OVER = 0x4u,
};
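With QUERY_RESBUF_FULL removed, the remaining statuses are renumbered but stay distinct powers of two, because Q_STATUS_EQUAL tests them as a bit mask and several flags can be set on the same query at once. A small standalone check of that usage:

#include <stdio.h>

#define Q_STATUS_EQUAL(p, s) (((p) & (s)) != 0u)

enum {
  QUERY_NOT_COMPLETED = 0x1u,
  QUERY_COMPLETED     = 0x2u,
  QUERY_OVER          = 0x4u,
};

int main(void) {
  unsigned int status = QUERY_COMPLETED | QUERY_OVER;
  printf("%d\n", Q_STATUS_EQUAL(status, QUERY_COMPLETED));      // 1
  printf("%d\n", Q_STATUS_EQUAL(status, QUERY_NOT_COMPLETED));  // 0
  return 0;
}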
typedef struct SResultRowPool {
......@@ -86,13 +80,13 @@ typedef struct SSqlGroupbyExpr {
typedef struct SResultRow {
int32_t pageId; // pageId & rowId is the position of current result in disk-based output buffer
int32_t rowId:29; // row index in buffer page
int32_t offset:29; // row index in buffer page
bool startInterp; // the time window start timestamp has done the interpolation already.
bool endInterp; // the time window end timestamp has done the interpolation already.
bool closed; // this result status: closed or opened
uint32_t numOfRows; // number of rows of current time window
SResultRowCellInfo* pCellInfo; // For each result column, there is a resultInfo
union {STimeWindow win; char* key;}; // start key of current time window
union {STimeWindow win; char* key;}; // start key of current result row
} SResultRow;
typedef struct SGroupResInfo {
......@@ -106,12 +100,11 @@ typedef struct SGroupResInfo {
* If the number of generated results is greater than this value,
 * the query will be halted and results returned to the client immediately.
*/
typedef struct SResultRec {
typedef struct SRspResultInfo {
int64_t total; // total generated result size in rows
int64_t rows; // current result set size in rows
int64_t capacity; // capacity of current result output buffer
int32_t capacity; // capacity of current result output buffer
int32_t threshold; // result size threshold in rows.
} SResultRec;
} SRspResultInfo;
typedef struct SResultRowInfo {
SResultRow** pResult; // result list
......@@ -138,7 +131,6 @@ typedef struct SSingleColumnFilterInfo {
typedef struct STableQueryInfo {
TSKEY lastKey;
int32_t groupIndex; // group id in table list
int16_t queryRangeSet; // denote if the query range is set, only available for interval query
tVariant tag;
STimeWindow win;
STSCursor cur;
......@@ -179,82 +171,136 @@ typedef struct {
SArray* pResult; // SArray<SStddevInterResult>
} SInterResult;
typedef struct SSDataBlock {
SDataStatis *pBlockStatis;
SArray *pDataBlock;
SDataBlockInfo info;
} SSDataBlock;
typedef struct SQuery {
SLimitVal limit;
bool stableQuery; // super table query or not
bool topBotQuery; // TODO used bitwise flag
bool groupbyColumn; // denote if this is a groupby normal column query
bool hasTagResults; // if there are tag values in final result or not
bool timeWindowInterpo;// if the time window start/end required interpolation
bool queryBlockDist; // if query data block distribution
bool stabledev; // super table stddev query
int32_t interBufSize; // intermediate buffer size
SOrderVal order;
int16_t numOfCols;
int16_t numOfTags;
SOrderVal order;
STimeWindow window;
SInterval interval;
SSessionWindow sw;
int16_t precision;
int16_t numOfOutput;
int16_t fillType;
int16_t checkResultBuf; // check if the buffer is full during scan each block
SLimitVal limit;
int32_t srcRowSize; // todo extract struct
int32_t resultRowSize;
int32_t intermediateResultRowSize; // intermediate result row size, in case of top-k query.
int32_t maxSrcColumnSize;
int32_t tagLen; // tag value length of current query
SSqlGroupbyExpr* pGroupbyExpr;
SExprInfo* pExpr1;
SExprInfo* pExpr2;
int32_t numOfExpr2;
SColumnInfo* colList;
SColumnInfo* tagColList;
int32_t numOfFilterCols;
int64_t* fillVal;
uint32_t status; // query status
SResultRec rec;
int32_t pos;
tFilePage** sdata;
STableQueryInfo* current;
int32_t numOfCheckedBlocks; // number of check data blocks
SOrderedPrjQueryInfo prjInfo; // limit value for each vgroup, only available in global order projection query.
SSingleColumnFilterInfo* pFilterInfo;
STableQueryInfo* current;
void* tsdb;
SMemRef memRef;
STableGroupInfo tableGroupInfo; // table <tid, last_key> list SArray<STableKeyInfo>
int32_t vgId;
} SQuery;
typedef SSDataBlock* (*__operator_fn_t)(void* param);
typedef void (*__optr_cleanup_fn_t)(void* param, int32_t num);
struct SOperatorInfo;
typedef struct SQueryRuntimeEnv {
jmp_buf env;
SQuery* pQuery;
SQLFunctionCtx* pCtx;
int32_t numOfRowsPerPage;
uint16_t* offset;
uint16_t scanFlag; // denotes reversed scan of data or not
SFillInfo* pFillInfo;
SResultRowInfo resultRowInfo;
SQueryCostInfo summary;
void* pQueryHandle;
void* pSecQueryHandle; // another thread for
bool stableQuery; // super table query or not
bool topBotQuery; // TODO used bitwise flag
bool groupbyColumn; // denote if this is a groupby normal column query
bool hasTagResults; // if there are tag values in final result or not
bool timeWindowInterpo;// if the time window start/end required interpolation
bool queryWindowIdentical; // all query time windows are identical for all tables in one group
bool queryBlockDist; // if query data block distribution
bool stabledev; // super table stddev query
int32_t interBufSize; // intermediate buffer size
int32_t prevGroupId; // previous executed group id
SDiskbasedResultBuf* pResultBuf; // query result buffer based on blocked-wised disk file
SHashObj* pResultRowHashTable; // quick locate the window object for each result
char* keyBuf; // window key buffer
SResultRowPool* pool; // window result object pool
int32_t* rowCellInfoOffset;// offset value for each row result cell info
char** prevRow;
SArray* prevResult; // intermediate result, SArray<SInterResult>
STSBuf* pTsBuf; // timestamp filter list
STSCursor cur;
char* tagVal; // tag value of current data block
SArithmeticSupport *sasArray;
jmp_buf env;
SQuery* pQuery;
uint32_t status; // query status
void* qinfo;
uint8_t scanFlag; // denotes reversed scan of data or not
void* pQueryHandle;
int32_t prevGroupId; // previous executed group id
SDiskbasedResultBuf* pResultBuf; // query result buffer based on blocked-wised disk file
SHashObj* pResultRowHashTable; // quick locate the window object for each result
char* keyBuf; // window key buffer
SResultRowPool* pool; // window result object pool
char** prevRow;
SArray* prevResult; // intermediate result, SArray<SInterResult>
STSBuf* pTsBuf; // timestamp filter list
STSCursor cur;
char* tagVal; // tag value of current data block
SArithmeticSupport *sasArray;
SSDataBlock *outputBuf;
STableGroupInfo tableqinfoGroupInfo; // this is a group array list, including SArray<STableQueryInfo*> structure
struct SOperatorInfo *proot;
struct SOperatorInfo *pTableScanner; // table scan operator
SGroupResInfo groupResInfo;
int64_t currentOffset; // dynamic offset value
SRspResultInfo resultInfo;
SHashObj *pTableRetrieveTsMap;
} SQueryRuntimeEnv;
enum {
OP_IN_EXECUTING = 1,
OP_RES_TO_RETURN = 2,
OP_EXEC_DONE = 3,
};
enum OPERATOR_TYPE_E {
OP_TableScan = 1,
OP_DataBlocksOptScan = 2,
OP_TableSeqScan = 3,
OP_TagScan = 4,
OP_TableBlockInfoScan= 5,
OP_Aggregate = 6,
OP_Arithmetic = 7,
OP_Groupby = 8,
OP_Limit = 9,
OP_Offset = 10,
OP_TimeWindow = 11,
OP_SessionWindow = 12,
OP_Fill = 13,
OP_MultiTableAggregate = 14,
OP_MultiTableTimeInterval = 15,
};
typedef struct SOperatorInfo {
uint8_t operatorType;
bool blockingOptr; // block operator or not
uint8_t status; // denote if current operator is completed
int32_t numOfOutput; // number of columns of the current operator results
char *name; // name, used to show the query execution plan
void *info; // extension attribution
SExprInfo *pExpr;
SQueryRuntimeEnv *pRuntimeEnv;
struct SOperatorInfo *upstream;
__operator_fn_t exec;
__optr_cleanup_fn_t cleanup;
} SOperatorInfo;
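SOperatorInfo is the building block of a pull-based operator chain: each operator exposes an exec callback and pulls its input blocks from its upstream operator. The sketch below illustrates that execution model with simplified stand-in types (a scan feeding a limit); it is not the engine's code:

#include <stdio.h>

typedef struct SDemoBlock { int rows; } SDemoBlock;

typedef struct SDemoOperator {
  const char *name;
  struct SDemoOperator *upstream;                    // data source of this operator
  SDemoBlock *(*exec)(struct SDemoOperator *pOptr);  // next block, NULL when done
  int state;                                         // operator-private state
} SDemoOperator;

static SDemoBlock *scanExec(SDemoOperator *pOptr) {
  if (pOptr->state >= 3) return NULL;  // pretend the table has 3 blocks
  static SDemoBlock b;
  b.rows = 100;
  pOptr->state++;
  return &b;
}

static SDemoBlock *limitExec(SDemoOperator *pOptr) {
  if (pOptr->state <= 0) return NULL;  // limit already satisfied
  SDemoBlock *pBlock = pOptr->upstream->exec(pOptr->upstream);
  if (pBlock == NULL) return NULL;
  if (pBlock->rows > pOptr->state) pBlock->rows = pOptr->state;
  pOptr->state -= pBlock->rows;
  return pBlock;
}

int main(void) {
  SDemoOperator scan  = {"TableScan", NULL, scanExec, 0};
  SDemoOperator limit = {"Limit", &scan, limitExec, 150};  // LIMIT 150
  SDemoBlock *pBlock;
  while ((pBlock = limit.exec(&limit)) != NULL) {
    printf("%s returned %d rows\n", limit.name, pBlock->rows);  // 100, then 50
  }
  return 0;
}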
enum {
QUERY_RESULT_NOT_READY = 1,
QUERY_RESULT_READY = 2,
......@@ -263,23 +309,11 @@ enum {
typedef struct SQInfo {
void* signature;
uint64_t qId;
int32_t code; // error code to returned to client
int64_t owner; // if it is in execution
void* tsdb;
SMemRef memRef;
int32_t vgId;
STableGroupInfo tableGroupInfo; // table <tid, last_key> list SArray<STableKeyInfo>
STableGroupInfo tableqinfoGroupInfo; // this is a group array list, including SArray<STableQueryInfo*> structure
SQueryRuntimeEnv runtimeEnv;
SHashObj* arrTableIdInfo;
int32_t groupIndex;
int32_t code; // error code to returned to client
int64_t owner; // if it is in execution
/*
* the query is executed position on which meter of the whole list.
* when the index reaches the last one of the list, it means the query is completed.
*/
int32_t tableIndex;
SGroupResInfo groupResInfo;
SQueryRuntimeEnv runtimeEnv;
SQuery query;
void* pBuf; // allocated buffer for STableQueryInfo, sizeof(STableQueryInfo)*numOfTables;
pthread_mutex_t lock; // used to synchronize the rsp/query threads
......@@ -288,6 +322,7 @@ typedef struct SQInfo {
void* rspContext; // response context
int64_t startExecTs; // start to exec timestamp
char* sql; // query sql string
SQueryCostInfo summary;
} SQInfo;
typedef struct SQueryParam {
......@@ -306,10 +341,93 @@ typedef struct SQueryParam {
SSqlGroupbyExpr *pGroupbyExpr;
} SQueryParam;
typedef struct STableScanInfo {
void *pQueryHandle;
int32_t numOfBlocks;
int32_t numOfSkipped;
int32_t numOfBlockStatis;
int64_t numOfRows;
int32_t order; // scan order
int32_t times; // repeat counts
int32_t current;
int32_t reverseTimes; // 0 by default
SQLFunctionCtx *pCtx; // next operator query context
SResultRowInfo *pResultRowInfo;
int32_t *rowCellInfoOffset;
SExprInfo *pExpr;
SSDataBlock block;
bool loadExternalRows; // load external rows (prev & next rows)
int32_t numOfOutput;
int64_t elapsedTime;
int32_t tableIndex;
} STableScanInfo;
typedef struct STagScanInfo {
SColumnInfo* pCols;
SSDataBlock* pRes;
int32_t totalTables;
int32_t currentIndex;
} STagScanInfo;
typedef struct SOptrBasicInfo {
SResultRowInfo resultRowInfo;
int32_t *rowCellInfoOffset; // offset value for each row result cell info
SQLFunctionCtx *pCtx;
SSDataBlock *pRes;
} SOptrBasicInfo;
typedef struct SOptrBasicInfo STableIntervalOperatorInfo;
typedef struct SAggOperatorInfo {
SOptrBasicInfo binfo;
uint32_t seed;
} SAggOperatorInfo;
typedef struct SArithOperatorInfo {
SOptrBasicInfo binfo;
int32_t bufCapacity;
uint32_t seed;
} SArithOperatorInfo;
typedef struct SLimitOperatorInfo {
int64_t limit;
int64_t total;
} SLimitOperatorInfo;
typedef struct SOffsetOperatorInfo {
int64_t offset;
} SOffsetOperatorInfo;
typedef struct SFillOperatorInfo {
SFillInfo *pFillInfo;
SSDataBlock *pRes;
int64_t totalInputRows;
} SFillOperatorInfo;
typedef struct SGroupbyOperatorInfo {
SOptrBasicInfo binfo;
int32_t colIndex;
char *prevData; // previous group by value
} SGroupbyOperatorInfo;
typedef struct SSWindowOperatorInfo {
SOptrBasicInfo binfo;
STimeWindow curWindow; // current time window
TSKEY prevTs; // previous timestamp
int32_t numOfRows; // number of rows
int32_t start; // start row index
} SSWindowOperatorInfo;
void freeParam(SQueryParam *param);
int32_t convertQueryMsg(SQueryTableMsg *pQueryMsg, SQueryParam* param);
int32_t createQueryFuncExprFromMsg(SQueryTableMsg *pQueryMsg, int32_t numOfOutput, SExprInfo **pExprInfo, SSqlFuncMsg **pExprMsg,
SColumnInfo* pTagCols);
int32_t createIndirectQueryFuncExprFromMsg(SQueryTableMsg *pQueryMsg, int32_t numOfOutput, SExprInfo **pExprInfo,
SSqlFuncMsg **pExprMsg, SExprInfo *prevExpr);
SSqlGroupbyExpr *createGroupbyExprFromMsg(SQueryTableMsg *pQueryMsg, SColIndex *pColIndex, int32_t *code);
SQInfo *createQInfoImpl(SQueryTableMsg *pQueryMsg, SSqlGroupbyExpr *pGroupbyExpr, SExprInfo *pExprs,
SExprInfo *pSecExprs, STableGroupInfo *pTableGroupInfo, SColumnInfo* pTagCols, bool stableQuery, char* sql, uint64_t *qId);
......@@ -319,13 +437,9 @@ void freeColumnFilterInfo(SColumnFilterInfo* pFilter, int32_t numOfFilters);
bool isQueryKilled(SQInfo *pQInfo);
int32_t checkForQueryBuf(size_t numOfTables);
bool doBuildResCheck(SQInfo* pQInfo);
void setQueryStatus(SQuery *pQuery, int8_t status);
void setQueryStatus(SQueryRuntimeEnv *pRuntimeEnv, int8_t status);
bool onlyQueryTags(SQuery* pQuery);
void buildTagQueryResult(SQInfo *pQInfo);
void stableQueryImpl(SQInfo *pQInfo);
void buildTableBlockDistResult(SQInfo *pQInfo);
void tableQueryImpl(SQInfo *pQInfo);
bool isValidQInfo(void *param);
int32_t doDumpQueryResult(SQInfo *pQInfo, char *data);
......@@ -337,4 +451,4 @@ void freeQInfo(SQInfo *pQInfo);
int32_t getMaximumIdleDurationSec();
#endif // TDENGINE_QUERYEXECUTOR_H
#endif // TDENGINE_QEXECUTOR_H
......@@ -24,6 +24,8 @@ extern "C" {
#include "qExtbuffer.h"
#include "taosdef.h"
struct SSDataBlock;
typedef struct {
STColumn col; // column info
int16_t functionId; // sql function id
......@@ -78,7 +80,7 @@ void* taosDestroyFillInfo(SFillInfo *pFillInfo);
void taosFillSetStartInfo(SFillInfo* pFillInfo, int32_t numOfRows, TSKEY endKey);
void taosFillSetDataBlockFromFilePage(SFillInfo* pFillInfo, const tFilePage** pInput);
void taosFillSetInputDataBlock(SFillInfo* pFillInfo, const struct SSDataBlock* pInput);
void taosFillCopyInputDataFromOneFilePage(SFillInfo* pFillInfo, const tFilePage* pInput);
......@@ -88,7 +90,7 @@ int64_t getNumOfResultsAfterFillGap(SFillInfo* pFillInfo, int64_t ekey, int32_t
int32_t taosGetLinearInterpolationVal(SPoint* point, int32_t outputType, SPoint* point1, SPoint* point2, int32_t inputType);
int64_t taosFillResultDataBlock(SFillInfo* pFillInfo, tFilePage** output, int32_t capacity);
int64_t taosFillResultDataBlock(SFillInfo* pFillInfo, void** output, int32_t capacity);
#ifdef __cplusplus
}
......
......@@ -55,7 +55,6 @@ typedef struct SResultBufStatis {
} SResultBufStatis;
typedef struct SDiskbasedResultBuf {
int32_t numOfRowsPerPage;
int32_t numOfPages;
int64_t totalBufSize;
int64_t fileSize; // disk file size
......@@ -77,7 +76,7 @@ typedef struct SDiskbasedResultBuf {
SResultBufStatis statis;
} SDiskbasedResultBuf;
#define DEFAULT_INTERN_BUF_PAGE_SIZE (256L) // in bytes
#define DEFAULT_INTERN_BUF_PAGE_SIZE (1024L) // in bytes
#define PAGE_INFO_INITIALIZER (SPageDiskInfo){-1, -1}
/**
......@@ -89,8 +88,7 @@ typedef struct SDiskbasedResultBuf {
* @param handle
* @return
*/
int32_t createDiskbasedResultBuffer(SDiskbasedResultBuf** pResultBuf, int32_t rowSize, int32_t pagesize,
int32_t inMemBufSize, const void* handle);
int32_t createDiskbasedResultBuffer(SDiskbasedResultBuf** pResultBuf, int32_t pagesize, int32_t inMemBufSize, const void* handle);
/**
*
......@@ -101,13 +99,6 @@ int32_t createDiskbasedResultBuffer(SDiskbasedResultBuf** pResultBuf, int32_t ro
*/
tFilePage* getNewDataBuf(SDiskbasedResultBuf* pResultBuf, int32_t groupId, int32_t* pageId);
/**
*
* @param pResultBuf
* @return
*/
size_t getNumOfRowsPerPage(const SDiskbasedResultBuf* pResultBuf);
/**
*
* @param pResultBuf
......
This diff is collapsed.
......@@ -112,13 +112,11 @@ STSBuf* tsBufClone(STSBuf* pTSBuf);
STSGroupBlockInfo* tsBufGetGroupBlockInfo(STSBuf* pTSBuf, int32_t id);
void tsBufFlush(STSBuf* pTSBuf);
void tsBufFlush(STSBuf* pTSBuf);
void tsBufResetPos(STSBuf* pTSBuf);
STSElem tsBufGetElem(STSBuf* pTSBuf);
bool tsBufNextPos(STSBuf* pTSBuf);
STSElem tsBufGetElem(STSBuf* pTSBuf);
STSElem tsBufGetElemStartPos(STSBuf* pTSBuf, int32_t id, tVariant* tag);
STSCursor tsBufGetCursor(STSBuf* pTSBuf);
......
......@@ -27,7 +27,7 @@
#define GET_RES_WINDOW_KEY_LEN(_l) ((_l) + sizeof(uint64_t))
#define curTimeWindowIndex(_winres) ((_winres)->curIndex)
#define GET_ROW_PARAM_FOR_MULTIOUTPUT(_q, tbq, sq) (((tbq) && (!sq))? (_q)->pExpr1[1].base.arg->argValue.i64:1)
#define GET_ROW_PARAM_FOR_MULTIOUTPUT(_q, tbq, sq) (((tbq) && (!(sq)))? (_q)->pExpr1[1].base.arg->argValue.i64:1)
int32_t getOutputInterResultBufSize(SQuery* pQuery);
......@@ -44,22 +44,18 @@ void closeResultRow(SResultRowInfo* pResultRowInfo, int32_t slot);
bool isResultRowClosed(SResultRowInfo *pResultRowInfo, int32_t slot);
void clearResultRow(SQueryRuntimeEnv* pRuntimeEnv, SResultRow* pResultRow, int16_t type);
SResultRowCellInfo* getResultCell(SQueryRuntimeEnv* pRuntimeEnv, const SResultRow* pRow, int32_t index);
SResultRowCellInfo* getResultCell(const SResultRow* pRow, int32_t index, int32_t* offset);
static FORCE_INLINE SResultRow *getResultRow(SResultRowInfo *pResultRowInfo, int32_t slot) {
assert(pResultRowInfo != NULL && slot >= 0 && slot < pResultRowInfo->size);
return pResultRowInfo->pResult[slot];
}
static FORCE_INLINE char *getPosInResultPage(SQueryRuntimeEnv *pRuntimeEnv, int32_t columnIndex, SResultRow *pResult,
tFilePage* page) {
assert(pResult != NULL && pRuntimeEnv != NULL);
static FORCE_INLINE char *getPosInResultPage(SQuery *pQuery, tFilePage* page, int32_t rowOffset, int16_t offset) {
assert(rowOffset >= 0 && pQuery != NULL);
SQuery *pQuery = pRuntimeEnv->pQuery;
int32_t realRowId = (int32_t)(pResult->rowId * GET_ROW_PARAM_FOR_MULTIOUTPUT(pQuery, pRuntimeEnv->topBotQuery, pRuntimeEnv->stableQuery));
return ((char *)page->data) + pRuntimeEnv->offset[columnIndex] * pRuntimeEnv->numOfRowsPerPage +
pQuery->pExpr1[columnIndex].bytes * realRowId;
int32_t numOfRows = (int32_t)GET_ROW_PARAM_FOR_MULTIOUTPUT(pQuery, pQuery->topBotQuery, pQuery->stableQuery);
return ((char *)page->data) + rowOffset + offset * numOfRows;
}
bool isNullOperator(SColumnFilterElem *pFilter, const char* minval, const char* maxval, int16_t type);
......@@ -74,8 +70,6 @@ void* destroyResultRowPool(SResultRowPool* p);
int32_t getNumOfAllocatedResultRows(SResultRowPool* p);
int32_t getNumOfUsedResultRows(SResultRowPool* p);
bool isPointInterpoQuery(SQuery *pQuery);
typedef struct {
SArray* pResult; // SArray<SResPair>
int32_t colId;
......@@ -85,12 +79,14 @@ void interResToBinary(SBufferWriter* bw, SArray* pRes, int32_t tagLen);
SArray* interResFromBinary(const char* data, int32_t len);
void freeInterResult(void* param);
void initGroupResInfo(SGroupResInfo* pGroupResInfo, SResultRowInfo* pResultInfo, int32_t offset);
void initGroupResInfo(SGroupResInfo* pGroupResInfo, SResultRowInfo* pResultInfo);
void cleanupGroupResInfo(SGroupResInfo* pGroupResInfo);
bool hasRemainDataInCurrentGroup(SGroupResInfo* pGroupResInfo);
bool hasRemainData(SGroupResInfo* pGroupResInfo);
bool incNextGroup(SGroupResInfo* pGroupResInfo);
int32_t getNumOfTotalRes(SGroupResInfo* pGroupResInfo);
int32_t mergeIntoGroupResult(SGroupResInfo* pGroupResInfo, SQInfo *pQInfo);
int32_t mergeIntoGroupResult(SGroupResInfo* pGroupResInfo, SQueryRuntimeEnv *pRuntimeEnv, int32_t* offset);
#endif // TDENGINE_QUERYUTIL_H
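The only change to GET_ROW_PARAM_FOR_MULTIOUTPUT above is parenthesizing the sq argument, which matters once the macro is invoked with an expression rather than a plain variable. A standalone illustration of the hazard, using simplified macros of the same shape:

#include <stdio.h>

#define PICK_OLD(tbq, sq) (((tbq) && (!sq))   ? 10 : 1)
#define PICK_NEW(tbq, sq) (((tbq) && (!(sq))) ? 10 : 1)

int main(void) {
  int a = 1, b = 0;
  // Passing "a & b" as sq: the old form expands to (!a & b) == 0, the new
  // form to !(a & b) == 1, so the two macros pick different branches.
  printf("old: %d\n", PICK_OLD(1, a & b));  // prints 1
  printf("new: %d\n", PICK_NEW(1, a & b));  // prints 10
  return 0;
}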
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
......@@ -23,18 +23,19 @@
#include "qFill.h"
#include "qExtbuffer.h"
#include "queryLog.h"
#include "qExecutor.h"
#define FILL_IS_ASC_FILL(_f) ((_f)->order == TSDB_ORDER_ASC)
#define DO_INTERPOLATION(_v1, _v2, _k1, _k2, _k) ((_v1) + ((_v2) - (_v1)) * (((double)(_k)) - ((double)(_k1))) / (((double)(_k2)) - ((double)(_k1))))
static void setTagsValue(SFillInfo* pFillInfo, tFilePage** data, int32_t genRows) {
static void setTagsValue(SFillInfo* pFillInfo, void** data, int32_t genRows) {
for(int32_t j = 0; j < pFillInfo->numOfCols; ++j) {
SFillColInfo* pCol = &pFillInfo->pFillCol[j];
if (TSDB_COL_IS_NORMAL_COL(pCol->flag)) {
continue;
}
char* val1 = elePtrAt(data[j]->data, pCol->col.bytes, genRows);
char* val1 = elePtrAt(data[j], pCol->col.bytes, genRows);
assert(pCol->tagIndex >= 0 && pCol->tagIndex < pFillInfo->numOfTags);
SFillTagColInfo* pTag = &pFillInfo->pTags[pCol->tagIndex];
......@@ -44,17 +45,17 @@ static void setTagsValue(SFillInfo* pFillInfo, tFilePage** data, int32_t genRows
}
}
static void setNullValueForRow(SFillInfo* pFillInfo, tFilePage** data, int32_t numOfCol, int32_t rowIndex) {
static void setNullValueForRow(SFillInfo* pFillInfo, void** data, int32_t numOfCol, int32_t rowIndex) {
// the first are always the timestamp column, so start from the second column.
for (int32_t i = 1; i < numOfCol; ++i) {
SFillColInfo* pCol = &pFillInfo->pFillCol[i];
char* output = elePtrAt(data[i]->data, pCol->col.bytes, rowIndex);
char* output = elePtrAt(data[i], pCol->col.bytes, rowIndex);
setNull(output, pCol->col.type, pCol->col.bytes);
}
}
static void doFillOneRowResult(SFillInfo* pFillInfo, tFilePage** data, char** srcData, int64_t ts, bool outOfBound) {
static void doFillOneRowResult(SFillInfo* pFillInfo, void** data, char** srcData, int64_t ts, bool outOfBound) {
char* prev = pFillInfo->prevValues;
char* next = pFillInfo->nextValues;
......@@ -63,7 +64,7 @@ static void doFillOneRowResult(SFillInfo* pFillInfo, tFilePage** data, char** sr
// set the primary timestamp column value
int32_t index = pFillInfo->numOfCurrent;
char* val = elePtrAt(data[0]->data, TSDB_KEYSIZE, index);
char* val = elePtrAt(data[0], TSDB_KEYSIZE, index);
*(TSKEY*) val = pFillInfo->currentKey;
// set the other values
......@@ -77,7 +78,7 @@ static void doFillOneRowResult(SFillInfo* pFillInfo, tFilePage** data, char** sr
continue;
}
char* output = elePtrAt(data[i]->data, pCol->col.bytes, index);
char* output = elePtrAt(data[i], pCol->col.bytes, index);
assignVal(output, p + pCol->col.offset, pCol->col.bytes, pCol->col.type);
}
} else { // no prev value yet, set the value for NULL
......@@ -93,7 +94,7 @@ static void doFillOneRowResult(SFillInfo* pFillInfo, tFilePage** data, char** sr
continue;
}
char* output = elePtrAt(data[i]->data, pCol->col.bytes, index);
char* output = elePtrAt(data[i], pCol->col.bytes, index);
assignVal(output, p + pCol->col.offset, pCol->col.bytes, pCol->col.type);
}
} else { // no prev value yet, set the value for NULL
......@@ -111,7 +112,7 @@ static void doFillOneRowResult(SFillInfo* pFillInfo, tFilePage** data, char** sr
int16_t type = pCol->col.type;
int16_t bytes = pCol->col.bytes;
char *val1 = elePtrAt(data[i]->data, pCol->col.bytes, index);
char *val1 = elePtrAt(data[i], pCol->col.bytes, index);
if (type == TSDB_DATA_TYPE_BINARY|| type == TSDB_DATA_TYPE_NCHAR || type == TSDB_DATA_TYPE_BOOL) {
setNull(val1, pCol->col.type, bytes);
continue;
......@@ -132,7 +133,7 @@ static void doFillOneRowResult(SFillInfo* pFillInfo, tFilePage** data, char** sr
continue;
}
char* val1 = elePtrAt(data[i]->data, pCol->col.bytes, index);
char* val1 = elePtrAt(data[i], pCol->col.bytes, index);
assignVal(val1, (char*)&pCol->fillVal.i, pCol->col.bytes, pCol->col.type);
}
}
......@@ -162,7 +163,7 @@ static void copyCurrentRowIntoBuf(SFillInfo* pFillInfo, char** srcData, char* bu
}
}
static int32_t fillResultImpl(SFillInfo* pFillInfo, tFilePage** data, int32_t outputRows) {
static int32_t fillResultImpl(SFillInfo* pFillInfo, void** data, int32_t outputRows) {
pFillInfo->numOfCurrent = 0;
char** srcData = pFillInfo->pData;
......@@ -213,7 +214,7 @@ static int32_t fillResultImpl(SFillInfo* pFillInfo, tFilePage** data, int32_t ou
continue;
}
char* output = elePtrAt(data[i]->data, pCol->col.bytes, pFillInfo->numOfCurrent);
char* output = elePtrAt(data[i], pCol->col.bytes, pFillInfo->numOfCurrent);
char* src = elePtrAt(srcData[i], pCol->col.bytes, pFillInfo->index);
if (i == 0 || (pCol->functionId != TSDB_FUNC_COUNT && !isNull(src, pCol->col.type)) ||
......@@ -255,7 +256,7 @@ static int32_t fillResultImpl(SFillInfo* pFillInfo, tFilePage** data, int32_t ou
return pFillInfo->numOfCurrent;
}
static int64_t appendFilledResult(SFillInfo* pFillInfo, tFilePage** output, int64_t resultCapacity) {
static int64_t appendFilledResult(SFillInfo* pFillInfo, void** output, int64_t resultCapacity) {
/*
* These data are generated according to fill strategy, since the current timestamp is out of the time window of
 * real result set. Note that we need to keep the immediately preceding result rows to generate the filled data.
......@@ -278,7 +279,7 @@ static int32_t setTagColumnInfo(SFillInfo* pFillInfo, int32_t numOfCols, int32_t
int32_t k = 0;
for (int32_t i = 0; i < numOfCols; ++i) {
SFillColInfo* pColInfo = &pFillInfo->pFillCol[i];
pFillInfo->pData[i] = calloc(1, pColInfo->col.bytes * capacity);
pFillInfo->pData[i] = NULL;
if (TSDB_COL_IS_TAG(pColInfo->flag)) {
bool exists = false;
......@@ -356,6 +357,10 @@ SFillInfo* taosCreateFillInfo(int32_t order, TSKEY skey, int32_t numOfTags, int3
pFillInfo->rowSize = setTagColumnInfo(pFillInfo, pFillInfo->numOfCols, pFillInfo->alloc);
assert(pFillInfo->rowSize > 0);
for(int32_t i = 0; i < pFillInfo->numOfCols; ++i) {
pFillInfo->pData[i] = malloc(pFillInfo->pFillCol[i].col.bytes * pFillInfo->alloc);
}
return pFillInfo;
}
......@@ -375,11 +380,16 @@ void* taosDestroyFillInfo(SFillInfo* pFillInfo) {
tfree(pFillInfo->prevValues);
tfree(pFillInfo->nextValues);
tfree(pFillInfo->pTags);
for(int32_t i = 0; i < pFillInfo->numOfTags; ++i) {
tfree(pFillInfo->pTags[i].tagVal);
}
for(int32_t i = 0; i < pFillInfo->numOfCols; ++i) {
tfree(pFillInfo->pData[i]);
}
tfree(pFillInfo->pTags);
tfree(pFillInfo->pData);
tfree(pFillInfo->pFillCol);
......@@ -413,10 +423,19 @@ void taosFillSetStartInfo(SFillInfo* pFillInfo, int32_t numOfRows, TSKEY endKey)
}
}
// copy the data into source data buffer
void taosFillSetDataBlockFromFilePage(SFillInfo* pFillInfo, const tFilePage** pInput) {
void taosFillSetInputDataBlock(SFillInfo* pFillInfo, const SSDataBlock* pInput) {
for (int32_t i = 0; i < pFillInfo->numOfCols; ++i) {
memcpy(pFillInfo->pData[i], pInput[i]->data, pFillInfo->numOfRows * pFillInfo->pFillCol[i].col.bytes);
SColumnInfoData* pColData = taosArrayGet(pInput->pDataBlock, i);
// pFillInfo->pData[i] = pColData->pData;
if (pInput->info.rows > pFillInfo->alloc) {
char* t = realloc(pFillInfo->pData[i], pColData->info.bytes * pInput->info.rows);
assert(t != NULL);
pFillInfo->pData[i] = t;
pFillInfo->alloc = pInput->info.rows;
}
memcpy(pFillInfo->pData[i], pColData->pData, pColData->info.bytes * pInput->info.rows);
}
}
......@@ -427,12 +446,20 @@ void taosFillCopyInputDataFromOneFilePage(SFillInfo* pFillInfo, const tFilePage*
SFillColInfo* pCol = &pFillInfo->pFillCol[i];
const char* data = pInput->data + pCol->col.offset * pInput->num;
memcpy(pFillInfo->pData[i], data, (size_t)(pInput->num * pCol->col.bytes));
if (pInput->num > pFillInfo->alloc) {
char* t = realloc(pFillInfo->pData[i], (size_t)(pCol->col.bytes * pInput->num));
assert(t != NULL);
pFillInfo->pData[i] = t;
pFillInfo->alloc = (int32_t)pInput->num;
}
memcpy(pFillInfo->pData[i], data, (size_t)(pCol->col.bytes * pInput->num));
if (TSDB_COL_IS_TAG(pCol->flag)) { // copy the tag value to tag value buffer
SFillTagColInfo* pTag = &pFillInfo->pTags[pCol->tagIndex];
assert (pTag->col.colId == pCol->col.colId);
memcpy(pTag->tagVal, data, pCol->col.bytes);
memcpy(pTag->tagVal, data, pCol->col.bytes); // TODO not memcpy??
}
}
}
......@@ -490,7 +517,7 @@ int32_t taosGetLinearInterpolationVal(SPoint* point, int32_t outputType, SPoint*
return TSDB_CODE_SUCCESS;
}
int64_t taosFillResultDataBlock(SFillInfo* pFillInfo, tFilePage** output, int32_t capacity) {
int64_t taosFillResultDataBlock(SFillInfo* pFillInfo, void** output, int32_t capacity) {
int32_t remain = taosNumOfRemainRows(pFillInfo);
int64_t numOfRes = getNumOfResultsAfterFillGap(pFillInfo, pFillInfo->end, capacity);
......
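DO_INTERPOLATION above is plain linear interpolation between the two known points (k1, v1) and (k2, v2), and taosGetLinearInterpolationVal builds on it. A quick standalone numeric check of the formula, reusing the macro as defined above:

#include <stdio.h>

#define DO_INTERPOLATION(_v1, _v2, _k1, _k2, _k) ((_v1) + ((_v2) - (_v1)) * (((double)(_k)) - ((double)(_k1))) / (((double)(_k2)) - ((double)(_k1))))

int main(void) {
  // Known points (k1, v1) = (1000, 10.0) and (k2, v2) = (2000, 30.0);
  // the filled value at k = 1500 lands exactly halfway, i.e. 20.0.
  double v = DO_INTERPOLATION(10.0, 30.0, 1000, 2000, 1500);
  printf("interpolated value: %f\n", v);  // 20.000000
  return 0;
}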
......@@ -254,7 +254,7 @@ tMemBucket *tMemBucketCreate(int16_t nElemSize, int16_t dataType, double minval,
resetSlotInfo(pBucket);
int32_t ret = createDiskbasedResultBuffer(&pBucket->pBuffer, pBucket->bytes, pBucket->bufPageSize, pBucket->bufPageSize * 512, NULL);
int32_t ret = createDiskbasedResultBuffer(&pBucket->pBuffer, pBucket->bufPageSize, pBucket->bufPageSize * 512, NULL);
if (ret != TSDB_CODE_SUCCESS) {
tMemBucketDestroy(pBucket);
return NULL;
......
......@@ -9,8 +9,7 @@
#define GET_DATA_PAYLOAD(_p) ((char *)(_p)->pData + POINTER_BYTES)
#define NO_IN_MEM_AVAILABLE_PAGES(_b) (listNEles((_b)->lruList) >= (_b)->inMemPages)
int32_t createDiskbasedResultBuffer(SDiskbasedResultBuf** pResultBuf, int32_t rowSize, int32_t pagesize,
int32_t inMemBufSize, const void* handle) {
int32_t createDiskbasedResultBuffer(SDiskbasedResultBuf** pResultBuf, int32_t pagesize, int32_t inMemBufSize, const void* handle) {
*pResultBuf = calloc(1, sizeof(SDiskbasedResultBuf));
SDiskbasedResultBuf* pResBuf = *pResultBuf;
......@@ -31,7 +30,6 @@ int32_t createDiskbasedResultBuffer(SDiskbasedResultBuf** pResultBuf, int32_t ro
// at least more than 2 pages must be in memory
assert(inMemBufSize >= pagesize * 2);
pResBuf->numOfRowsPerPage = (pagesize - sizeof(tFilePage)) / rowSize;
pResBuf->lruList = tdListNew(POINTER_BYTES);
// init id hash table
......@@ -387,8 +385,6 @@ void releaseResBufPageInfo(SDiskbasedResultBuf* pResultBuf, SPageInfo* pi) {
pResultBuf->statis.releasePages += 1;
}
size_t getNumOfRowsPerPage(const SDiskbasedResultBuf* pResultBuf) { return pResultBuf->numOfRowsPerPage; }
size_t getNumOfResultBufGroupId(const SDiskbasedResultBuf* pResultBuf) { return taosHashGetSize(pResultBuf->groupSet); }
size_t getResBufSize(const SDiskbasedResultBuf* pResultBuf) { return (size_t)pResultBuf->totalBufSize; }
......
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
......@@ -10,7 +10,7 @@ namespace {
// simple test
void simpleTest() {
SDiskbasedResultBuf* pResultBuf = NULL;
int32_t ret = createDiskbasedResultBuffer(&pResultBuf, 64, 1024, 4096, NULL);
int32_t ret = createDiskbasedResultBuffer(&pResultBuf, 1024, 4096, NULL);
int32_t pageId = 0;
int32_t groupId = 0;
......@@ -52,7 +52,7 @@ void simpleTest() {
void writeDownTest() {
SDiskbasedResultBuf* pResultBuf = NULL;
int32_t ret = createDiskbasedResultBuffer(&pResultBuf, 64, 1024, 4*1024, NULL);
int32_t ret = createDiskbasedResultBuffer(&pResultBuf, 1024, 4*1024, NULL);
int32_t pageId = 0;
int32_t writePageId = 0;
......@@ -99,7 +99,7 @@ void writeDownTest() {
void recyclePageTest() {
SDiskbasedResultBuf* pResultBuf = NULL;
int32_t ret = createDiskbasedResultBuffer(&pResultBuf, 64, 1024, 4*1024, NULL);
int32_t ret = createDiskbasedResultBuffer(&pResultBuf, 1024, 4*1024, NULL);
int32_t pageId = 0;
int32_t writePageId = 0;
......
......@@ -15,7 +15,13 @@
#include "tsdbint.h"
#define TSDB_MAX_SUBBLOCKS 8
#define TSDB_KEY_FID(key, days, precision) ((key) / tsMsPerDay[(precision)] / (days))
static FORCE_INLINE int TSDB_KEY_FID(TSKEY key, int32_t days, int8_t precision) {
if (key < 0) {
return (int)(-((-key) / tsMsPerDay[precision] / days + 1));
} else {
return (int)((key / tsMsPerDay[precision] / days));
}
}
typedef struct {
SRtn rtn; // retention snapshot
......
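The switch from a macro to an inline function for TSDB_KEY_FID handles negative timestamps: C integer division truncates toward zero, so without the adjustment every key in the day just before the epoch would land in file group 0 instead of a negative group. A standalone check of the same logic, with 86400000 standing in for tsMsPerDay[precision] at millisecond precision (an assumption made for this example):

#include <stdint.h>
#include <stdio.h>

// Same negative-key adjustment as the new TSDB_KEY_FID, with the per-day
// constant passed in explicitly.
static int keyToFid(int64_t key, int32_t days, int64_t msPerDay) {
  if (key < 0) {
    return (int)(-((-key) / msPerDay / days + 1));
  }
  return (int)(key / msPerDay / days);
}

int main(void) {
  int64_t msPerDay = 86400000LL;
  // A timestamp one millisecond before the epoch must map to a negative file
  // group, but naive truncating division would put it into group 0.
  printf("adjusted: %d\n", keyToFid(-1, 1, msPerDay));           // -1
  printf("naive:    %d\n", (int)(-1LL / msPerDay / 1));          // 0
  printf("positive: %d\n", keyToFid(86400000LL, 1, msPerDay));   // 1
  return 0;
}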
This diff is collapsed.
This diff is collapsed.
......@@ -25,7 +25,8 @@ extern "C" {
#define TARRAY_MIN_SIZE 8
#define TARRAY_GET_ELEM(array, index) ((void*)((char*)((array)->pData) + (index) * (array)->elemSize))
#define TARRAY_ELEM_IDX(array, ele) (POINTER_DISTANCE(ele, (array)->pData) / (array)->elemSize)
#define TARRAY_ELEM_IDX(array, ele) (POINTER_DISTANCE(ele, (array)->pData) / (array)->elemSize)
#define TARRAY_GET_START(array) ((array)->pData)
typedef struct SArray {
size_t size;
......
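TARRAY_GET_ELEM resolves an element address as pData + index * elemSize, and the new TARRAY_GET_START exposes the raw base pointer. The same pointer arithmetic in a standalone form:

#include <stdint.h>
#include <stdio.h>

// Stand-alone equivalent of the element-address computation.
#define DEMO_GET_ELEM(pData, index, elemSize) \
  ((void *)((char *)(pData) + (index) * (elemSize)))

int main(void) {
  int32_t data[4] = {10, 20, 30, 40};
  int32_t *third = (int32_t *)DEMO_GET_ELEM(data, 2, sizeof(int32_t));
  printf("%d\n", *third);  // 30
  return 0;
}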
......@@ -73,14 +73,14 @@ int main( int argc, char** argv ) {
}
*/
typedef struct {
typedef struct SBufferReader {
bool endian;
const char* data;
size_t pos;
size_t size;
} SBufferReader;
typedef struct {
typedef struct SBufferWriter {
bool endian;
char* data;
size_t pos;
......
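Giving the structs tag names (SBufferReader/SBufferWriter) is what allows other headers to forward-declare them, as the aggregate-function header above does with "struct SBufferWriter;" before blockDistInfoToBinary; an anonymous typedef cannot be forward-declared. A minimal illustration of that pattern with a hypothetical SDemoWriter:

#include <stdio.h>

// A consumer can forward-declare the named struct without seeing its layout...
struct SDemoWriter;
void writeSomething(struct SDemoWriter *bw);

// ...while the full definition lives in the owning header.
typedef struct SDemoWriter {
  char  *data;
  size_t pos;
} SDemoWriter;

void writeSomething(struct SDemoWriter *bw) { bw->pos += 1; }

int main(void) {
  SDemoWriter w = {NULL, 0};
  writeSomething(&w);
  printf("pos=%zu\n", w.pos);  // pos=1
  return 0;
}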
......@@ -442,4 +442,4 @@ void vnodeWaitReadCompleted(SVnodeObj *pVnode) {
vTrace("vgId:%d, queued rmsg num:%d", pVnode->vgId, pVnode->queuedRMsg);
taosMsleep(10);
}
}
\ No newline at end of file
}
......@@ -1785,7 +1785,7 @@ class TdSuperTable:
'top(speed, 50)', # TODO: not supported?
'bottom(speed, 50)', # TODO: not supported?
'apercentile(speed, 10)', # TODO: TD-1316
'last_row(speed)',
# 'last_row(speed)', # TODO: commented out per TD-3231, we should re-create
# Transformation Functions
# 'diff(speed)', # TODO: no supported?!
'spread(speed)'
......
This diff is collapsed.
This diff is collapsed.
......@@ -78,9 +78,11 @@ python3 test.py -f query/queryInterval.py
python3 test.py -f query/queryFillTest.py
# tools
python3 test.py -f tools/lowaTest.py
python3 test.py -f tools/taosdemoTest.py
python3 test.py -f tools/taosdemoTestWithoutMetric.py
python3 test.py -f tools/taosdemoTestLimitOffset.py
python3 test.py -f tools/taosdumpTest.py
python3 test.py -f tools/lowaTest.py
#python3 test.py -f tools/taosdemoTest2.py
# subscribe
......
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
......@@ -93,6 +93,7 @@ $halfTbNum = $tbNum / 2
$nchar = 'nchar . $c
$nchar = $nchar . '
$ts = $ts + 1
sql insert into $tb5 values ( $ts , NULL , $c , NULL , $c , NULL , $c , NULL, NULL , $nchar ) $tb6 values ( $ts , NULL , $c , NULL , $c , NULL , $c , NULL, NULL , $nchar ) $tb7 values ( $ts , NULL , $c , NULL , $c , NULL , $c , NULL, NULL , $nchar ) $tb8 values ( $ts , NULL , $c , NULL , $c , NULL , $c , NULL, NULL , $nchar ) $tb9 values ( $ts , NULL , $c , NULL , $c , NULL , $c , NULL, NULL , $nchar )
$x = $x + 1
endw
......
This diff is collapsed.