Commit 44ad2279 authored by Cary Xu

Merge branch 'develop' into feature/TS-238-D

......@@ -82,6 +82,10 @@ tests/comparisonTest/opentsdb/opentsdbtest/.settings/
tests/examples/JDBC/JDBCDemo/.classpath
tests/examples/JDBC/JDBCDemo/.project
tests/examples/JDBC/JDBCDemo/.settings/
tests/script/api/batchprepare
tests/script/api/stmt
tests/script/api/stmtBatchTest
tests/script/api/stmtTest
# Emacs
# -*- mode: gitignore; -*-
......
......@@ -299,8 +299,8 @@ keepColumnName 1
# percent of redundant data in tsdb meta that will trigger compacting meta data, 0 means do not compact
# tsdbMetaCompactRatio 0
# default string type used for storing JSON String, options can be binary/nchar, default is binary
# defaultJSONStrType binary
# default string type used for storing JSON String, options can be binary/nchar, default is nchar
# defaultJSONStrType nchar
# force TCP transmission
# rpcForceTcp 0
......
......@@ -194,6 +194,7 @@ fi
if [[ "$dbName" == "pro" ]]; then
sed -i "s/taos config/prodb config/g" ${top_dir}/src/util/src/tconfig.c
sed -i "s/TDengine/ProDB/g" ${top_dir}/src/dnode/src/dnodeSystem.c
fi
echo "build ${pagMode} package ..."
......
......@@ -119,6 +119,7 @@ typedef struct SBlockKeyInfo {
int32_t converToStr(char *str, int type, void *buf, int32_t bufSize, int32_t *len);
int32_t tscCreateDataBlock(size_t initialSize, int32_t rowSize, int32_t startOffset, SName* name, STableMeta* pTableMeta, STableDataBlocks** dataBlocks);
int32_t tscCreateDataBlockData(STableDataBlocks* dataBuf, size_t defaultSize, int32_t rowSize, int32_t startOffset);
void tscDestroyDataBlock(SSqlObj *pSql, STableDataBlocks* pDataBlock, bool removeMeta);
void tscSortRemoveDataBlockDupRowsRaw(STableDataBlocks* dataBuf);
int tscSortRemoveDataBlockDupRows(STableDataBlocks* dataBuf, SBlockKeyInfo* pBlkKeyInfo);
......
......@@ -237,7 +237,7 @@ void taos_fetch_rows_a(TAOS_RES *tres, __async_cb_func_t fp, void *param) {
return;
}
if (pRes->qId == 0) {
if (pRes->qId == 0 && pSql->cmd.command != TSDB_SQL_RETRIEVE_EMPTY_RESULT) {
tscError("qhandle is invalid");
pRes->code = TSDB_CODE_TSC_INVALID_QHANDLE;
tscAsyncResultOnError(pSql);
......
......@@ -967,7 +967,6 @@ SSDataBlock* doGlobalAggregate(void* param, bool* newgroup) {
if (pOperator->pRuntimeEnv->pQueryAttr->order.order == TSDB_ORDER_DESC) {
SWAP(w->skey, w->ekey, TSKEY);
assert(w->skey <= w->ekey);
}
}
}
......
......@@ -195,8 +195,9 @@ static int32_t parseTelnetMetricValue(TAOS_SML_KV **pKVs, int *num_kvs, const ch
}
tfree(value);
pVal->key = tcalloc(sizeof(key), 1);
pVal->key = tcalloc(sizeof(key) + TS_ESCAPE_CHAR_SIZE, 1);
memcpy(pVal->key, key, sizeof(key));
addEscapeCharToString(pVal->key, (int32_t)strlen(pVal->key));
*num_kvs += 1;
*index = cur + 1;
......@@ -881,8 +882,9 @@ static int32_t parseMetricValueFromJSON(cJSON *root, TAOS_SML_KV **pKVs, int *nu
return ret;
}
pVal->key = tcalloc(sizeof(key), 1);
pVal->key = tcalloc(sizeof(key) + TS_ESCAPE_CHAR_SIZE, 1);
memcpy(pVal->key, key, sizeof(key));
addEscapeCharToString(pVal->key, (int32_t)strlen(pVal->key));
*num_kvs += 1;
return TSDB_CODE_SUCCESS;
......
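A standalone sketch of the idea behind the two hunks above: the key buffer is now allocated with TS_ESCAPE_CHAR_SIZE extra bytes so that addEscapeCharToString can wrap the identifier in escape characters in place. The sketch assumes the escape character is the backtick and TS_ESCAPE_CHAR_SIZE is 2; addEscapeCharToStringSketch is a made-up stand-in, not the library routine.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define TS_ESCAPE_CHAR_SIZE 2   /* assumption: one leading and one trailing escape character */

/* made-up stand-in for addEscapeCharToString: wrap str (of length len) in backticks, in place */
static void addEscapeCharToStringSketch(char *str, int len) {
  memmove(str + 1, str, len);
  str[0] = '`';
  str[len + 1] = '`';
  str[len + 2] = '\0';
}

int main(void) {
  const char key[] = "cpu.load";
  /* strlen(key) + 2 escape chars + terminating NUL fits in sizeof(key) + TS_ESCAPE_CHAR_SIZE */
  char *buf = calloc(sizeof(key) + TS_ESCAPE_CHAR_SIZE, 1);
  memcpy(buf, key, sizeof(key));
  addEscapeCharToStringSketch(buf, (int)strlen(buf));
  printf("%s\n", buf);   /* prints `cpu.load` */
  free(buf);
  return 0;
}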
......@@ -48,12 +48,14 @@ typedef struct SMultiTbStmt {
bool nameSet;
bool tagSet;
bool subSet;
bool tagColSet;
uint64_t currentUid;
char *sqlstr;
uint32_t tbNum;
SStrToken tbname;
SStrToken stbname;
SStrToken values;
SStrToken tagCols;
SArray *tags;
STableDataBlocks *lastBlock;
SHashObj *pTableHash;
......@@ -1250,6 +1252,12 @@ static void insertBatchClean(STscStmt* pStmt) {
pCmd->insertParam.pDataBlocks = tscDestroyBlockArrayList(pSql, pCmd->insertParam.pDataBlocks);
pCmd->insertParam.numOfTables = 0;
STableDataBlocks** p = taosHashIterate(pCmd->insertParam.pTableBlockHashList, NULL);
while(p) {
tfree((*p)->pData);
p = taosHashIterate(pCmd->insertParam.pTableBlockHashList, p);
}
taosHashClear(pCmd->insertParam.pTableBlockHashList);
tscFreeSqlResult(pSql);
tscFreeSubobj(pSql);
......@@ -1342,11 +1350,42 @@ int stmtParseInsertTbTags(SSqlObj* pSql, STscStmt* pStmt) {
}
pStmt->mtb.stbname = sToken;
sToken = tStrGetToken(pCmd->insertParam.sql, &index, false);
if (sToken.n <= 0 || ((sToken.type != TK_TAGS) && (sToken.type != TK_LP))) {
tscError("invalid token, sql:%s", pCmd->insertParam.sql);
return tscSQLSyntaxErrMsg(pCmd->payload, "invalid token", sToken.z ? sToken.z : pCmd->insertParam.sql);
}
// ... (tag_col_list) TAGS(tag_val_list) ...
int32_t tagColsCnt = 0;
if (sToken.type == TK_LP) {
pStmt->mtb.tagColSet = true;
pStmt->mtb.tagCols = sToken;
int32_t tagColsStart = index;
while (1) {
sToken = tStrGetToken(pCmd->insertParam.sql, &index, false);
if (sToken.type == TK_ILLEGAL) {
return tscSQLSyntaxErrMsg(pCmd->payload, "unrecognized token", sToken.z);
}
if (sToken.type == TK_ID) {
++tagColsCnt;
}
if (sToken.type == TK_RP) {
break;
}
}
if (tagColsCnt == 0) {
tscError("tag column list expected, sql:%s", pCmd->insertParam.sql);
return tscSQLSyntaxErrMsg(pCmd->payload, "tag column list expected", pCmd->insertParam.sql);
}
pStmt->mtb.tagCols.n = index - tagColsStart + 1;
sToken = tStrGetToken(pCmd->insertParam.sql, &index, false);
if (sToken.n <= 0 || sToken.type != TK_TAGS) {
tscError("keyword TAGS expected, sql:%s", pCmd->insertParam.sql);
return tscSQLSyntaxErrMsg(pCmd->payload, "keyword TAGS expected", sToken.z ? sToken.z : pCmd->insertParam.sql);
}
}
sToken = tStrGetToken(pCmd->insertParam.sql, &index, false);
if (sToken.n <= 0 || sToken.type != TK_LP) {
......@@ -1385,6 +1424,11 @@ int stmtParseInsertTbTags(SSqlObj* pSql, STscStmt* pStmt) {
return tscSQLSyntaxErrMsg(pCmd->payload, "no tags", pCmd->insertParam.sql);
}
if (tagColsCnt > 0 && taosArrayGetSize(pStmt->mtb.tags) != tagColsCnt) {
tscError("not match tags, sql:%s", pCmd->insertParam.sql);
return tscSQLSyntaxErrMsg(pCmd->payload, "not match tags", pCmd->insertParam.sql);
}
sToken = tStrGetToken(pCmd->insertParam.sql, &index, false);
if (sToken.n <= 0 || (sToken.type != TK_VALUES && sToken.type != TK_LP)) {
tscError("sql error, sql:%s", pCmd->insertParam.sql);
......@@ -1407,7 +1451,13 @@ int stmtGenInsertStatement(SSqlObj* pSql, STscStmt* pStmt, const char* name, TAO
int32_t j = 0;
while (1) {
if (pStmt->mtb.tagColSet) {
len = (size_t)snprintf(str, size - 1, "insert into %s using %.*s %.*s tags(",
name, pStmt->mtb.stbname.n, pStmt->mtb.stbname.z, pStmt->mtb.tagCols.n, pStmt->mtb.tagCols.z);
} else {
len = (size_t)snprintf(str, size - 1, "insert into %s using %.*s tags(", name, pStmt->mtb.stbname.n, pStmt->mtb.stbname.z);
}
if (len >= (size -1)) {
size *= 2;
free(str);
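The hunks above let a prepared insert name an explicit tag column list between the super table and TAGS, and stmtGenInsertStatement forwards that list into the per-table statement it generates. A hedged usage sketch with the C prepared-statement API follows; the super table st, its tag column t2 and the sub-table d0 are made-up names, error handling is omitted, and the parser above only enforces that the number of listed tag columns equals the number of bound tag values (any further checks are left to the regular insert validation).

#include <stdint.h>
#include <string.h>
#include "taos.h"

/* Sketch: bind one tag value for the single tag column listed in the statement. */
static void insert_with_tag_col_list(TAOS *taos) {
  TAOS_STMT *stmt = taos_stmt_init(taos);
  const char *sql = "insert into ? using st (t2) tags(?) values(?, ?)";
  taos_stmt_prepare(stmt, sql, (unsigned long)strlen(sql));

  int32_t t2 = 7;
  TAOS_BIND tag;
  memset(&tag, 0, sizeof(tag));
  tag.buffer_type   = TSDB_DATA_TYPE_INT;
  tag.buffer        = &t2;
  tag.buffer_length = sizeof(t2);
  tag.length        = &tag.buffer_length;

  taos_stmt_set_tbname_tags(stmt, "d0", &tag);  /* sub-table name plus the listed tag values */
  /* ... taos_stmt_bind_param()/taos_stmt_bind_param_batch(), taos_stmt_add_batch(), taos_stmt_execute() ... */
  taos_stmt_close(stmt);
}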
......@@ -1659,6 +1709,13 @@ int taos_stmt_set_tbname_tags(TAOS_STMT* stmt, const char* name, TAOS_BIND* tags
STMT_RET(TSDB_CODE_TSC_APP_ERROR);
}
if ((*t1)->pData == NULL) {
code = tscCreateDataBlockData(*t1, TSDB_PAYLOAD_SIZE, (*t1)->pTableMeta->tableInfo.rowSize, sizeof(SSubmitBlk));
if (code != TSDB_CODE_SUCCESS) {
STMT_RET(code);
}
}
SSubmitBlk* pBlk = (SSubmitBlk*) (*t1)->pData;
pCmd->batchSize = pBlk->numOfRows;
if (pBlk->numOfRows == 0) {
......@@ -1784,7 +1841,6 @@ int taos_stmt_set_tbname_tags(TAOS_STMT* stmt, const char* name, TAOS_BIND* tags
STMT_RET(code);
}
int taos_stmt_set_sub_tbname(TAOS_STMT* stmt, const char* name) {
STscStmt* pStmt = (STscStmt*)stmt;
STMT_CHECK
......@@ -1792,8 +1848,6 @@ int taos_stmt_set_sub_tbname(TAOS_STMT* stmt, const char* name) {
return taos_stmt_set_tbname_tags(stmt, name, NULL);
}
int taos_stmt_set_tbname(TAOS_STMT* stmt, const char* name) {
STscStmt* pStmt = (STscStmt*)stmt;
STMT_CHECK
......@@ -1801,7 +1855,6 @@ int taos_stmt_set_tbname(TAOS_STMT* stmt, const char* name) {
return taos_stmt_set_tbname_tags(stmt, name, NULL);
}
int taos_stmt_close(TAOS_STMT* stmt) {
STscStmt* pStmt = (STscStmt*)stmt;
if (pStmt == NULL || pStmt->taos == NULL) {
......@@ -1868,7 +1921,6 @@ int taos_stmt_bind_param(TAOS_STMT* stmt, TAOS_BIND* bind) {
}
}
int taos_stmt_bind_param_batch(TAOS_STMT* stmt, TAOS_MULTI_BIND* bind) {
STscStmt* pStmt = (STscStmt*)stmt;
......@@ -1932,8 +1984,6 @@ int taos_stmt_bind_single_param_batch(TAOS_STMT* stmt, TAOS_MULTI_BIND* bind, in
STMT_RET(insertStmtBindParamBatch(pStmt, bind, colIdx));
}
int taos_stmt_add_batch(TAOS_STMT* stmt) {
STscStmt* pStmt = (STscStmt*)stmt;
STMT_CHECK
......@@ -2086,7 +2136,6 @@ int taos_stmt_get_param(TAOS_STMT *stmt, int idx, int *type, int *bytes) {
}
}
char *taos_stmt_errstr(TAOS_STMT *stmt) {
STscStmt* pStmt = (STscStmt*)stmt;
......@@ -2097,8 +2146,6 @@ char *taos_stmt_errstr(TAOS_STMT *stmt) {
return taos_errstr(pStmt->pSql);
}
const char *taos_data_type(int type) {
switch (type) {
case TSDB_DATA_TYPE_NULL: return "TSDB_DATA_TYPE_NULL";
......@@ -2115,4 +2162,3 @@ const char *taos_data_type(int type) {
default: return "UNKNOWN";
}
}
......@@ -2019,6 +2019,7 @@ int32_t validateSelectNodeList(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SArray* pS
const char* msg8 = "not support distinct in nest query";
const char* msg9 = "_block_dist not support subquery, only support stable/table";
const char* msg10 = "not support group by in block func";
const char* msg11 = "invalid alias name";
// too many result columns not support order by in query
if (taosArrayGetSize(pSelNodeList) > TSDB_MAX_COLUMNS) {
......@@ -2041,6 +2042,9 @@ int32_t validateSelectNodeList(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SArray* pS
hasDistinct = (pItem->distinct == true);
distIdx = hasDistinct ? i : -1;
}
if(pItem->aliasName != NULL && validateColumnName(pItem->aliasName) != TSDB_CODE_SUCCESS){
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg11);
}
int32_t type = pItem->pNode->type;
if (type == SQL_NODE_SQLFUNCTION) {
......@@ -6074,14 +6078,19 @@ int32_t setAlterTableInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) {
STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, DEFAULT_TABLE_INDEX);
bool dbIncluded = false;
if (tscValidateName(&(pAlterSQL->name), true, &dbIncluded) != TSDB_CODE_SUCCESS) {
SStrToken tmpToken = pAlterSQL->name;
tmpToken.z= strndup(pAlterSQL->name.z, pAlterSQL->name.n);
if (tscValidateName(&tmpToken, true, &dbIncluded) != TSDB_CODE_SUCCESS) {
free(tmpToken.z);
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
code = tscSetTableFullName(&pTableMetaInfo->name, &(pAlterSQL->name), pSql, dbIncluded);
code = tscSetTableFullName(&pTableMetaInfo->name, &tmpToken, pSql, dbIncluded);
if (code != TSDB_CODE_SUCCESS) {
free(tmpToken.z);
return code;
}
free(tmpToken.z);
code = tscGetTableMeta(pSql, pTableMetaInfo);
if (code != TSDB_CODE_SUCCESS) {
......
......@@ -468,7 +468,7 @@ SSqlObj* recreateSqlObj(SSub* pSub) {
}
registerSqlObj(pSql);
pSql->rootObj = pSql;
code = tsParseSql(pSql, true);
if (code == TSDB_CODE_TSC_ACTION_IN_PROGRESS) {
tsem_wait(&pSub->sem);
......
......@@ -187,7 +187,7 @@ bool tscQueryTags(SQueryInfo* pQueryInfo) {
continue;
}
if (functId != TSDB_FUNC_TAGPRJ && functId != TSDB_FUNC_TID_TAG) {
if (functId != TSDB_FUNC_TAGPRJ && functId != TSDB_FUNC_TID_TAG && functId != TSDB_FUNC_BLKINFO) {
return false;
}
}
......@@ -1845,6 +1845,32 @@ int32_t tscCreateDataBlock(size_t defaultSize, int32_t rowSize, int32_t startOff
return TSDB_CODE_TSC_OUT_OF_MEMORY;
}
int32_t code = tscCreateDataBlockData(dataBuf, defaultSize, rowSize, startOffset);
if (code != TSDB_CODE_SUCCESS) {
tfree(dataBuf);
return code;
}
// Here we keep the tableMeta to avoid it being removed by other threads.
dataBuf->pTableMeta = tscTableMetaDup(pTableMeta);
SParsedDataColInfo* pColInfo = &dataBuf->boundColumnInfo;
SSchema* pSchema = tscGetTableSchema(dataBuf->pTableMeta);
tscSetBoundColumnInfo(pColInfo, pSchema, dataBuf->pTableMeta->tableInfo.numOfColumns);
dataBuf->vgId = dataBuf->pTableMeta->vgId;
tNameAssign(&dataBuf->tableName, name);
assert(defaultSize > 0 && pTableMeta != NULL && dataBuf->pTableMeta != NULL);
*dataBlocks = dataBuf;
return TSDB_CODE_SUCCESS;
}
int32_t tscCreateDataBlockData(STableDataBlocks* dataBuf, size_t defaultSize, int32_t rowSize, int32_t startOffset) {
assert(dataBuf != NULL);
dataBuf->nAllocSize = (uint32_t)defaultSize;
dataBuf->headerSize = startOffset;
......@@ -1857,30 +1883,16 @@ int32_t tscCreateDataBlock(size_t defaultSize, int32_t rowSize, int32_t startOff
dataBuf->pData = malloc(dataBuf->nAllocSize);
if (dataBuf->pData == NULL) {
tscError("failed to allocated memory, reason:%s", strerror(errno));
tfree(dataBuf);
return TSDB_CODE_TSC_OUT_OF_MEMORY;
}
memset(dataBuf->pData, 0, sizeof(SSubmitBlk));
//Here we keep the tableMeta to avoid it to be remove by other threads.
dataBuf->pTableMeta = tscTableMetaDup(pTableMeta);
SParsedDataColInfo* pColInfo = &dataBuf->boundColumnInfo;
SSchema* pSchema = tscGetTableSchema(dataBuf->pTableMeta);
tscSetBoundColumnInfo(pColInfo, pSchema, dataBuf->pTableMeta->tableInfo.numOfColumns);
dataBuf->ordered = true;
dataBuf->prevTS = INT64_MIN;
dataBuf->rowSize = rowSize;
dataBuf->size = startOffset;
dataBuf->tsSource = -1;
dataBuf->vgId = dataBuf->pTableMeta->vgId;
tNameAssign(&dataBuf->tableName, name);
assert(defaultSize > 0 && pTableMeta != NULL && dataBuf->pTableMeta != NULL);
*dataBlocks = dataBuf;
return TSDB_CODE_SUCCESS;
}
......
......@@ -253,9 +253,10 @@ int dataColAppendVal(SDataCol *pCol, const void *value, int numOfRows, int maxPo
}
if(tdAllocMemForCol(pCol, maxPoints) < 0) return -1;
if (numOfRows > 0) {
if (((rowOffset == 0) && (numOfRows > 0)) || ((rowOffset == -1) && (numOfRows >= 0))) {
// Find the first non-null value, fill all previous values as NULL
dataColSetNEleNull(pCol, numOfRows);
dataColSetNEleNull(pCol, numOfRows - rowOffset);
}
}
......@@ -463,9 +464,7 @@ static void tdAppendDataRowToDataCol(SDataRow row, STSchema *pSchema, SDataCols
int rcol = 0;
int dcol = 0;
while (dcol < pCols->numOfCols) {
bool setCol = 0;
SDataCol *pDataCol = &(pCols->cols[dcol]);
if (rcol >= schemaNCols(pSchema)) {
dataColAppendVal(pDataCol, getNullValue(pDataCol->type), pCols->numOfRows, pCols->maxPoints, rowOffset);
......@@ -476,14 +475,22 @@ static void tdAppendDataRowToDataCol(SDataRow row, STSchema *pSchema, SDataCols
STColumn *pRowCol = schemaColAt(pSchema, rcol);
if (pRowCol->colId == pDataCol->colId) {
void *value = tdGetRowDataOfCol(row, pRowCol->type, pRowCol->offset + TD_DATA_ROW_HEAD_SIZE);
if(!isNull(value, pDataCol->type)) setCol = 1;
if (rowOffset == 0) {
dataColAppendVal(pDataCol, value, pCols->numOfRows, pCols->maxPoints, rowOffset);
} else if (rowOffset == -1) {
// for update 2
if (!isNull(value, pDataCol->type)) {
dataColAppendVal(pDataCol, value, pCols->numOfRows, pCols->maxPoints, rowOffset);
}
} else {
ASSERT(0);
}
dcol++;
rcol++;
} else if (pRowCol->colId < pDataCol->colId) {
rcol++;
} else {
if(forceSetNull || setCol) {
if(forceSetNull) {
dataColAppendVal(pDataCol, getNullValue(pDataCol->type), pCols->numOfRows, pCols->maxPoints, rowOffset);
}
dcol++;
......@@ -501,7 +508,6 @@ static void tdAppendKvRowToDataCol(SKVRow row, STSchema *pSchema, SDataCols *pCo
int nRowCols = kvRowNCols(row);
while (dcol < pCols->numOfCols) {
bool setCol = 0;
SDataCol *pDataCol = &(pCols->cols[dcol]);
if (rcol >= nRowCols || rcol >= schemaNCols(pSchema)) {
dataColAppendVal(pDataCol, getNullValue(pDataCol->type), pCols->numOfRows, pCols->maxPoints, rowOffset);
......@@ -513,14 +519,22 @@ static void tdAppendKvRowToDataCol(SKVRow row, STSchema *pSchema, SDataCols *pCo
if (colIdx->colId == pDataCol->colId) {
void *value = tdGetKvRowDataOfCol(row, colIdx->offset);
if(!isNull(value, pDataCol->type)) setCol = 1;
if (rowOffset == 0) {
dataColAppendVal(pDataCol, value, pCols->numOfRows, pCols->maxPoints, rowOffset);
} else if (rowOffset == -1) {
// for update 2
if (!isNull(value, pDataCol->type)) {
dataColAppendVal(pDataCol, value, pCols->numOfRows, pCols->maxPoints, rowOffset);
}
} else {
ASSERT(0);
}
++dcol;
++rcol;
} else if (colIdx->colId < pDataCol->colId) {
++rcol;
} else {
if(forceSetNull || setCol) {
if (forceSetNull) {
dataColAppendVal(pDataCol, getNullValue(pDataCol->type), pCols->numOfRows, pCols->maxPoints, rowOffset);
}
++dcol;
......
......@@ -73,7 +73,7 @@ int32_t tsMaxBinaryDisplayWidth = 30;
* -1: all data are not compressed
* other values: if the message payload size is greater than the tsCompressMsgSize, the message will be compressed.
*/
int32_t tsCompressMsgSize = -1;
int32_t tsCompressMsgSize = 512 * 1024;
/* denote if server needs to compress the retrieved column data before adding to the rpc response message body.
* 0: all data are compressed
......@@ -290,7 +290,7 @@ char Compressor[32] = "ZSTD_COMPRESSOR"; // ZSTD_COMPRESSOR or GZIP_COMPRES
int8_t tsDeadLockKillQuery = 0;
// default JSON string type
char tsDefaultJSONStrType[7] = "binary";
char tsDefaultJSONStrType[7] = "nchar";
char tsSmlChildTableName[TSDB_TABLE_NAME_LEN] = ""; //user defined child table name can be specified in tag value. If set to empty system will generate table name using MD5 hash.
int32_t (*monStartSystemFp)() = NULL;
......
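Expressed as explicit entries in taos.cfg, the two new defaults above look roughly like the sketch below. The defaultJSONStrType entry mirrors the cfg hunk earlier in this commit; the compressMsgSize option name is an assumption, not taken from this diff.

# compress RPC messages whose payload exceeds 512 KB (512 * 1024 = 524288 bytes); -1 disables compression
compressMsgSize       524288
# store JSON tag string values as nchar instead of binary
defaultJSONStrType    nchar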
......@@ -5,6 +5,7 @@ import com.taosdata.jdbc.TSDBErrorNumbers;
import org.apache.http.HeaderElement;
import org.apache.http.HeaderElementIterator;
import org.apache.http.HttpEntity;
import org.apache.http.NoHttpResponseException;
import org.apache.http.client.ClientProtocolException;
import org.apache.http.client.config.RequestConfig;
import org.apache.http.client.methods.*;
......
......@@ -854,6 +854,7 @@ int getMetaFromInsertJsonFile(cJSON *root) {
g_Dbs.db[i].superTbls[j].iface = STMT_IFACE;
} else if (0 == strcasecmp(stbIface->valuestring, "sml")) {
g_Dbs.db[i].superTbls[j].iface = SML_IFACE;
g_args.iface = SML_IFACE;
} else {
errorPrint(
"failed to read json, insert_mode %s not recognized\n",
......
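With the hunk above, choosing the "sml" insert mode for a super table now also switches the global interface to schemaless. A minimal fragment of a taosdemo insert JSON that would take this path is sketched below; apart from insert_mode, the field names are recalled from the taosdemo format and should be treated as assumptions.

{
  "databases": [{
    "dbinfo": { "name": "test", "drop": "yes" },
    "super_tables": [{
      "name": "meters",
      "insert_mode": "sml",
      "childtable_count": 10,
      "insert_rows": 100
    }]
  }]
}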
......@@ -204,13 +204,14 @@ int getChildNameOfSuperTableWithLimitAndOffset(TAOS *taos, char *dbName,
int64_t childTblCount = (limit < 0) ? DEFAULT_CHILDTABLES : limit;
int64_t count = 0;
char * childTblName = *childTblNameOfSuperTbl;
char * pTblName = childTblName;
if (childTblName == NULL) {
childTblName = (char *)calloc(1, childTblCount * TSDB_TABLE_NAME_LEN);
if (childTblName == NULL) {
errorPrint("%s", "failed to allocate memory\n");
}
}
char *pTblName = childTblName;
snprintf(limitBuf, 100, " limit %" PRId64 " offset %" PRIu64 "", limit,
offset);
......
......@@ -1139,7 +1139,7 @@ static int32_t mnodeAlterDbFp(SMnodeMsg *pMsg) {
return sdbUpdateRow(&row);
}
//bnNotify();
bnNotify();
return TSDB_CODE_MND_ACTION_IN_PROGRESS;
}
......
......@@ -121,7 +121,7 @@ static int32_t mnodeProcessShowMsg(SMnodeMsg *pMsg) {
}
if (!tsMnodeShowMetaFp[pShowMsg->type] || !tsMnodeShowRetrieveFp[pShowMsg->type]) {
mError("show type:%s is not support", mnodeGetShowType(pShowMsg->type));
mWarn("show type:%s is not support", mnodeGetShowType(pShowMsg->type));
return TSDB_CODE_COM_OPS_NOT_SUPPORT;
}
......
......@@ -743,19 +743,6 @@ static int32_t mnodeGetVgroupMeta(STableMetaMsg *pMeta, SShowObj *pShow, void *p
return 0;
}
static bool mnodeFilterVgroups(SVgObj *pVgroup, STableObj *pTable) {
if (NULL == pTable || pTable->type == TSDB_SUPER_TABLE) {
return true;
}
SCTableObj *pCTable = (SCTableObj *)pTable;
if (pVgroup->vgId == pCTable->vgId) {
return true;
} else {
return false;
}
}
static int32_t mnodeRetrieveVgroups(SShowObj *pShow, char *data, int32_t rows, void *pConn) {
int32_t numOfRows = 0;
SVgObj *pVgroup = NULL;
......@@ -771,11 +758,6 @@ static int32_t mnodeRetrieveVgroups(SShowObj *pShow, char *data, int32_t rows, v
return 0;
}
STableObj *pTable = NULL;
if (pShow->payloadLen > 0 ) {
pTable = mnodeGetTable(pShow->payload);
}
while (numOfRows < rows) {
pShow->pIter = mnodeGetNextVgroup(pShow->pIter, &pVgroup);
if (pVgroup == NULL) break;
......@@ -785,11 +767,6 @@ static int32_t mnodeRetrieveVgroups(SShowObj *pShow, char *data, int32_t rows, v
continue;
}
if (!mnodeFilterVgroups(pVgroup, pTable)) {
mnodeDecVgroupRef(pVgroup);
continue;
}
cols = 0;
pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows;
......@@ -847,7 +824,6 @@ static int32_t mnodeRetrieveVgroups(SShowObj *pShow, char *data, int32_t rows, v
mnodeVacuumResult(data, pShow->numOfColumns, numOfRows, rows, pShow);
pShow->numOfReads += numOfRows;
mnodeDecTableRef(pTable);
mnodeDecDbRef(pDb);
return numOfRows;
......
......@@ -424,29 +424,44 @@ int64_t convertTimePrecision(int64_t time, int32_t fromPrecision, int32_t toPrec
}
} //end switch fromPrecision
end_:
if (tempResult > (double)INT64_MAX) return INT64_MAX;
if (tempResult < (double)INT64_MIN) return INT64_MIN + 1; // INT64_MIN means NULL
if (tempResult >= (double)INT64_MAX) return INT64_MAX;
if (tempResult <= (double)INT64_MIN) return INT64_MIN + 1; // INT64_MIN means NULL
return time;
}
static int32_t getDuration(int64_t val, char unit, int64_t* result, int32_t timePrecision) {
switch (unit) {
case 's':
case 's':{
double temp = ((double)val) * MILLISECOND_PER_SECOND;
if (temp >= (double)INT64_MAX || temp <= (double)INT64_MIN) return -1;
(*result) = convertTimePrecision(val * MILLISECOND_PER_SECOND, TSDB_TIME_PRECISION_MILLI, timePrecision);
break;
case 'm':
}
case 'm':{
double temp = ((double)val) * MILLISECOND_PER_MINUTE;
if (temp >= (double)INT64_MAX || temp <= (double)INT64_MIN) return -1;
(*result) = convertTimePrecision(val * MILLISECOND_PER_MINUTE, TSDB_TIME_PRECISION_MILLI, timePrecision);
break;
case 'h':
}
case 'h':{
double temp = ((double)val) * MILLISECOND_PER_HOUR;
if (temp >= (double)INT64_MAX || temp <= (double)INT64_MIN) return -1;
(*result) = convertTimePrecision(val * MILLISECOND_PER_HOUR, TSDB_TIME_PRECISION_MILLI, timePrecision);
break;
case 'd':
}
case 'd': {
double temp = ((double)val) * MILLISECOND_PER_DAY;
if (temp >= (double)INT64_MAX || temp <= (double)INT64_MIN) return -1;
(*result) = convertTimePrecision(val * MILLISECOND_PER_DAY, TSDB_TIME_PRECISION_MILLI, timePrecision);
break;
case 'w':
}
case 'w': {
double temp = ((double)val) * MILLISECOND_PER_WEEK;
if (temp >= (double)INT64_MAX || temp <= (double)INT64_MIN) return -1;
(*result) = convertTimePrecision(val * MILLISECOND_PER_WEEK, TSDB_TIME_PRECISION_MILLI, timePrecision);
break;
}
case 'a':
(*result) = convertTimePrecision(val, TSDB_TIME_PRECISION_MILLI, timePrecision);
break;
......
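A quick check of the new guards: INT64_MAX / MILLISECOND_PER_DAY is 106,751,991,167, so a duration of 106751991168d can no longer be represented as a signed 64-bit millisecond count, which is exactly the value rejected by the new test case later in this commit. A standalone sketch, with MILLISECOND_PER_DAY redefined locally so it compiles on its own:

#include <stdint.h>
#include <stdio.h>

#define MILLISECOND_PER_DAY 86400000LL   /* assumed to match the library macro */

int main(void) {
  printf("max whole days: %lld\n", (long long)(INT64_MAX / MILLISECOND_PER_DAY));  /* 106751991167 */
  double temp = (double)106751991168LL * MILLISECOND_PER_DAY;
  printf("106751991168d overflows: %d\n", temp >= (double)INT64_MAX);              /* prints 1 */
  return 0;
}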
......@@ -127,12 +127,6 @@ cmd ::= SHOW dbPrefix(X) VGROUPS. {
setShowOptions(pInfo, TSDB_MGMT_TABLE_VGROUP, &token, 0);
}
cmd ::= SHOW dbPrefix(X) VGROUPS ids(Y). {
SStrToken token;
tSetDbName(&token, &X);
setShowOptions(pInfo, TSDB_MGMT_TABLE_VGROUP, &token, &Y);
}
//drop configure for tables
cmd ::= DROP TABLE ifexists(Y) ids(X) cpxName(Z). {
X.n += Z.n;
......
......@@ -821,7 +821,7 @@ static int32_t lastDistFuncRequired(SQLFunctionCtx *pCtx, STimeWindow* w, int32_
if (pInfo->hasResult != DATA_SET_FLAG) {
return BLK_DATA_ALL_NEEDED;
} else {
return (pInfo->ts > w->ekey) ? BLK_DATA_NO_NEEDED : BLK_DATA_ALL_NEEDED;
return (pInfo->ts >= w->ekey) ? BLK_DATA_NO_NEEDED : BLK_DATA_ALL_NEEDED;
}
}
......@@ -4459,7 +4459,7 @@ void generateBlockDistResult(STableBlockDist *pTableBlockDist, char* result) {
"5th=[%d], 10th=[%d], 20th=[%d], 30th=[%d], 40th=[%d], 50th=[%d]\n\t "
"60th=[%d], 70th=[%d], 80th=[%d], 90th=[%d], 95th=[%d], 99th=[%d]\n\t "
"Min=[%"PRId64"(Rows)] Max=[%"PRId64"(Rows)] Avg=[%"PRId64"(Rows)] Stddev=[%.2f] \n\t "
"Rows=[%"PRIu64"], Blocks=[%"PRId64"], SmallBlocks=[%d], Size=[%.3f(Kb)] Comp=[%.2f]\n\t "
"Rows=[%"PRIu64"], Blocks=[%"PRId64"], SmallBlocks=[%d], Size=[%.3f(Kb)] Comp=[%.5g]\n\t "
"RowsInMem=[%d] \n\t",
percentiles[0], percentiles[1], percentiles[2], percentiles[3], percentiles[4], percentiles[5],
percentiles[6], percentiles[7], percentiles[8], percentiles[9], percentiles[10], percentiles[11],
......@@ -4491,230 +4491,182 @@ void blockinfo_func_finalizer(SQLFunctionCtx* pCtx) {
/////////////////////////////////////////////////////////////////////////////////////////////////////////////
#define CFR_SET_VAL(type, data, pCtx, func, i, step, notNullElems) \
#define CFR_SET_VAL(type, data, pCtx, func, i, step) \
do { \
type *pData = (type *) data; \
type *pOutput = (type *) pCtx->pOutput; \
\
for (; i < pCtx->size && i >= 0; i += step) { \
if (pCtx->hasNull && isNull((const char*) &pData[i], pCtx->inputType)) { \
continue; \
if (pCtx->hasNull && isNull((const char *)&pData[i], pCtx->inputType)) { \
*pOutput++ = pData[i]; \
} else { \
*pOutput++ = (type)func((double)pData[i]); \
} \
\
*pOutput++ = (type) func((double) pData[i]); \
\
notNullElems++; \
} \
} while (0)
#define CFR_SET_VAL_DOUBLE(data, pCtx, func, i, step, notNullElems) \
do { \
double *pData = (double *) data; \
double *pOutput = (double *) pCtx->pOutput; \
\
for (; i < pCtx->size && i >= 0; i += step) { \
if (pCtx->hasNull && isNull((const char*) &pData[i], pCtx->inputType)) { \
continue; \
} \
\
SET_DOUBLE_VAL(pOutput, func(pData[i])); \
pOutput++; \
\
notNullElems++; \
} \
} while (0)
static void ceil_function(SQLFunctionCtx *pCtx) {
void *data = GET_INPUT_DATA_LIST(pCtx);
int32_t notNullElems = 0;
int32_t step = GET_FORWARD_DIRECTION_FACTOR(pCtx->order);
int32_t i = (pCtx->order == TSDB_ORDER_ASC) ? 0 : pCtx->size - 1;
switch (pCtx->inputType) {
case TSDB_DATA_TYPE_INT: {
CFR_SET_VAL(int32_t, data, pCtx, ceil, i, step, notNullElems);
CFR_SET_VAL(int32_t, data, pCtx, ceil, i, step);
break;
};
case TSDB_DATA_TYPE_UINT: {
CFR_SET_VAL(uint32_t, data, pCtx, ceil, i, step, notNullElems);
CFR_SET_VAL(uint32_t, data, pCtx, ceil, i, step);
break;
};
case TSDB_DATA_TYPE_BIGINT: {
CFR_SET_VAL(int64_t, data, pCtx, ceil, i, step, notNullElems);
CFR_SET_VAL(int64_t, data, pCtx, ceil, i, step);
break;
}
case TSDB_DATA_TYPE_UBIGINT: {
CFR_SET_VAL(uint64_t, data, pCtx, ceil, i, step, notNullElems);
CFR_SET_VAL(uint64_t, data, pCtx, ceil, i, step);
break;
}
case TSDB_DATA_TYPE_DOUBLE: {
CFR_SET_VAL_DOUBLE(data, pCtx, ceil, i, step, notNullElems);
CFR_SET_VAL(double, data, pCtx, ceil, i, step);
break;
}
case TSDB_DATA_TYPE_FLOAT: {
CFR_SET_VAL(float, data, pCtx, ceil, i, step, notNullElems);
CFR_SET_VAL(float, data, pCtx, ceil, i, step);
break;
}
case TSDB_DATA_TYPE_SMALLINT: {
CFR_SET_VAL(int16_t, data, pCtx, ceil, i, step, notNullElems);
CFR_SET_VAL(int16_t, data, pCtx, ceil, i, step);
break;
}
case TSDB_DATA_TYPE_USMALLINT: {
CFR_SET_VAL(uint16_t, data, pCtx, ceil, i, step, notNullElems);
CFR_SET_VAL(uint16_t, data, pCtx, ceil, i, step);
break;
}
case TSDB_DATA_TYPE_TINYINT: {
CFR_SET_VAL(int8_t, data, pCtx, ceil, i, step, notNullElems);
CFR_SET_VAL(int8_t, data, pCtx, ceil, i, step);
break;
}
case TSDB_DATA_TYPE_UTINYINT: {
CFR_SET_VAL(uint8_t, data, pCtx, ceil, i, step, notNullElems);
CFR_SET_VAL(uint8_t, data, pCtx, ceil, i, step);
break;
}
default:
qError("error input type");
}
if (notNullElems <= 0) {
/*
* current block may be null value
*/
assert(pCtx->hasNull);
} else {
GET_RES_INFO(pCtx)->numOfRes += notNullElems;
}
GET_RES_INFO(pCtx)->numOfRes += pCtx->size;
}
static void floor_function(SQLFunctionCtx *pCtx) {
void *data = GET_INPUT_DATA_LIST(pCtx);
int32_t notNullElems = 0;
int32_t step = GET_FORWARD_DIRECTION_FACTOR(pCtx->order);
int32_t i = (pCtx->order == TSDB_ORDER_ASC) ? 0 : pCtx->size - 1;
switch (pCtx->inputType) {
case TSDB_DATA_TYPE_INT: {
CFR_SET_VAL(int32_t, data, pCtx, floor, i, step, notNullElems);
CFR_SET_VAL(int32_t, data, pCtx, floor, i, step);
break;
};
case TSDB_DATA_TYPE_UINT: {
CFR_SET_VAL(uint32_t, data, pCtx, floor, i, step, notNullElems);
CFR_SET_VAL(uint32_t, data, pCtx, floor, i, step);
break;
};
case TSDB_DATA_TYPE_BIGINT: {
CFR_SET_VAL(int64_t, data, pCtx, floor, i, step, notNullElems);
CFR_SET_VAL(int64_t, data, pCtx, floor, i, step);
break;
}
case TSDB_DATA_TYPE_UBIGINT: {
CFR_SET_VAL(uint64_t, data, pCtx, floor, i, step, notNullElems);
CFR_SET_VAL(uint64_t, data, pCtx, floor, i, step);
break;
}
case TSDB_DATA_TYPE_DOUBLE: {
CFR_SET_VAL_DOUBLE(data, pCtx, floor, i, step, notNullElems);
CFR_SET_VAL(double, data, pCtx, floor, i, step);
break;
}
case TSDB_DATA_TYPE_FLOAT: {
CFR_SET_VAL(float, data, pCtx, floor, i, step, notNullElems);
CFR_SET_VAL(float, data, pCtx, floor, i, step);
break;
}
case TSDB_DATA_TYPE_SMALLINT: {
CFR_SET_VAL(int16_t, data, pCtx, floor, i, step, notNullElems);
CFR_SET_VAL(int16_t, data, pCtx, floor, i, step);
break;
}
case TSDB_DATA_TYPE_USMALLINT: {
CFR_SET_VAL(uint16_t, data, pCtx, floor, i, step, notNullElems);
CFR_SET_VAL(uint16_t, data, pCtx, floor, i, step);
break;
}
case TSDB_DATA_TYPE_TINYINT: {
CFR_SET_VAL(int8_t, data, pCtx, floor, i, step, notNullElems);
CFR_SET_VAL(int8_t, data, pCtx, floor, i, step);
break;
}
case TSDB_DATA_TYPE_UTINYINT: {
CFR_SET_VAL(uint8_t, data, pCtx, floor, i, step, notNullElems);
CFR_SET_VAL(uint8_t, data, pCtx, floor, i, step);
break;
}
default:
qError("error input type");
}
if (notNullElems <= 0) {
/*
* current block may be null value
*/
assert(pCtx->hasNull);
} else {
GET_RES_INFO(pCtx)->numOfRes += notNullElems;
}
GET_RES_INFO(pCtx)->numOfRes += pCtx->size;
}
static void round_function(SQLFunctionCtx *pCtx) {
void *data = GET_INPUT_DATA_LIST(pCtx);
int32_t notNullElems = 0;
int32_t step = GET_FORWARD_DIRECTION_FACTOR(pCtx->order);
int32_t i = (pCtx->order == TSDB_ORDER_ASC) ? 0 : pCtx->size - 1;
switch (pCtx->inputType) {
case TSDB_DATA_TYPE_INT: {
CFR_SET_VAL(int32_t, data, pCtx, round, i, step, notNullElems);
CFR_SET_VAL(int32_t, data, pCtx, round, i, step);
break;
};
case TSDB_DATA_TYPE_UINT: {
CFR_SET_VAL(uint32_t, data, pCtx, round, i, step, notNullElems);
CFR_SET_VAL(uint32_t, data, pCtx, round, i, step);
break;
};
case TSDB_DATA_TYPE_BIGINT: {
CFR_SET_VAL(int64_t, data, pCtx, round, i, step, notNullElems);
CFR_SET_VAL(int64_t, data, pCtx, round, i, step);
break;
}
case TSDB_DATA_TYPE_UBIGINT: {
CFR_SET_VAL(uint64_t, data, pCtx, round, i, step, notNullElems);
CFR_SET_VAL(uint64_t, data, pCtx, round, i, step);
break;
}
case TSDB_DATA_TYPE_DOUBLE: {
CFR_SET_VAL_DOUBLE(data, pCtx, round, i, step, notNullElems);
CFR_SET_VAL(double, data, pCtx, round, i, step);
break;
}
case TSDB_DATA_TYPE_FLOAT: {
CFR_SET_VAL(float, data, pCtx, round, i, step, notNullElems);
CFR_SET_VAL(float, data, pCtx, round, i, step);
break;
}
case TSDB_DATA_TYPE_SMALLINT: {
CFR_SET_VAL(int16_t, data, pCtx, round, i, step, notNullElems);
CFR_SET_VAL(int16_t, data, pCtx, round, i, step);
break;
}
case TSDB_DATA_TYPE_USMALLINT: {
CFR_SET_VAL(uint16_t, data, pCtx, round, i, step, notNullElems);
CFR_SET_VAL(uint16_t, data, pCtx, round, i, step);
break;
}
case TSDB_DATA_TYPE_TINYINT: {
CFR_SET_VAL(int8_t, data, pCtx, round, i, step, notNullElems);
CFR_SET_VAL(int8_t, data, pCtx, round, i, step);
break;
}
case TSDB_DATA_TYPE_UTINYINT: {
CFR_SET_VAL(uint8_t, data, pCtx, round, i, step, notNullElems);
CFR_SET_VAL(uint8_t, data, pCtx, round, i, step);
break;
}
default:
qError("error input type");
}
if (notNullElems <= 0) {
/*
* current block may be null value
*/
assert(pCtx->hasNull);
} else {
GET_RES_INFO(pCtx)->numOfRes += notNullElems;
}
GET_RES_INFO(pCtx)->numOfRes += pCtx->size;
}
#undef CFR_SET_VAL
#undef CFR_SET_VAL_DOUBLE
//////////////////////////////////////////////////////////////////////////////////
//cumulative_sum function
......
......@@ -2354,7 +2354,6 @@ int32_t filterMergeGroups(SFilterInfo *info, SFilterGroupCtx** gRes, int32_t *gR
}
cStart = i;
cEnd = i;
cColNum = gRes[i]->colNum;
}
......
......@@ -342,7 +342,7 @@ int tsdbUpdateTableTagValue(STsdbRepo *repo, SUpdateTableTagValMsg *pMsg) {
tsdbError(
"vgId:%d failed to update tag value of table %s since version out of date, client tag version %d server tag "
"version %d",
REPO_ID(pRepo), TABLE_CHAR_NAME(pTable), pMsg->tversion, schemaVersion(pTable->tagSchema));
REPO_ID(pRepo), TABLE_CHAR_NAME(pTable), pMsg->tversion, schemaVersion(pTable->pSuper->tagSchema));
terrno = TSDB_CODE_TDB_TAG_VER_OUT_OF_DATE;
return -1;
}
......
......@@ -1377,66 +1377,63 @@ static int32_t loadFileDataBlock(STsdbQueryHandle* pQueryHandle, SBlock* pBlock,
return code;
}
static int doBinarySearchKey(char* pValue, int num, TSKEY key, int order) {
int firstPos, lastPos, midPos = -1;
int numOfRows;
TSKEY* keyList;
// ascending order: search for the last position with keyList[ret] <= key; descending order: the first position with keyList[ret] >= key
static int doBinarySearchKey(TSKEY* keyList, int num, int pos, TSKEY key, int order) {
// start and end positions
int s, e;
s = pos;
assert(order == TSDB_ORDER_ASC || order == TSDB_ORDER_DESC);
if (num <= 0) return -1;
keyList = (TSKEY*)pValue;
firstPos = 0;
lastPos = num - 1;
// check
assert(pos >=0 && pos < num);
assert(num > 0);
if (order == TSDB_ORDER_DESC) {
if (order == TSDB_ORDER_ASC) {
// find the first position which is smaller than the key
e = num - 1;
if (key < keyList[pos])
return -1;
while (1) {
if (key >= keyList[lastPos]) return lastPos;
if (key == keyList[firstPos]) return firstPos;
if (key < keyList[firstPos]) return firstPos - 1;
numOfRows = lastPos - firstPos + 1;
midPos = (numOfRows >> 1) + firstPos;
if (key < keyList[midPos]) {
lastPos = midPos - 1;
} else if (key > keyList[midPos]) {
firstPos = midPos + 1;
} else {
break;
}
// check can return
if (key >= keyList[e])
return e;
if (key <= keyList[s])
return s;
if (e - s <= 1)
return s;
// change start or end position
int mid = s + (e - s + 1)/2;
if (keyList[mid] > key)
e = mid;
else if(keyList[mid] < key)
s = mid;
else
return mid;
}
} else {
} else { // DESC
// find the first position which is bigger than the key
while (1) {
if (key <= keyList[firstPos]) return firstPos;
if (key == keyList[lastPos]) return lastPos;
if (key > keyList[lastPos]) {
lastPos = lastPos + 1;
if (lastPos >= num)
e = 0;
if (key > keyList[pos])
return -1;
while (1) {
// check can return
if (key <= keyList[e])
return e;
if (key >= keyList[s])
return s;
if (s - e <= 1)
return s;
// change start or end position
int mid = s - (s - e + 1)/2;
if (keyList[mid] < key)
e = mid;
else if(keyList[mid] > key)
s = mid;
else
return lastPos;
}
numOfRows = lastPos - firstPos + 1;
midPos = (numOfRows >> 1) + firstPos;
if (key < keyList[midPos]) {
lastPos = midPos - 1;
} else if (key > keyList[midPos]) {
firstPos = midPos + 1;
} else {
break;
return mid;
}
}
}
return midPos;
}
static int32_t doCopyRowsFromFileBlock(STsdbQueryHandle* pQueryHandle, int32_t capacity, int32_t numOfRows, int32_t start, int32_t end) {
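The rewritten search starts from a caller-supplied position: for ascending order it returns the last position whose key does not exceed the search key, for descending order the symmetric position whose key is not below it, and -1 when the key lies outside the range beginning at pos. A standalone sketch of the ascending branch over a made-up key list, mirroring the loop above rather than calling the static function:

#include <stdio.h>

typedef long long TSKEY;

/* ascending case: return the last index with keyList[ret] <= key, or -1 if key < keyList[pos] */
static int searchAscSketch(const TSKEY *keyList, int num, int pos, TSKEY key) {
  int s = pos, e = num - 1;
  if (key < keyList[pos]) return -1;
  while (1) {
    if (key >= keyList[e]) return e;
    if (key <= keyList[s]) return s;
    if (e - s <= 1) return s;
    int mid = s + (e - s + 1) / 2;
    if (keyList[mid] > key)      e = mid;
    else if (keyList[mid] < key) s = mid;
    else                         return mid;
  }
}

int main(void) {
  TSKEY keys[] = {1, 3, 5, 7, 9};
  printf("%d\n", searchAscSketch(keys, 5, 0, 6));  /* 2: keys[2] = 5 is the last key <= 6 */
  printf("%d\n", searchAscSketch(keys, 5, 0, 9));  /* 4 */
  printf("%d\n", searchAscSketch(keys, 5, 2, 0));  /* -1: key precedes keys[pos] */
  return 0;
}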
......@@ -1844,7 +1841,6 @@ static void copyAllRemainRowsFromFileBlock(STsdbQueryHandle* pQueryHandle, STabl
int32_t getEndPosInDataBlock(STsdbQueryHandle* pQueryHandle, SDataBlockInfo* pBlockInfo) {
// NOTE: reverse the order to find the end position in data block
int32_t endPos = -1;
int32_t order = ASCENDING_TRAVERSE(pQueryHandle->order)? TSDB_ORDER_DESC : TSDB_ORDER_ASC;
SQueryFilePos* cur = &pQueryHandle->cur;
SDataCols* pCols = pQueryHandle->rhelper.pDCols[0];
......@@ -1857,7 +1853,9 @@ int32_t getEndPosInDataBlock(STsdbQueryHandle* pQueryHandle, SDataBlockInfo* pBl
cur->mixBlock = (cur->pos != pBlockInfo->rows - 1);
} else {
assert(pCols->numOfRows > 0);
endPos = doBinarySearchKey(pCols->cols[0].pData, pCols->numOfRows, pQueryHandle->window.ekey, order);
int pos = ASCENDING_TRAVERSE(pQueryHandle->order)? 0 : pBlockInfo->rows - 1;
endPos = doBinarySearchKey(pCols->cols[0].pData, pCols->numOfRows, pos, pQueryHandle->window.ekey, pQueryHandle->order);
assert(endPos != -1);
cur->mixBlock = true;
}
......@@ -1877,11 +1875,9 @@ static void doMergeTwoLevelData(STsdbQueryHandle* pQueryHandle, STableCheckInfo*
assert(pCols->cols[0].type == TSDB_DATA_TYPE_TIMESTAMP && pCols->cols[0].colId == PRIMARYKEY_TIMESTAMP_COL_INDEX &&
cur->pos >= 0 && cur->pos < pBlock->numOfRows);
TSKEY* tsArray = pCols->cols[0].pData;
assert(pCols->numOfRows == pBlock->numOfRows && tsArray[0] == pBlock->keyFirst && tsArray[pBlock->numOfRows-1] == pBlock->keyLast);
// for search the endPos, so the order needs to reverse
int32_t order = (pQueryHandle->order == TSDB_ORDER_ASC)? TSDB_ORDER_DESC:TSDB_ORDER_ASC;
// key read from file
TSKEY* keyFile = pCols->cols[0].pData;
assert(pCols->numOfRows == pBlock->numOfRows && keyFile[0] == pBlock->keyFirst && keyFile[pBlock->numOfRows-1] == pBlock->keyLast);
int32_t step = ASCENDING_TRAVERSE(pQueryHandle->order)? 1:-1;
int32_t numOfCols = (int32_t)(QH_GET_NUM_OF_COLS(pQueryHandle));
......@@ -1889,6 +1885,7 @@ static void doMergeTwoLevelData(STsdbQueryHandle* pQueryHandle, STableCheckInfo*
STable* pTable = pCheckInfo->pTableObj;
int32_t endPos = getEndPosInDataBlock(pQueryHandle, &blockInfo);
tsdbDebug("%p uid:%" PRIu64",tid:%d start merge data block, file block range:%"PRIu64"-%"PRIu64" rows:%d, start:%d,"
"end:%d, 0x%"PRIx64,
pQueryHandle, pCheckInfo->tableId.uid, pCheckInfo->tableId.tid, blockInfo.window.skey, blockInfo.window.ekey,
......@@ -1902,6 +1899,7 @@ static void doMergeTwoLevelData(STsdbQueryHandle* pQueryHandle, STableCheckInfo*
STSchema* pSchema1 = NULL;
STSchema* pSchema2 = NULL;
// position in file ->fpos
int32_t pos = cur->pos;
cur->win = TSWINDOW_INITIALIZER;
......@@ -1918,19 +1916,23 @@ static void doMergeTwoLevelData(STsdbQueryHandle* pQueryHandle, STableCheckInfo*
break;
}
TSKEY key = memRowKey(row1);
if ((key > pQueryHandle->window.ekey && ASCENDING_TRAVERSE(pQueryHandle->order)) ||
(key < pQueryHandle->window.ekey && !ASCENDING_TRAVERSE(pQueryHandle->order))) {
TSKEY keyMem = memRowKey(row1);
if ((keyMem > pQueryHandle->window.ekey && ASCENDING_TRAVERSE(pQueryHandle->order)) ||
(keyMem < pQueryHandle->window.ekey && !ASCENDING_TRAVERSE(pQueryHandle->order))) {
break;
}
if (((pos > endPos || tsArray[pos] > pQueryHandle->window.ekey) && ASCENDING_TRAVERSE(pQueryHandle->order)) ||
((pos < endPos || tsArray[pos] < pQueryHandle->window.ekey) && !ASCENDING_TRAVERSE(pQueryHandle->order))) {
// break if pos falls outside this block's endPos range; note the old code could crash when pos was -1.
if(ASCENDING_TRAVERSE(pQueryHandle->order)) { //ASC
if(pos > endPos || keyFile[pos] > pQueryHandle->window.ekey)
break;
} else { //DESC
if(pos < endPos || keyFile[pos] < pQueryHandle->window.ekey)
break;
}
if ((key < tsArray[pos] && ASCENDING_TRAVERSE(pQueryHandle->order)) ||
(key > tsArray[pos] && !ASCENDING_TRAVERSE(pQueryHandle->order))) {
if ((keyMem < keyFile[pos] && ASCENDING_TRAVERSE(pQueryHandle->order)) ||
(keyMem > keyFile[pos] && !ASCENDING_TRAVERSE(pQueryHandle->order))) {
if (rv1 != memRowVersion(row1)) {
pSchema1 = tsdbGetTableSchemaByVersion(pTable, memRowVersion(row1), (int8_t)memRowType(row1));
rv1 = memRowVersion(row1);
......@@ -1942,16 +1944,18 @@ static void doMergeTwoLevelData(STsdbQueryHandle* pQueryHandle, STableCheckInfo*
mergeTwoRowFromMem(pQueryHandle, pQueryHandle->outputCapacity, numOfRows, row1, row2, numOfCols, pTable, pSchema1, pSchema2, true);
numOfRows += 1;
// record the start key from the memory row if it has not been set yet
if (cur->win.skey == TSKEY_INITIAL_VAL) {
cur->win.skey = key;
cur->win.skey = keyMem;
}
cur->win.ekey = key;
cur->lastKey = key + step;
cur->win.ekey = keyMem;
cur->lastKey = keyMem + step;
cur->mixBlock = true;
moveToNextRowInMem(pCheckInfo);
} else if (key == tsArray[pos]) { // data in buffer has the same timestamp of data in file block, ignore it
// timestamps in memory and file are equal: resolve according to the update mode
} else if (keyMem == keyFile[pos]) {
if (pCfg->update) {
if(pCfg->update == TD_ROW_PARTIAL_UPDATE) {
doCopyRowsFromFileBlock(pQueryHandle, pQueryHandle->outputCapacity, numOfRows, pos, pos);
......@@ -1969,31 +1973,36 @@ static void doMergeTwoLevelData(STsdbQueryHandle* pQueryHandle, STableCheckInfo*
mergeTwoRowFromMem(pQueryHandle, pQueryHandle->outputCapacity, numOfRows, row1, row2, numOfCols, pTable, pSchema1, pSchema2, forceSetNull);
numOfRows += 1;
if (cur->win.skey == TSKEY_INITIAL_VAL) {
cur->win.skey = key;
cur->win.skey = keyMem;
}
cur->win.ekey = key;
cur->lastKey = key + step;
cur->win.ekey = keyMem;
cur->lastKey = keyMem + step;
cur->mixBlock = true;
// the memory iterator moves to the next row
moveToNextRowInMem(pCheckInfo);
// the file position moves to the next row; the file row is discarded
pos += step;
} else {
// update disabled: only the memory iterator moves on; the memory row is discarded
moveToNextRowInMem(pCheckInfo);
}
} else if ((key > tsArray[pos] && ASCENDING_TRAVERSE(pQueryHandle->order)) ||
(key < tsArray[pos] && !ASCENDING_TRAVERSE(pQueryHandle->order))) {
// the file row comes first: copy it from the file block
} else if ((keyMem > keyFile[pos] && ASCENDING_TRAVERSE(pQueryHandle->order)) ||
(keyMem < keyFile[pos] && !ASCENDING_TRAVERSE(pQueryHandle->order))) {
if (cur->win.skey == TSKEY_INITIAL_VAL) {
cur->win.skey = tsArray[pos];
cur->win.skey = keyFile[pos];
}
int32_t end = doBinarySearchKey(pCols->cols[0].pData, pCols->numOfRows, key, order);
int32_t end = doBinarySearchKey(pCols->cols[0].pData, pCols->numOfRows, pos, keyMem, pQueryHandle->order);
assert(end != -1);
if (tsArray[end] == key) { // the value of key in cache equals to the end timestamp value, ignore it
if (keyFile[end] == keyMem) { // the value of key in cache equals to the end timestamp value, ignore it
if (pCfg->update == TD_ROW_DISCARD_UPDATE) {
moveToNextRowInMem(pCheckInfo);
} else {
// update enabled: do not copy this row yet; it is handled in the next iteration when keyMem == keyFile[pos]
end -= step;
}
}
......@@ -2001,10 +2010,17 @@ static void doMergeTwoLevelData(STsdbQueryHandle* pQueryHandle, STableCheckInfo*
int32_t qstart = 0, qend = 0;
getQualifiedRowsPos(pQueryHandle, pos, end, numOfRows, &qstart, &qend);
if(qend >= qstart) {
// copy qend - qstart + 1 rows from file
numOfRows = doCopyRowsFromFileBlock(pQueryHandle, pQueryHandle->outputCapacity, numOfRows, qstart, qend);
pos += (qend - qstart + 1) * step;
int32_t num = qend - qstart + 1;
pos += num * step;
} else {
// nothing copied from the file
pos += step;
}
cur->win.ekey = ASCENDING_TRAVERSE(pQueryHandle->order)? tsArray[qend]:tsArray[qstart];
cur->win.ekey = ASCENDING_TRAVERSE(pQueryHandle->order)? keyFile[qend] : keyFile[qstart];
cur->lastKey = cur->win.ekey + step;
}
} while (numOfRows < pQueryHandle->outputCapacity);
......@@ -2021,7 +2037,7 @@ static void doMergeTwoLevelData(STsdbQueryHandle* pQueryHandle, STableCheckInfo*
!ASCENDING_TRAVERSE(pQueryHandle->order))) {
// no data in cache or data in cache is greater than the ekey of time window, load data from file block
if (cur->win.skey == TSKEY_INITIAL_VAL) {
cur->win.skey = tsArray[pos];
cur->win.skey = keyFile[pos];
}
int32_t start = -1, end = -1;
......@@ -2030,7 +2046,7 @@ static void doMergeTwoLevelData(STsdbQueryHandle* pQueryHandle, STableCheckInfo*
numOfRows = doCopyRowsFromFileBlock(pQueryHandle, pQueryHandle->outputCapacity, numOfRows, start, end);
pos += (end - start + 1) * step;
cur->win.ekey = ASCENDING_TRAVERSE(pQueryHandle->order)? tsArray[end]:tsArray[start];
cur->win.ekey = ASCENDING_TRAVERSE(pQueryHandle->order)? keyFile[end] : keyFile[start];
cur->lastKey = cur->win.ekey + step;
cur->mixBlock = true;
}
......@@ -2946,6 +2962,9 @@ static bool loadDataBlockFromTableSeq(STsdbQueryHandle* pQueryHandle) {
// handle data in cache situation
bool tsdbNextDataBlock(TsdbQueryHandleT pHandle) {
STsdbQueryHandle* pQueryHandle = (STsdbQueryHandle*) pHandle;
if (pQueryHandle == NULL) {
return false;
}
if (emptyQueryTimewindow(pQueryHandle)) {
tsdbDebug("%p query window not overlaps with the data set, no result returned, 0x%"PRIx64, pQueryHandle, pQueryHandle->qId);
......@@ -3065,6 +3084,9 @@ static int32_t doGetExternalRow(STsdbQueryHandle* pQueryHandle, int16_t type, SM
pSecQueryHandle = tsdbQueryTablesImpl(pQueryHandle->pTsdb, &cond, pQueryHandle->qId, pMemRef);
tfree(cond.colList);
if (pSecQueryHandle == NULL) {
goto out_of_memory;
}
// current table, only one table
STableCheckInfo* pCurrent = taosArrayGet(pQueryHandle->pTableCheckInfo, pQueryHandle->activeIndex);
......
......@@ -279,6 +279,7 @@ python3 ./test.py -f query/queryCnameDisplay.py
# python3 ./test.py -f query/long_where_query.py
python3 test.py -f query/nestedQuery/queryWithSpread.py
python3 ./test.py -f query/bug6586.py
# python3 ./test.py -f query/bug5903.py
#stream
python3 ./test.py -f stream/metric_1.py
......@@ -363,7 +364,7 @@ python3 ./test.py -f query/last_row_cache.py
python3 ./test.py -f account/account_create.py
python3 ./test.py -f alter/alter_table.py
python3 ./test.py -f query/queryGroupbySort.py
python3 ./test.py -f functions/queryTestCases.py
#python3 ./test.py -f functions/queryTestCases.py
python3 ./test.py -f functions/function_stateWindow.py
python3 ./test.py -f functions/function_derivative.py
python3 ./test.py -f functions/function_irate.py
......
......@@ -1289,6 +1289,34 @@ class TDTestCase:
" fill(linear)"
]
tdSql.prepare()
# insert new data of double and float types to test ceil, floor and round
tdSql.execute("create table if not exists jsons7(ts timestamp, dataInt int, dataBool bool, datafloat float, datadouble double, dataStr nchar(50)) tags(jtag nchar(128))")
tdSql.execute("insert into jsons7_1 using jsons7 tags('{\"nv\":null,\"tea\":true,\"\":false,\" \":123,\"tea\":false}') values (now,2,'true',0.9,0.1,'123')")
tdSql.query("select * from jsons7")
tdSql.checkRows(1)
tdSql.execute("insert into jsons7_1 values (now+1s,3,'true',-4.8,-5.5,'123') ")
tdSql.execute("insert into jsons7_1 values (now+2s,4,'true',1.9998,2.00001,'123') ")
tdSql.execute("insert into jsons7_2 using jsons7 tags('{\"nv\":null,\"tea\":true,\"\":false,\"tag\":123,\"tea\":false}') values (now,5,'true',4.01,2.2,'123') ")
tdSql.execute("insert into jsons7_2 (ts,datadouble) values (now+3s,-0.9) ")
tdSql.execute("insert into jsons7_2 (ts,datadouble) values (now+4s,-2.9) ")
tdSql.execute("insert into jsons7_2 (ts,datafloat) values (now+1s,-0.9) ")
tdSql.execute("insert into jsons7_2 (ts,datafloat) values (now+2s,-1.9) ")
tdSql.query("select ts,ceil(dataint),ceil(datafloat),ceil(datadouble) from jsons7")
tdSql.checkRows(8)
tdSql.checkData(5, 1, None)
tdSql.checkData(6, 2, None)
tdSql.checkData(7, 3, -2)
tdSql.query("select ceil(dataint),ceil(datafloat),ceil(datadouble) from jsons7")
tdSql.checkRows(8)
tdSql.checkData(5, 1, -1)
tdSql.checkData(5, 2, None)
tdSql.checkData(7, 0, None)
tdSql.checkData(7, 2, -2)
tdSql.query("select ts,floor(dataint),floor(datafloat),floor(datadouble) from jsons7")
tdSql.query("select floor(dataint),floor(datafloat),floor(datadouble) from jsons7")
tdSql.query("select ts,round(dataint),round(datafloat),round(datadouble) from jsons7")
tdSql.query("select round(dataint),round(datafloat),round(datadouble) from jsons7")
tdSql.execute(
"create stable super (ts timestamp, timestamp_col timestamp, int_col int, bigint_col bigint, float_col float,\
double_col double, binary_col binary(8), smallint_col smallint, tinyint_col tinyint, bool_col bool, nchar_col nchar(8), \
......
......@@ -68,9 +68,9 @@ class TDTestCase:
tdSql.checkData(0, 0, "2018-09-17 09:00:10.000")
tdSql.checkData(0, 1, "2018-09-17 09:00:10.000")
tdSql.checkData(0, 3, "2018-09-17 09:00:10.000")
tdSql.checkData(1, 0, "2018-09-17 09:00:20.009")
tdSql.checkData(1, 1, "2018-09-17 09:00:20.009")
tdSql.checkData(1, 3, "2018-09-17 09:00:20.009")
tdSql.checkData(1, 0, "2018-09-17 09:00:20.000")
tdSql.checkData(1, 1, "2018-09-17 09:00:20.000")
tdSql.checkData(1, 3, "2018-09-17 09:00:20.000")
tdSql.query("select ts from(select ts,derivative(col, 10s, 0) from stb group by tbname)")
......@@ -150,6 +150,7 @@ class TDTestCase:
tdSql.error("select derivative(col, -106752999999999922222d, 0) from stb group by tbname"); #overflow error
tdSql.error("select derivative(col, 10y, 0) from stb group by tbname") #TD-10399, DB error: syntax error near '10y, 0) from stb group by tbname;'
tdSql.error("select derivative(col, -106752d, 0) from stb group by tbname") #TD-10398 overflow tips
tdSql.error("select derivative(col, 106751991168d, 0) from stb group by tbname") #TD-10398 overflow tips
def run(self):
tdSql.prepare()
......
......@@ -84,7 +84,7 @@ class TDTestCase:
index_value = np.dstack((cpms_index[0])).squeeze()
tdSql.query("show variables")
tdSql.checkData(index_value, 1, -1)
tdSql.checkData(index_value, 1, 524288)
tdSql.query("show dnodes")
index = tdSql.getData(0, 0)
......@@ -1570,7 +1570,7 @@ class TDTestCase:
# master branch
self.td3690()
self.td4082()
# self.td4082()
self.td4288()
self.td4724()
self.td5935()
......
......@@ -45,6 +45,11 @@ class TDTestCase:
# test case for https://jira.taosdata.com:18080/browse/TD-3716:
tdSql.error("insert into tb(now, 1)")
# test case for TD-10717
tdSql.error("insert into tb values(now,1),,(now+1s,1)")
tdSql.execute("insert into tb values(now+2s,1),(now+3s,1),(now+4s,1)")
tdSql.query("select * from tb")
tdSql.checkRows(insertRows + 4 +3)
def stop(self):
tdSql.close()
......
......@@ -686,10 +686,10 @@ class TDTestCase:
print("schemaless_insert result {}".format(code))
tdSql.query("describe `1234`")
tdSql.checkRows(8)
tdSql.checkRows(9)
tdSql.query("select * from `123`")
tdSql.checkRows(1)
#tdSql.query("select * from `123`")
#tdSql.checkRows(1)
payload = ['''
{
......@@ -711,10 +711,10 @@ class TDTestCase:
print("schemaless_insert result {}".format(code))
tdSql.query("describe `int`")
tdSql.checkRows(8)
tdSql.checkRows(9)
tdSql.query("select * from `and`")
tdSql.checkRows(1)
#tdSql.query("select * from `and`")
#tdSql.checkRows(1)
payload = ['''
{
......@@ -736,10 +736,10 @@ class TDTestCase:
print("schemaless_insert result {}".format(code))
tdSql.query("describe `double`")
tdSql.checkRows(8)
tdSql.checkRows(9)
tdSql.query("select * from `for`")
tdSql.checkRows(1)
#tdSql.query("select * from `for`")
#tdSql.checkRows(1)
payload = ['''
{
......@@ -761,10 +761,10 @@ class TDTestCase:
print("schemaless_insert result {}".format(code))
tdSql.query("describe `from`")
tdSql.checkRows(8)
tdSql.checkRows(9)
tdSql.query("select * from `!@#.^&`")
tdSql.checkRows(1)
#tdSql.query("select * from `!@#.^&`")
#tdSql.checkRows(1)
payload = ['''
{
......@@ -786,10 +786,10 @@ class TDTestCase:
print("schemaless_insert result {}".format(code))
tdSql.query("describe `!@#$.%^&*()`")
tdSql.checkRows(8)
tdSql.checkRows(9)
tdSql.query("select * from `none`")
tdSql.checkRows(1)
#tdSql.query("select * from `none`")
#tdSql.checkRows(1)
payload = ['''
{
......@@ -836,11 +836,10 @@ class TDTestCase:
print("schemaless_insert result {}".format(code))
tdSql.query("describe `stable`")
tdSql.checkRows(8)
tdSql.query("select * from `key`")
tdSql.checkRows(1)
tdSql.checkRows(9)
#tdSql.query("select * from `key`")
#tdSql.checkRows(1)
def stop(self):
tdSql.close()
......
......@@ -32,10 +32,10 @@ class TDTestCase:
### metric ###
print("============= step1 : test metric ================")
lines0 = [
"stb0_0 1626006833639000000ns 4i8 host=\"host0\" interface=\"eth0\"",
"stb0_1 1626006833639000000ns 4i8 host=\"host0\" interface=\"eth0\"",
"stb0_2 1626006833639000000ns 4i8 host=\"host0\" interface=\"eth0\"",
".stb0.3. 1626006833639000000ns 4i8 host=\"host0\" interface=\"eth0\"",
"stb0_0 1626006833639 4i8 host=\"host0\" interface=\"eth0\"",
"stb0_1 1626006833639 4i8 host=\"host0\" interface=\"eth0\"",
"stb0_2 1626006833639 4i8 host=\"host0\" interface=\"eth0\"",
".stb0.3. 1626006833639 4i8 host=\"host0\" interface=\"eth0\"",
]
code = self._conn.schemaless_insert(lines0, TDSmlProtocolType.TELNET.value, TDSmlTimestampType.NOT_CONFIGURED.value)
......@@ -287,72 +287,72 @@ class TDTestCase:
#tag ID as child table name
lines3_1 = [
"stb3_1 1626006833610 1 id=child_table1 host=host1",
"stb3_1 1626006833610 2 host=host2 iD=child_table2",
"stb3_1 1626006833610 3 ID=child_table3 host=host3"
]
#lines3_1 = [
# "stb3_1 1626006833610 1 id=child_table1 host=host1",
# "stb3_1 1626006833610 2 host=host2 iD=child_table2",
# "stb3_1 1626006833610 3 ID=child_table3 host=host3"
# ]
code = self._conn.schemaless_insert(lines3_1, TDSmlProtocolType.TELNET.value, TDSmlTimestampType.NOT_CONFIGURED.value)
print("schemaless_insert result {}".format(code))
#code = self._conn.schemaless_insert(lines3_1, TDSmlProtocolType.TELNET.value, TDSmlTimestampType.NOT_CONFIGURED.value)
#print("schemaless_insert result {}".format(code))
tdSql.query("select * from stb3_1")
tdSql.checkRows(3)
#tdSql.query("select * from stb3_1")
#tdSql.checkRows(3)
tdSql.query("show tables like \"child%\"")
tdSql.checkRows(3)
#tdSql.query("show tables like \"child%\"")
#tdSql.checkRows(3)
tdSql.checkData(0, 0, "child_table1")
#tdSql.checkData(0, 0, "child_table1")
### special characters and keywords ###
print("============= step4 : test special characters and keywords ================")
lines4_1 = [
"1234 1626006833610ms 1 id=123 456=true int=true double=false into=1 from=2 !@#$.%^&*()=false",
"int 1626006833610ms 2 id=and 456=true int=true double=false into=1 from=2 !@#$.%^&*()=false",
"double 1626006833610ms 2 id=for 456=true int=true double=false into=1 from=2 !@#$.%^&*()=false",
"from 1626006833610ms 2 id=!@#.^& 456=true int=true double=false into=1 from=2 !@#$.%^&*()=false",
"!@#$.%^&*() 1626006833610ms 2 id=none 456=true int=true double=false into=1 from=2 !@#$.%^&*()=false",
"STABLE 1626006833610ms 2 id=KEY 456=true int=true double=false TAG=1 FROM=2 COLUMN=false",
"1234 1626006833610 1 id=123 456=true int=true double=false into=1 from=2 !@#$.%^&*()=false",
"int 1626006833610 2 id=and 456=true int=true double=false into=1 from=2 !@#$.%^&*()=false",
"double 1626006833610 2 id=for 456=true int=true double=false into=1 from=2 !@#$.%^&*()=false",
"from 1626006833610 2 id=!@#.^& 456=true int=true double=false into=1 from=2 !@#$.%^&*()=false",
"!@#$.%^&*() 1626006833610 2 id=none 456=true int=true double=false into=1 from=2 !@#$.%^&*()=false",
"STABLE 1626006833610 2 id=KEY 456=true int=true double=false TAG=1 FROM=2 COLUMN=false",
]
code = self._conn.schemaless_insert(lines4_1, TDSmlProtocolType.TELNET.value, TDSmlTimestampType.NOT_CONFIGURED.value)
print("schemaless_insert result {}".format(code))
tdSql.query('describe `1234`')
tdSql.checkRows(8)
tdSql.checkRows(9)
tdSql.query('describe `int`')
tdSql.checkRows(8)
tdSql.checkRows(9)
tdSql.query('describe `double`')
tdSql.checkRows(8)
tdSql.checkRows(9)
tdSql.query('describe `from`')
tdSql.checkRows(8)
tdSql.checkRows(9)
tdSql.query('describe `!@#$.%^&*()`')
tdSql.checkRows(8)
tdSql.checkRows(9)
tdSql.query('describe `stable`')
tdSql.checkRows(8)
tdSql.checkRows(9)
tdSql.query('select * from `123`')
tdSql.checkRows(1)
#tdSql.query('select * from `123`')
#tdSql.checkRows(1)
tdSql.query('select * from `and`')
tdSql.checkRows(1)
#tdSql.query('select * from `and`')
#tdSql.checkRows(1)
tdSql.query('select * from `for`')
tdSql.checkRows(1)
#tdSql.query('select * from `for`')
#tdSql.checkRows(1)
tdSql.query('select * from `!@#.^&`')
tdSql.checkRows(1)
#tdSql.query('select * from `!@#.^&`')
#tdSql.checkRows(1)
tdSql.query('select * from `none`')
tdSql.checkRows(1)
#tdSql.query('select * from `none`')
#tdSql.checkRows(1)
tdSql.query('select * from `key`')
tdSql.checkRows(1)
#tdSql.query('select * from `key`')
#tdSql.checkRows(1)
def stop(self):
tdSql.close()
......
......@@ -83,8 +83,8 @@ class TDTestCase:
tdSql.query('select tbname, * from sth')
tdSql.checkRows(2)
tdSql.query('select tbname, * from childtable')
tdSql.checkRows(1)
#tdSql.query('select tbname, * from childtable')
#tdSql.checkRows(1)
###Special Character and keyss
self._conn.schemaless_insert([
......@@ -98,40 +98,40 @@ class TDTestCase:
tdSql.execute('reset query cache')
tdSql.query('describe `1234`')
tdSql.checkRows(9)
tdSql.checkRows(10)
tdSql.query('describe `int`')
tdSql.checkRows(8)
tdSql.checkRows(9)
tdSql.query('describe `double`')
tdSql.checkRows(8)
tdSql.checkRows(9)
tdSql.query('describe `from`')
tdSql.checkRows(8)
tdSql.checkRows(9)
tdSql.query('describe `!@#$.%^&*()`')
tdSql.checkRows(8)
tdSql.checkRows(9)
tdSql.query('describe `stable`')
tdSql.checkRows(8)
tdSql.checkRows(9)
tdSql.query('select * from `3456`')
tdSql.checkRows(1)
#tdSql.query('select * from `3456`')
#tdSql.checkRows(1)
tdSql.query('select * from `and`')
tdSql.checkRows(1)
#tdSql.query('select * from `and`')
#tdSql.checkRows(1)
tdSql.query('select * from `for`')
tdSql.checkRows(1)
#tdSql.query('select * from `for`')
#tdSql.checkRows(1)
tdSql.query('select * from `!@#$.%^`')
tdSql.checkRows(1)
#tdSql.query('select * from `!@#$.%^`')
#tdSql.checkRows(1)
tdSql.query('select * from `none`')
tdSql.checkRows(1)
#tdSql.query('select * from `none`')
#tdSql.checkRows(1)
tdSql.query('select * from `create`')
tdSql.checkRows(1)
#tdSql.query('select * from `create`')
#tdSql.checkRows(1)
def stop(self):
tdSql.close()
tdLog.success("%s successfully executed" % __file__)
......
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# -*- coding: utf-8 -*-
from util.log import *
from util.cases import *
from util.sql import *
class TDTestCase:
def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor(), logSql)
def run(self):
# TD-5903: 'show db.vgroups xxx' carries invalid trailing content but still returned results.
tdSql.execute("create database if not exists test_5903")
tdSql.execute("show test_5903.vgroups")
tdSql.error("show test_5903.vgroups xxx")
def stop(self):
tdSql.close()
tdLog.success("%s successfully executed" % __file__)
tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())
......@@ -31,14 +31,43 @@ class TDTestCase:
tdSql.execute("create database bug6387 ")
tdSql.execute("use bug6387 ")
tdSql.execute("create table test(ts timestamp, c1 int) tags(t1 int)")
prefix = "insert into "
sql = ""
for i in range(5000):
sql = "insert into t%d using test tags(1) values " % i
temp = "t%d using test tags(1) values " % i
for j in range(21):
sql = sql + "(now+%ds,%d)" % (j ,j )
tdSql.execute(sql)
temp = temp + "(now+%ds,%d)" % (j ,j )
sql = sql + temp
if i % 1000 == 0 :
tdSql.execute(prefix + sql)
sql = ""
tdSql.query("select count(*) from test interval(1s) group by tbname")
tdSql.checkData(0,1,1)
def escape_ascii(self):
tdSql.execute('drop database if exists db')
tdSql.execute('create database db')
tdSql.execute('use db')
tdSql.execute("create table car (ts timestamp, s int) tags(j int)")
for i in range(32,127):
if i == 96 : continue #`
sql = 'create table `是否出现%s` using car tags(%d)' % (chr(i), i)
tdSql.execute(sql)
for i in range(32,65):
sql = 'select tbname from car where tbname like "是否出现\%s"' % chr(i)
tdSql.query(sql)
if i == 37 : continue # " `
tdSql.checkRows(1)
for i in range(91,97):
sql = 'select tbname from car where tbname like "是否出现\%s"' % chr(i)
tdSql.query(sql)
if i == 96: continue # `
tdSql.checkRows(1)
for i in range(123,127):
sql = 'select tbname from car where tbname like "是否出现\%s"' % chr(i)
tdSql.query(sql)
tdSql.checkRows(1)
def run(self):
tdSql.prepare()
......@@ -165,6 +194,10 @@ class TDTestCase:
tdSql.query("show create table t1")
tdSql.checkRows(1)
#TS-636
tdLog.info("case for TS-636")
self.escape_ascii()
def stop(self):
tdSql.close()
tdLog.success("%s successfully executed" % __file__)
......
......@@ -14,6 +14,7 @@ from util.log import tdLog
from util.cases import tdCases
from util.sql import tdSql
from util.common import tdCom
import random
class TDTestCase:
def init(self, conn, logSql):
......@@ -46,8 +47,31 @@ class TDTestCase:
for i in range(100):
tdSql.query(f'select {table_name_sub1},{table_name_sub2},{table_name_sub3},{table_name_sub4},{table_name_sub5},{table_name_sub6},{table_name_sub7},{table_name_sub8},{table_name_sub9} from {table_name} where tbname in ("{table_name_sub1}","{table_name_sub2}","{table_name_sub3}","{table_name_sub4}","{table_name_sub5}","{table_name_sub6}","{table_name_sub7}","{table_name_sub8}","{table_name_sub9}") and ts >= "1980-01-01 00:00:00.000"')
tdSql.checkRows(0)
# TS-634
tdLog.info("test case for bug TS-634")
tdSql.execute("create database test")
tdSql.execute("use test")
tdSql.execute("create table meters (ts TIMESTAMP,voltage INT) TAGS (tableid INT)")
tdSql.execute("CREATE TABLE t1 USING meters TAGS (1)")
tdSql.execute("CREATE TABLE t2 USING meters TAGS (2)")
ts = 1605381041000
for i in range(10):
tdSql.execute("INSERT INTO t1 values(%d, %d)" % (ts + i, random.randint(0, 100)))
tdSql.execute("INSERT INTO t2 values(%d, %d)" % (ts + i, random.randint(0, 100)))
tdSql.query("select last_row(*), tbname from meters group by tbname order by ts desc")
tdSql.checkRows(2)
tdSql.execute("INSERT INTO t2 values(now, 2)")
tdSql.query("select last_row(*), tbname from meters group by tbname order by ts desc")
tdSql.checkRows(2)
def run(self):
tdSql.prepare()
self.queryGroupTbname()
......
......@@ -73,21 +73,18 @@ class TDTestCase:
tdSql.error(sql)
sql = 'select abs_max(c2) from db.stb'
tdSql.query(sql)
tdSql.checkData(0,0,1410065607)
tdSql.checkData(0,0,10000000199)
def test_udf_values(self):
tdSql.execute("drop function abs_max")
tdSql.execute("create function add_one as '/tmp/add_one.so' outputtype int")
tdSql.execute("create aggregate function abs_max as '/tmp/abs_max.so' outputtype bigint;")
tdSql.execute("create aggregate function sum_double as '/tmp/sum_double.so' outputtype int bufsize 128;")
tdSql.execute("create aggregate function sum_double as '/tmp/sum_double.so' outputtype bigint;")
# UDF bug no 1 -> the following 3 cases relate to this bug;
# tdSql.error("create aggregate function max as '/tmp/abs_max.so' outputtype bigint ;")
# tdSql.error("create aggregate function avg as '/tmp/abs_max.so' outputtype bigint ;")
# tdSql.error("create aggregate function dbs as '/tmp/abs_max.so' outputtype bigint ;")
tdSql.execute("drop database if exists test")
tdSql.execute("create database test")
tdSql.execute("use test")
......@@ -117,7 +114,7 @@ class TDTestCase:
tdSql.execute("insert into bound values(%d, %d , %f, %d , %s)"%(epoch_time+1000,intdata2+1,float(intdata2+1),bigintdata2+1,"'binary"+str(intdata2+1)+"'"))
# check super table calculation results
tdSql.query("select add_one(id) from st")
tdSql.query("select add_one(id) test from st")
tdSql.checkData(0,0,1)
tdSql.checkData(1,0,2)
tdSql.checkData(4,0,5)
......@@ -157,29 +154,266 @@ class TDTestCase:
tdLog.info(" ====== unexpected error occured about UDF function =====")
sys.exit()
# UDF bug no 2 -> values of abs_max are inconsistent between a common table and a stable.
# tdSql.query("select abs_max(val) from st") # result is 0 rows
# tdSql.query("select abs_max(val) from tb1")
# tdSql.checkData(0,0,0) # this is error result
# tdSql.query("select sum_double(val) from st") # result is 0 rows
# tdSql.query("select sum_double(val) from tb1")
# tdSql.checkData(0,0,0) # this is error result
# UDF bug no 3 -> abs_max returns wrong values for boundary numbers
tdSql.query("select abs_max(val) from st")
tdSql.query("select abs_max(val) from tb1")
tdSql.checkRows(0)
tdSql.query("select sum_double(val) from st")
tdSql.query("select sum_double(val) from tb1")
tdSql.checkRows(0)
# check super table calculation results
# tdSql.query("select abs_max(number) from st")
# tdSql.checkData(0,0,9223372036854775807)
tdSql.query("select abs_max(number) from st")
tdSql.checkData(0,0,9223372036854775807)
# check common table calculation results
tdSql.query("select abs_max(number) from tb1")
tdSql.checkData(0,0,400)
tdSql.query("select abs_max(number) from tb2")
tdSql.checkData(0,0,400)
tdSql.execute("select add_one(id) from st limit 10 offset 2")
tdSql.query("select add_one(id) from st where ts > 1604298064000 and ts < 1604298064020 ")
tdSql.checkData(0,0,1)
tdSql.checkData(1,0,-2147483644)
tdSql.query("select add_one(id) from tb1 where ts > 1604298064000 and ts < 1604298064020 ")
tdSql.checkData(0,0,1)
tdSql.query("select sum_double(id) from st where ts > 1604298064030 and ts < 1604298064060 ")
tdSql.checkData(0,0,14)
tdSql.query("select sum_double(id) from tb2 where ts > 1604298064030 and ts < 1604298064060 ")
tdSql.checkRows(0)
tdSql.query("select add_one(id) from st where ts = 1604298064000 ")
tdSql.checkData(0,0,-2147483645)
tdSql.query("select add_one(id) from st where ts > 1604298064000 and id in (2,3) and ind =1;")
tdSql.checkData(0,0,3)
tdSql.checkData(1,0,4)
tdSql.query("select id , add_one(id) from tb1 where ts > 1604298064000 and id in (2,3)")
tdSql.checkData(0,0,2)
tdSql.checkData(0,1,3)
tdSql.checkData(1,0,3)
tdSql.checkData(1,1,4)
tdSql.query("select sum_double(id) from tb1 where ts > 1604298064000 and id in (2,3)")
tdSql.checkData(0,0,10)
tdSql.query("select sum_double(id) from st where ts > 1604298064000 and id in (2,3) and ind =1")
tdSql.checkData(0,0,10)
tdSql.query("select abs_max(number) from st where ts > 1604298064000 and id in (2,3) and ind =1")
tdSql.checkData(0,0,300)
tdSql.query("select sum_double(id) from st where ts = 1604298064030 ")
tdSql.checkData(0,0,4)
tdSql.query("select abs_max(number) from st where ts = 1604298064100 ")
tdSql.checkData(0,0,9223372036854775806)
tdSql.query("select abs_max(number) from tb2 where ts = 1604298064100 ")
tdSql.checkData(0,0,400)
tdSql.query("select sum_double(id) from tb2 where ts = 1604298064100 ")
tdSql.checkData(0,0,8)
tdSql.query("select add_one(id) from st where ts >= 1604298064000 and ts <= 1604298064010")
tdSql.checkData(0,0,1)
tdSql.checkData(1,0,-2147483645)
tdSql.checkData(2,0,-2147483644)
tdSql.query("select add_one(id) from tb1 where ts >= 1604298064000 and ts <= 1604298064010")
tdSql.checkData(0,0,1)
tdSql.query("select sum_double(id) from st where ts >= 1604298064030 and ts <= 1604298064050")
tdSql.checkData(0,0,18)
tdSql.query("select sum_double(id) from tb2 where ts >= 1604298064030 and ts <= 1604298064100")
tdSql.checkData(0,0,20)
tdSql.query("select abs_max(number) from tb2 where ts >= 1604298064030 and ts <= 1604298064100")
tdSql.checkData(0,0,400)
tdSql.query("select abs_max(number) from st where ts >= 1604298064030 and ts <= 1604298064100")
tdSql.checkData(0,0,9223372036854775806)
tdSql.query("select id from st where id != 0 and ts >=1604298064070")
tdSql.checkData(0,0,1)
tdSql.query("select add_one(id) from st where id != 0 and ts >=1604298064070")
tdSql.checkData(0,0,2)
tdSql.query("select add_one(id) from st where id <> 0 and ts >=1604298064010")
tdSql.checkData(0,0,2)
tdSql.query("select sum_double(id) from st where id in (2,3,4) and ts >=1604298064070")
tdSql.checkData(0,0,18)
tdSql.query("select sum_double(id) from tb2 where id in (2,3,4) and ts >=1604298064070")
tdSql.checkData(0,0,18)
tdSql.query("select abs_max(number) from st where id in (2,3,4) and ts >=1604298064070")
tdSql.checkData(0,0,400)
tdSql.query("select add_one(id) from st where id = 0 ")
tdSql.checkData(0,0,1)
tdSql.checkData(1,0,1)
tdSql.query("select add_one(id) from tb2 where id = 0 ")
tdSql.checkData(0,0,1)
tdSql.query("select sum_double(id) from st where id = 1")
tdSql.checkData(0,0,4)
tdSql.query("select sum_double(id) from tb2 where id = 1")
tdSql.checkData(0,0,2)
tdSql.query("select add_one(id) from st where id is not null and ts >=1604298065000 ")
tdSql.checkData(0,0,None)
tdSql.query("select abs_max(number) from st where id is not null and ts >=1604298065000 ")
tdSql.checkData(0,0,9223372036854775807)
tdSql.query("select abs_max(number) from bound where id is not null and ts >=1604298065000 ")
tdSql.checkData(0,0,9223372036854775807)
tdSql.query("select sum_double(id) from st where id is not null and ts >=1604298064000 and ind = 1 ")
tdSql.checkData(0,0,20)
tdSql.query("select sum_double(id) from tb1 where id is not null and ts >=1604298064000 ")
tdSql.checkData(0,0,20)
tdSql.query("select add_one(id) from st where id is null and ts >=1604298065000 ")
tdSql.checkRows(0)
tdSql.query("select abs_max(number) from st where id is null and ts >=1604298065000 ")
tdSql.checkRows(0)
tdSql.query("select abs_max(number) from tb1 where id is null and ts >=1604298065000 ")
tdSql.checkRows(0)
tdSql.query("select add_one(id) from bound where id is not null and ts >=1604298065000;")
tdSql.checkData(0,0,None)
tdSql.query("select id,add_one(id) from bound;")
tdSql.checkRowCol(4,2)
tdSql.checkData(3,1,None)
tdSql.query("select add_one(id) from st where ts between 1604298064000 and 1604298064010")
tdSql.checkRows(3)
tdSql.query("select add_one(id) from tb1 where ts between 1604298064000 and 1604298064010")
tdSql.checkRows(1)
tdSql.query("select sum_double(id) from st where ts between 1604298064000 and 1604298064010 and id>=0")
tdSql.checkData(0,0,0)
tdSql.query("select sum_double(id) from tb1 where ts between 1604298064000 and 1604298064010 and id>=0")
tdSql.checkData(0,0,0)
tdSql.query("select add_one(id) from st where id in (1,2)")
tdSql.checkData(0,0,2)
tdSql.checkData(1,0,3)
tdSql.checkData(2,0,2)
tdSql.checkData(3,0,3)
tdSql.checkRows(4)
tdSql.query("select sum_double(id) from st where ts < now and ind =1 interval(1s)")
tdSql.checkData(0,1,20)
tdSql.error("select sum_double(id) from st where ts < now and ind =1 interval(3s) sliding (1s) fill (NULL) ")
tdSql.error("select sum_double(id) from st session(ts, 1s)")
tdSql.query("select sum_double(id) from tb1 session(ts, 1s)")
tdSql.checkData(0,1,20)
# intervals sliding values calculation
tdSql.query("select sum_double(id) from st where ts < now and ind =1 interval(3s) sliding (1s) limit 2")
tdSql.checkData(0,1,20)
tdSql.checkData(1,1,20)
# scalar_function can't work when using interval and sliding =========
tdSql.error("select add_one(id) from st where ts < now and ind =1 interval(3s) sliding (1s) limit 2 ")
tdSql.error("select add_one(id) from st order by ts")
tdSql.error("select ts,id,add_one(id) from st order by ts asc;")
# UDF does not support order by
tdSql.error("select ts,id,add_one(id) from st order by ts desc;")
# UDF function union all
tdSql.query("select add_one(id) from tb1 union all select add_one(id) from tb2;")
tdSql.checkRows(10)
tdSql.checkData(0,0,1)
tdSql.checkData(5,0,1)
tdSql.query("select sum_double(id) from tb1 union all select sum_double(id) from tb2;")
tdSql.checkRows(2)
tdSql.checkData(0,0,20)
tdSql.checkData(1,0,20)
tdSql.query("select abs_max(number) from tb1 union all select abs_max(number) from bound;")
tdSql.checkRows(2)
tdSql.checkData(0,0,400)
tdSql.checkData(1,0,9223372036854775807)
tdSql.execute("create stable stb (ts timestamp,id int , val double , number bigint, chars binary(200)) tags (ind int)")
tdSql.execute("create table stb1 using stb tags(3)")
tdSql.execute("insert into stb1 values(1604298064000 , 1 , 1.0 , 10000 ,'chars')")
tdSql.query("select add_one(id) from st union all select add_one(id) from stb;")
tdSql.checkRows(15)
tdSql.checkData(13,0,None)
tdSql.checkData(14,0,2)
tdSql.query("select add_one(id) from st union all select add_one(id) from stb1;")
tdSql.checkRows(15)
tdSql.checkData(13,0,None)
tdSql.checkData(14,0,2)
tdSql.query("select id ,add_one(id) from tb1 union all select id ,add_one(id) from stb1;")
tdSql.checkRows(6)
tdSql.checkData(0,0,0)
tdSql.checkData(0,1,1)
tdSql.checkData(1,0,1)
tdSql.checkData(1,1,2)
# aggregate union all for different stables
tdSql.query("select sum_double(id) from st union all select sum_double(id) from stb;")
tdSql.checkRows(2)
tdSql.checkData(0,0,44)
tdSql.checkData(1,0,2)
tdSql.query("select id from st union all select id from stb1;")
tdSql.checkRows(15)
tdSql.query("select id from tb1 union all select id from stb1")
tdSql.checkRows(6)
tdSql.query("select sum_double(id) from tb1 union all select sum_double(id) from stb")
tdSql.checkData(0,0,20)
tdSql.checkData(1,0,2)
tdSql.query("select sum_double(id) from st union all select sum_double(id) from stb1;")
tdSql.checkRows(2)
tdSql.checkData(0,0,44)
tdSql.checkData(1,0,2)
tdSql.query("select abs_max(number) from st union all select abs_max(number) from stb;")
tdSql.checkData(0,0,9223372036854775807)
tdSql.query("select abs_max(number) from bound union all select abs_max(number) from stb1;")
tdSql.checkData(0,0,9223372036854775807)
tdSql.checkData(1,0,10000)
tdSql.query("select abs_max(number) from st union all select abs_max(number) from stb1;")
tdSql.checkData(0,0,9223372036854775807)
tdSql.checkData(1,0,10000)
# group by for aggregate functions ;
tdSql.query("select sum_double(id) from st group by tbname;")
tdSql.checkData(0,0,20)
tdSql.checkData(0,1,'tb1')
tdSql.checkData(1,0,20)
tdSql.checkData(1,1,'tb2')
tdSql.query("select sum_double(id) from st group by id;")
tdSql.checkRows(9)
tdSql.query("select sum_double(id) from st group by ts")
tdSql.checkRows(12)
tdSql.query("select sum_double(id) from st group by ind")
tdSql.checkRows(3)
tdSql.query("select sum_double(id) from st group by tbname order by ts asc;")
tdSql.query("select abs_max(number) from st group by id")
tdSql.checkRows(9)
tdSql.checkData(0,0,9223372036854775806)
tdSql.checkData(8,0,9223372036854775807)
tdSql.query("select abs_max(number) from st group by ts")
tdSql.checkRows(12)
tdSql.checkData(11,0,9223372036854775807)
tdSql.checkData(1,0,9223372036854775805)
tdSql.query("select abs_max(number) from st group by ind")
tdSql.checkRows(3)
tdSql.checkData(0,0,400)
tdSql.checkData(2,0,9223372036854775807)
# UDF join
tdSql.query("select add_one(tb1.id),add_one(bound.id) from tb1,bound where tb1.ts=bound.ts;")
tdSql.checkData(0,0,1)
tdSql.checkData(0,1,-2147483644)
tdSql.query("select stb1.ts,add_one(stb1.id),bound.ts,add_one(bound.id) from stb1,bound where stb1.ts=bound.ts")
tdSql.checkData(0,1,2)
tdSql.checkData(0,3,-2147483645)
tdSql.query("select st.ts,add_one(st.id),stb.ts,add_one(stb.id) from st,stb where st.ts=stb.ts and st.ind=stb.ind")
tdSql.checkData(0,1,-2147483645)
tdSql.checkData(0,3,2)
tdSql.query("select sum_double(tb1.id),sum_double(bound.id) from tb1,bound where tb1.ts=bound.ts;")
tdSql.checkData(0,0,0)
tdSql.checkData(0,1,-4294967290)
tdSql.query("select sum_double(stb1.id),sum_double(bound.id) from stb1,bound where stb1.ts=bound.ts")
tdSql.checkData(0,0,2)
tdSql.checkData(0,1,-4294967292)
#UDF join for stables
tdSql.query("select sum_double(st.id),sum_double(stb.id) from st,stb where st.ts=stb.ts and st.ind=stb.ind")
tdSql.checkData(0,0,-4294967292)
tdSql.checkData(0,1,2)
tdSql.query("select abs_max(tb1.number),abs_max(bound.number) from tb1,bound where tb1.ts=bound.ts;")
tdSql.checkData(0,0,0)
tdSql.checkData(0,1,9223372036854775805)
tdSql.query("select abs_max(stb1.number),abs_max(bound.number) from stb1,bound where stb1.ts=bound.ts")
tdSql.checkData(0,0,10000)
tdSql.checkData(0,1,9223372036854775806)
tdSql.query("select abs_max(st.number),abs_max(stb.number) from st,stb where st.ts=stb.ts and st.ind=stb.ind")
tdSql.checkData(0,0,9223372036854775806)
tdSql.checkData(0,1,10000)
# check boundary
# tdSql.query("select abs_max(number) from bound")
# tdSql.checkData(0,0,9223372036854775807)
tdSql.query("select abs_max(number) from bound")
tdSql.checkData(0,0,9223372036854775807)
tdLog.info("======= UDF function sum_double check =======")
......@@ -189,14 +423,10 @@ class TDTestCase:
tdSql.query("select sum_double(id) from tb1")
tdSql.checkData(0,0,20)
# UDF bug no 4 -> wrong values when two functions are used together: UDFs are limited and cannot be mixed with built-in functions.
# tdSql.query("select sum_double(id) , abs_max(number) from tb1")
# tdSql.checkData(0,0,20)
# tdSql.checkData(0,0,400)
# only one UDF function can be used per SQL statement; the following statements should report errors.
tdSql.error("select sum_double(id) , abs_max(number) from tb1")
tdSql.error("select sum_double(id) , abs_max(number) from st")
# tdSql.query("select sum_double(id) , abs_max(number) from st")
# tdSql.checkData(0,0,44)
# tdSql.checkData(0,0,9223372036854775807)
# UDF does not support mixing with built-in functions
# it seems scalar functions cannot be mixed with aggregate functions
......@@ -204,147 +434,162 @@ class TDTestCase:
tdSql.error("select sum_double(id) ,add_one(id) from tb1")
tdSql.error("select sum_double(id) ,max(id) from st")
tdSql.error("select sum_double(id) ,max(id) from tb1")
tdSql.error("select twa(id),add_one(id) from st")
tdSql.error("select twa(id),add_one(id) from tb1")
# UDF function not support Arithmetic ===================
tdSql.query("select max(id) + 5 from st")
tdSql.query("select max(id) + 5 from tb1")
tdSql.query("select max(id) + avg(val) from st")
tdSql.query("select abs_max(number)*5 from st")
tdSql.checkData(0,0,46116860184273879040.000000000)
tdSql.query("select abs_max(number)*5 from tb1")
tdSql.checkData(0,0,2000.000000000)
tdSql.query("select max(id) + avg(val) from tb1")
tdSql.query("select abs_max(number) + 5 from st")
tdSql.query("select add_one(id) + 5 from st")
tdSql.checkData(4,0,10.000000000)
tdSql.query("select add_one(id)/5 from tb1")
tdSql.checkData(4,0,1.000000000)
tdSql.query("select sum_double(id)-5 from st")
tdSql.checkData(0,0,39.000000000)
tdSql.query("select sum_double(id)*5 from tb1")
tdSql.checkData(0,0,100.000000000)
tdSql.query("select abs_max(number) + 5 from tb1")
tdSql.error("select abs_max(number) + max(id) from st")
tdSql.query("select abs_max(number)*abs_max(val) from st")
tdSql.query("select sum_double(id) + sum_double(id) from st")
tdSql.checkData(0,0,88.000000000)
tdLog.info("======= UDF Nested query test =======")
tdSql.query("select sum(id) from (select id from st)")
tdSql.checkData(0,0,22)
# UDF bug no 5 -> nested queries are not supported
# tdSql.query("select abs_max(number) from (select number from st)")
# tdSql.checkData(0,0,9223372036854775807)
# tdSql.query("select abs_max(number) from (select number from bound)")
# tdSql.checkData(0,0,9223372036854775807)
# tdSql.query("select sum_double(id) from (select id from st)")
# tdSql.checkData(0,0,44)
# tdSql.query("select sum_double(id) from (select id from tb1)")
# tdSql.checkData(0,0,10)
#UDF bug -> Nested query
# outer nest query
tdSql.query("select abs_max(number) from (select number from st)")
tdSql.checkData(0,0,9223372036854775807)
tdSql.query("select abs_max(number) from (select number from bound)")
tdSql.checkData(0,0,9223372036854775807)
tdSql.query("select sum_double(id) from (select id from st)")
tdSql.checkData(0,0,44)
tdSql.query("select sum_double(id) from (select id from bound)")
tdSql.checkData(0,0,4)
tdSql.query("select add_one(id) from (select id from st);")
tdSql.checkRows(14)
tdSql.checkData(1,0,2)
tdSql.query("select add_one(id) from (select id from bound);")
tdSql.checkRows(4)
tdSql.checkData(1,0,-2147483644)
# inner nest query
tdSql.query("select id from (select add_one(id) id from st)")
tdSql.checkRows(14)
tdSql.checkData(13,0,None)
tdSql.query("select id from (select add_one(id) id from bound)")
tdSql.checkRows(4)
tdSql.checkData(3,0,None)
tdSql.query("select id from (select sum_double(id) id from bound)")
tdSql.checkData(0,0,4)
tdSql.query("select id from (select sum_double(id) id from st)") # it will crash taos shell
tdSql.checkData(0,0,44)
# UDF bug no 6 -> group by works incorrectly
tdLog.info("======= UDF work with group by =======")
tdSql.query("select id from (select abs_max(number) id from st)") # it will crash taos shell
tdSql.checkData(0,0,9223372036854775807)
tdSql.query("select id from (select abs_max(number) id from bound)")
tdSql.checkData(0,0,9223372036854775807)
# tdSql.query("select sum_double(id) from st group by tbname;")
# tdSql.checkData(0,0,6)
# tdSql.checkData(0,1,'tb1')
# tdSql.checkData(1,0,2)
# tdSql.checkData(1,1,'tb2')
# tdSql.query("select sum_double(id) from st group by id;")
# tdSql.checkRows(2)
# tdSql.query("select sum_double(id) from st group by tbname order by ts asc;")
# inner and outer nest query
tdSql.query("select add_one(id) from (select add_one(id) id from st)")
tdSql.checkRows(14)
tdSql.checkData(0,0,2)
tdSql.checkData(1,0,3)
tdSql.query("select sum_double(id) from st where ts < now and ind =1 interval(1s)")
tdSql.checkData(0,1,20)
tdSql.error("select sum_double(id) from st session(ts, 1s) interval (10s,1s) sliding(10s) fill (NULL) ")
tdSql.error("select sum_double(id) from st session(ts, 1s)")
tdSql.query("select sum_double(id) from tb1 session(ts, 1s)")
tdSql.checkData(0,1,20)
tdSql.query("select add_one(id) from (select add_one(id) id from tb1)")
tdSql.checkRows(5)
tdSql.checkData(0,0,2)
tdSql.checkData(1,0,3)
# UDF -> bug no 7: interval sliding value calculation error
# tdSql.query("select sum_double(id) from st where ts < now and ind =1 interval(3s) sliding (1s) limit 2")
# tdSql.checkData(0,1,20)
# tdSql.checkData(1,1,20)
tdSql.query("select sum_double(sumdb) from (select sum_double(id) sumdb from st)")
tdSql.query("select sum_double(sumdb) from (select sum_double(id) sumdb from tb1)")
tdSql.query("select abs_max(number) from (select abs_max(number) number from st)")
tdSql.checkData(0,0,9223372036854775807)
tdSql.query("select abs_max(number) from (select abs_max(number) number from bound)")
tdSql.checkData(0,0,9223372036854775807)
# nest inner and outer with build-in func
tdSql.query("select max(number) from (select abs_max(number) number from st)")
tdSql.checkData(0,0,9223372036854775807)
tdSql.query("select max(number) from (select abs_max(number) number from bound)")
tdSql.checkData(0,0,9223372036854775807)
tdSql.query("select sum_double(sumdb) from (select sum_double(id) sumdb from st)")
tdSql.query("select sum(sumdb) from (select sum_double(id) sumdb from tb1)")
tdSql.checkData(0,0,20)
# scalar_function can't work when using interval and sliding =========
tdSql.error("select add_one(id) from st where ts < now and ind =1 interval(3s) sliding (1s) limit 2 ")
tdLog.info(" =====================test illegal creation method =====================")
tdSql.execute("drop function add_one")
# tdSql.execute("drop function add_one")
tdSql.execute("drop function abs_max")
tdSql.execute("drop function sum_double")
tdSql.execute("create aggregate function error_use1 as '/tmp/abs_max.so' outputtype bigint ")
tdSql.error("select error_use1(number) from st")
# UDF -> bug no 8: wrong return values when an aggregate function is created as a scalar_function
# illegal UDF: create aggregate functions as a scalar_function
# with no aggregate
# tdSql.execute("create function abs_max as '/tmp/abs_max.so' outputtype bigint bufsize 128")
# tdSql.query("select abs_max(number) from st") # this bug will return 3 rows
# tdSql.checkRows(1)
# tdSql.execute("create function sum_double as '/tmp/sum_double.so' outputtype bigint bufsize 128")
# tdSql.execute("select sum_double(id) from st")
# tdSql.checkRows(1)
# UDF -> bug no 9: bufsize given for scalar_function add_one;
# UDF -> needs improvement: outputtype does not match the datatype defined in the function code
tdSql.execute("create function add_one as '/tmp/add_one.so' outputtype bigint bufsize 128")
# tdSql.error("select add_one(val) from st") # it should return error not [] for not match col datatype
# tdSql.query("select add_one(id) from st") # return error query result
# tdSql.checkData(0,0,1)
# tdSql.checkData(1,0,2)
# tdSql.checkData(5,0,1)
# tdSql.checkData(10,0,-2147483645)
# tdSql.checkData(13,0,None)
tdSql.execute("create function abs_max as '/tmp/abs_max.so' outputtype bigint bufsize 128")
tdSql.error("select abs_max(number) from st")
tdSql.execute("create function sum_double as '/tmp/sum_double.so' outputtype bigint bufsize 128")
tdSql.error("select sum_double(id) from st")
# UDF -> improvement: aggregate function with no bufsize: it seems to have no effect
# tdSql.execute("drop function abs_max")
# tdSql.execute("drop function sum_double")
tdSql.execute("drop function abs_max")
tdSql.execute("drop function sum_double")
tdSql.execute("create aggregate function abs_max as '/tmp/abs_max.so' outputtype bigint ")
tdSql.execute("create aggregate function sum_double as '/tmp/sum_double.so' outputtype int ")
tdSql.query("select sum_double(id) from st")
tdSql.checkData(0,0,44)
tdSql.query("select sum_double(id) from tb1")
tdSql.checkData(0,0,20)
# tdSql.query("select abs_max(number) from st")
# tdSql.checkData(0,0,9223372036854775807)
tdSql.query("select abs_max(number) from st")
tdSql.checkData(0,0,9223372036854775807)
tdSql.query("select abs_max(number) from tb1")
tdSql.checkData(0,0,400)
# UDF bug no 10 -> create function with an outputtype that does not match the column datatype
tdSql.execute("drop function abs_max")
tdSql.execute("drop function sum_double")
tdSql.execute("drop function add_one")
tdSql.execute("create function add_one as '/tmp/add_one.so' outputtype bigint;")
tdSql.execute("create aggregate function abs_max as '/tmp/abs_max.so' outputtype int bufsize 128;")
tdSql.execute("create aggregate function sum_double as '/tmp/sum_double.so' outputtype double bufsize 128;")
# tdSql.query("select sum_double(id) from st") this bug will return 0.000000
# tdSql.checkData(0,0,44)
# tdSql.query("select sum_double(id) from tb1")
# tdSql.checkData(0,0,20) this bug will return 0.000000
# tdSql.query("select add_one(id) from st") this bug will return series error values
# tdSql.checkData(0,0,1)
# tdSql.checkData(1,0,2)
# tdSql.checkData(5,0,1)
# tdSql.checkData(10,0,-2147483645)
# tdSql.checkData(13,0,None)
# tdSql.query("select add_one(id) from tb1") this bug will return series error values
# tdSql.checkData(0,0,1)
# tdSql.checkData(2,0,3)
# tdSql.query("select abs_max(id) from st")
# tdSql.checkData(0,0,9223372036854775807)
tdSql.query("select abs_max(number) from tb1") # it seems work well
tdSql.checkData(0,0,400)
# UDF scalar functions do not support group by
tdSql.error("select add_one(id) from st group by tbname")
# UDF bug no 11 -> the following test case will core dump taosd and cause data loss
# tdSql.query("select add_one(id) from st group by tbname")
# UDF -> bug no 12: creating scalar_function add_one as an aggregate will core dump taosd and cause data loss
# tdSql.execute("drop function add_one")
# tdSql.execute("create aggregate function add_one as '/tmp/add_one.so' outputtype bigint bufsize 128")
# tdSql.query("select add_one(id) from st")
# UDF bug no 13 -> the following test case will core dump taosc
# tdSql.query("select add_one(*) from st ")
# tdSql.query("select add_one(*) from tb1 ")
# UDF bug no 14 -> the following test case will core dump taosc
# tdSql.query("select abs_max(id),abs_max(number) from st ")
# tdSql.query("select abs_max(number),abs_max(number) from st ")
# tdSql.query("select sum_double(id),sum_double(id) from st ")
# UDF: creating scalar_function add_one as an aggregate cannot work correctly
tdSql.execute("drop function add_one")
tdSql.execute("create aggregate function add_one as '/tmp/add_one.so' outputtype bigint bufsize 128")
tdSql.error("select add_one(id) from st")
# UDF requires an explicit column list
tdSql.error("select add_one(*) from st ")
tdSql.error("select add_one(*) from tb1 ")
# one UDF function can be used multiple times in a statement
tdSql.query("select abs_max(id),abs_max(number) from st ")
tdSql.query("select abs_max(number),abs_max(number)*3 from st ")
tdSql.query("select abs_max(number),abs_max(number)*3 from tb1 ")
tdSql.query("select sum_double(id),sum_double(id) from st ")
def run(self):
tdSql.prepare()
......
......@@ -178,6 +178,8 @@ class TDTestCase:
tdSql.checkRows(1)
self.tb193new = "table_193~!@#$%^&*()-_+=[]{}':,<.>/?stST0123456789table_192~!@#$%^&*()-_+=[]{}':,<.>/?stST0123456789table_192~!@#$%^&*()-_+=[]{}':,<.>/?stST0123456789table_192~!@#$%^&*()-_+=[]{}':,<.>/?stST123"
tdSql.error("create table db.`%s` using db.`%s` tags(1)" %(self.tb193new,self.stb1))
# case for TD-10691
tdSql.error("create table ttb1(ts timestamp, file int )")
......
......@@ -1765,7 +1765,7 @@ int stmt_funcb_autoctb_e1(TAOS_STMT *stmt) {
int code = taos_stmt_prepare(stmt, sql, 0);
if (code != 0){
printf("failed to execute taos_stmt_prepare. error:%s\n", taos_stmt_errstr(stmt));
return -1;
exit(1);
}
int id = 0;
......@@ -1801,9 +1801,44 @@ int stmt_funcb_autoctb_e1(TAOS_STMT *stmt) {
return 0;
}
int stmt_multi_insert_check(TAOS_STMT *stmt) {
char *sql;
// The number of columns in the tag column list is not equal to the number of tag values
sql = "insert into ? using stb1 (id1) tags(1,?) values(?,?,?,?,?,?,?,?,?,?)";
if (0 == taos_stmt_prepare(stmt, sql, 0)) {
printf("failed to check taos_stmt_prepare. sql:%s\n", sql);
exit(1);
}
// The number of columns in the column list is not equal to the number of values
sql = "insert into ? using stb1 tags(1,?,2,?,4,?,6.0,?,'b') "
"(ts, b, v1, v2, v4, v8, f4, f8, bin) values(?,?,?,?,?,?,?,?,?,?)";
if (0 == taos_stmt_prepare(stmt, sql, 0)) {
printf("failed to check taos_stmt_prepare. sql:%s\n", sql);
exit(1);
}
sql = "insert into ? using stb1 () tags(1,?) values(?,?,?,?,?,?,?,?,?,?)";
if (0 == taos_stmt_prepare(stmt, sql, 0)) {
printf("failed to check taos_stmt_prepare. sql:%s\n", sql);
exit(1);
}
sql = "insert into ? using stb1 ( tags(1,?) values(?,?,?,?,?,?,?,?,?,?)";
if (0 == taos_stmt_prepare(stmt, sql, 0)) {
printf("failed to check taos_stmt_prepare. sql:%s\n", sql);
exit(1);
}
sql = "insert into ? using stb1 ) tags(1,?) values(?,?,?,?,?,?,?,?,?,?)";
if (0 == taos_stmt_prepare(stmt, sql, 0)) {
printf("failed to check taos_stmt_prepare. sql:%s\n", sql);
exit(1);
}
return 0;
}
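/* Hypothetical counterpart sketch (illustrative only, not part of the original
 * test): the same statement as the first case above, but with the tag column
 * list and the tag value list of equal length, which taos_stmt_prepare should
 * accept, assuming the stb1 schema used by these tests. */
int stmt_multi_insert_valid_sketch(TAOS_STMT *stmt) {
  char *sql = "insert into ? using stb1 (id1) tags(?) values(?,?,?,?,?,?,?,?,?,?)";

  if (0 != taos_stmt_prepare(stmt, sql, 0)) {
    printf("unexpected failure of taos_stmt_prepare. sql:%s, error:%s\n", sql, taos_stmt_errstr(stmt));
    exit(1);
  }

  return 0;
}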
// 1 table, 10 records
int stmt_funcb_autoctb_e2(TAOS_STMT *stmt) {
......@@ -4509,7 +4544,6 @@ void* runcase(void *par) {
(void)idx;
#if 1
prepare(taos, 1, 1);
......@@ -4823,6 +4857,16 @@ void* runcase(void *par) {
#endif
#if 1
prepare(taos, 1, 0);
stmt = taos_stmt_init(taos);
printf("stmt_multi_insert_check start\n");
stmt_multi_insert_check(stmt);
printf("stmt_multi_insert_check end\n");
taos_stmt_close(stmt);
#endif
#if 1
prepare(taos, 1, 1);
......@@ -5011,7 +5055,6 @@ void* runcase(void *par) {
printf("check result end\n");
#endif
#if 1
preparem(taos, 0, idx);
......
......@@ -10,7 +10,7 @@ sql connect
print ======================== dnode1 start
sql create function add_one as '/tmp/add_one.so' outputtype int;
sql create aggregate function sum_double as '/tmp/sum_double.so' outputtype int;
sql create aggregate function sum_double as '/tmp/sum_double.so' outputtype bigint;
sql show functions;
if $rows != 2 then
return -1
......
......@@ -11,7 +11,7 @@ print ======================== dnode1 start
sql create function add_one as '/tmp/add_one.so' outputtype int;
sql create function add_one_64232 as '/tmp/add_one_64232.so' outputtype int;
sql create aggregate function sum_double as '/tmp/sum_double.so' outputtype int;
sql create aggregate function sum_double as '/tmp/sum_double.so' outputtype bigint;
sql show functions;
if $rows != 3 then
return -1
......
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <inttypes.h>
typedef struct SUdfInit{
int maybe_null; /* 1 if function can return NULL */
int decimals; /* for real functions */
long long length; /* For string functions */
int64_t length; /* For string functions */
char *ptr; /* free pointer for function data */
int const_item; /* 0 if result is independent of arguments */
} SUdfInit;
......@@ -14,31 +15,36 @@ typedef struct SUdfInit{
#define TSDB_DATA_INT_NULL 0x80000000L
#define TSDB_DATA_BIGINT_NULL 0x8000000000000000L
void abs_max(char* data, short itype, short ibytes, int numOfRows, long long* ts, char* dataOutput, char* interBuf, char* tsOutput,
void abs_max(char* data, short itype, short ibytes, int numOfRows, int64_t* ts, char* dataOutput, char* interBuf, char* tsOutput,
int* numOfOutput, short otype, short obytes, SUdfInit* buf) {
int i;
long r = 0;
printf("abs_max input data:%p, type:%d, rows:%d, ts:%p,%lld, dataoutput:%p, tsOutput:%p, numOfOutput:%p, buf:%p\n", data, itype, numOfRows, ts, *ts, dataOutput, tsOutput, numOfOutput, buf);
int64_t r = 0;
// printf("abs_max input data:%p, type:%d, rows:%d, ts:%p, %" PRId64 ", dataoutput:%p, tsOutput:%p, numOfOutput:%p, buf:%p\n", data, itype, numOfRows, ts, *ts, dataOutput, tsOutput, numOfOutput, buf);
if (itype == 5) {
r=*(long *)dataOutput;
r=*(int64_t *)dataOutput;
*numOfOutput=0;
for(i=0;i<numOfRows;++i) {
if (*((long *)data + i) == TSDB_DATA_BIGINT_NULL) {
if (*((int64_t *)data + i) == TSDB_DATA_BIGINT_NULL) {
continue;
}
*numOfOutput=1;
long v = labs(*((long *)data + i));
//int64_t v = abs(*((int64_t *)data + i));
int64_t v = *((int64_t *)data + i);
if (v < 0) {
v = 0 - v;
}
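// note: the manual negation above (instead of labs() on a long) presumably keeps
// the absolute value correct for the full int64_t range on platforms where long is 32-bit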
if (v > r) {
r = v;
}
}
*(long *)dataOutput=r;
*(int64_t *)dataOutput=r;
printf("abs_max out, dataoutput:%ld, numOfOutput:%d\n", *(long *)dataOutput, *numOfOutput);
} else {
// printf("abs_max out, dataoutput:%" PRId64", numOfOutput:%d\n", *(int64_t *)dataOutput, *numOfOutput);
}else {
*numOfOutput=0;
}
}
......@@ -47,44 +53,43 @@ void abs_max(char* data, short itype, short ibytes, int numOfRows, long long* ts
void abs_max_finalize(char* dataOutput, char* interBuf, int* numOfOutput, SUdfInit* buf) {
int i;
int r = 0;
printf("abs_max_finalize dataoutput:%p:%d, numOfOutput:%d, buf:%p\n", dataOutput, *dataOutput, *numOfOutput, buf);
printf("abs_max finalize, dataoutput:%ld, numOfOutput:%d\n", *(long *)dataOutput, *numOfOutput);
//int64_t r = 0;
// printf("abs_max_finalize dataoutput:%p:%d, numOfOutput:%d, buf:%p\n", dataOutput, *dataOutput, *numOfOutput, buf);
// *numOfOutput=1;
// printf("abs_max finalize, dataoutput:%" PRId64", numOfOutput:%d\n", *(int64_t *)dataOutput, *numOfOutput);
}
void abs_max_merge(char* data, int32_t numOfRows, char* dataOutput, int32_t* numOfOutput, SUdfInit* buf) {
int r = 0;
int64_t r = 0;
if (numOfRows > 0) {
r = *((long *)data);
r = *((int64_t *)data);
}
printf("abs_max_merge numOfRows:%d, dataoutput:%p, buf:%p\n", numOfRows, dataOutput, buf);
// printf("abs_max_merge numOfRows:%d, dataoutput:%p, buf:%p\n", numOfRows, dataOutput, buf);
for (int i = 1; i < numOfRows; ++i) {
printf("abs_max_merge %d - %ld\n", i, *((long *)data + i));
if (*((long*)data + i) > r) {
r= *((long*)data + i);
// printf("abs_max_merge %d - %" PRId64"\n", i, *((int64_t *)data + i));
if (*((int64_t*)data + i) > r) {
r= *((int64_t*)data + i);
}
}
*(long*)dataOutput=r;
*(int64_t*)dataOutput=r;
if (numOfRows > 0) {
*numOfOutput=1;
} else {
*numOfOutput=0;
}
printf("abs_max_merge, dataoutput:%ld, numOfOutput:%d\n", *(long *)dataOutput, *numOfOutput);
// printf("abs_max_merge, dataoutput:%" PRId64", numOfOutput:%d\n", *(int64_t *)dataOutput, *numOfOutput);
}
int abs_max_init(SUdfInit* buf) {
printf("abs_max init\n");
// printf("abs_max init\n");
return 0;
}
void abs_max_destroy(SUdfInit* buf) {
printf("abs_max destroy\n");
// printf("abs_max destroy\n");
}
\ No newline at end of file
......@@ -14,20 +14,18 @@ void add_one(char* data, short itype, short ibytes, int numOfRows, long long* ts
int* numOfOutput, short otype, short obytes, SUdfInit* buf) {
int i;
int r = 0;
printf("add_one input data:%p, type:%d, rows:%d, ts:%p,%lld, dataoutput:%p, tsOutput:%p, numOfOutput:%p, buf:%p\n", data, itype, numOfRows, ts, *ts, dataOutput, tsOutput, numOfOutput, buf);
// printf("add_one input data:%p, type:%d, rows:%d, ts:%p,%lld, dataoutput:%p, tsOutput:%p, numOfOutput:%p, buf:%p\n", data, itype, numOfRows, ts, *ts, dataOutput, tsOutput, numOfOutput, buf);
if (itype == 4) {
for(i=0;i<numOfRows;++i) {
printf("input %d - %d", i, *((int *)data + i));
// printf("input %d - %d", i, *((int *)data + i));
*((int *)dataOutput+i)=*((int *)data + i) + 1;
printf(", output %d\n", *((int *)dataOutput+i));
// printf(", output %d\n", *((int *)dataOutput+i));
if (tsOutput) {
*(long long*)tsOutput=1000000;
}
}
*numOfOutput=numOfRows;
printf("add_one out, numOfOutput:%d\n", *numOfOutput);
// printf("add_one out, numOfOutput:%d\n", *numOfOutput);
}
}
\ No newline at end of file
......@@ -29,5 +29,3 @@ void add_one_64232(char* data, short itype, short ibytes, int numOfRows, long lo
printf("add_one_64232 out, numOfOutput:%d\n", *numOfOutput);
}
}
\ No newline at end of file
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <inttypes.h>
typedef struct SUdfInit{
int maybe_null; /* 1 if function can return NULL */
int decimals; /* for real functions */
long long length; /* For string functions */
int64_t length; /* For string functions */
char *ptr; /* free pointer for function data */
int const_item; /* 0 if result is independent of arguments */
} SUdfInit;
......@@ -13,13 +14,13 @@ typedef struct SUdfInit{
#define TSDB_DATA_INT_NULL 0x80000000L
void sum_double(char* data, short itype, short ibytes, int numOfRows, long long* ts, char* dataOutput, char* interBuf, char* tsOutput,
void sum_double(char* data, short itype, short ibytes, int numOfRows, int64_t* ts, char* dataOutput, char* interBuf, char* tsOutput,
int* numOfOutput, short otype, short obytes, SUdfInit* buf) {
int i;
int r = 0;
printf("sum_double input data:%p, type:%d, rows:%d, ts:%p,%lld, dataoutput:%p, tsOutput:%p, numOfOutput:%p, buf:%p\n", data, itype, numOfRows, ts, *ts, dataOutput, tsOutput, numOfOutput, buf);
int64_t r = 0;
printf("sum_double input data:%p, type:%d, rows:%d, ts:%p,%"PRId64", dataoutput:%p, tsOutput:%p, numOfOutput:%p, buf:%p\n", data, itype, numOfRows, ts, *ts, dataOutput, tsOutput, numOfOutput, buf);
if (itype == 4) {
r=*(int *)dataOutput;
r=*(int64_t *)dataOutput;
*numOfOutput=0;
for(i=0;i<numOfRows;++i) {
......@@ -29,10 +30,10 @@ void sum_double(char* data, short itype, short ibytes, int numOfRows, long long*
*numOfOutput=1;
r+=*((int *)data + i);
*(int *)dataOutput=r;
*(int64_t *)dataOutput=r;
}
printf("sum_double out, dataoutput:%d, numOfOutput:%d\n", *(int *)dataOutput, *numOfOutput);
// printf("sum_double out, dataoutput:%"PRId64", numOfOutput:%d\n", *(int64_t *)dataOutput, *numOfOutput);
}
}
......@@ -40,45 +41,44 @@ void sum_double(char* data, short itype, short ibytes, int numOfRows, long long*
void sum_double_finalize(char* dataOutput, char* interBuf, int* numOfOutput, SUdfInit* buf) {
int i;
int r = 0;
printf("sum_double_finalize dataoutput:%p:%d, numOfOutput:%d, buf:%p\n", dataOutput, *dataOutput, *numOfOutput, buf);
*numOfOutput=1;
*(int*)(buf->ptr)=*(int*)dataOutput*2;
*(int*)dataOutput=*(int*)(buf->ptr);
printf("sum_double finalize, dataoutput:%d, numOfOutput:%d\n", *(int *)dataOutput, *numOfOutput);
int64_t r = 0;
// printf("sum_double_finalize dataoutput:%p:%"PRId64", numOfOutput:%d, buf:%p\n", dataOutput, *(int64_t*)dataOutput, *numOfOutput, buf);
// *numOfOutput=1;
*(int64_t*)(buf->ptr)=*(int64_t*)dataOutput*2;
*(int64_t*)dataOutput=*(int64_t*)(buf->ptr);
// printf("sum_double finalize, dataoutput:%"PRId64", numOfOutput:%d\n", *(int64_t *)dataOutput, *numOfOutput);
}
void sum_double_merge(char* data, int32_t numOfRows, char* dataOutput, int32_t* numOfOutput, SUdfInit* buf) {
void sum_double_merge(char* data, int32_t numOfRows, char* dataOutput, int* numOfOutput, SUdfInit* buf) {
int r = 0;
int sum = 0;
int64_t sum = 0;
printf("sum_double_merge numOfRows:%d, dataoutput:%p, buf:%p\n", numOfRows, dataOutput, buf);
// printf("sum_double_merge numOfRows:%d, dataoutput:%p, buf:%p\n", numOfRows, dataOutput, buf);
for (int i = 0; i < numOfRows; ++i) {
printf("sum_double_merge %d - %d\n", i, *((int*)data + i));
sum +=*((int*)data + i);
// printf("sum_double_merge %d - %"PRId64"\n", i, *((int64_t*)data + i));
sum +=*((int64_t*)data + i);
}
*(int*)dataOutput+=sum;
*(int64_t*)dataOutput+=sum;
if (numOfRows > 0) {
*numOfOutput=1;
} else {
*numOfOutput=0;
}
printf("sum_double_merge, dataoutput:%d, numOfOutput:%d\n", *(int *)dataOutput, *numOfOutput);
// printf("sum_double_merge, dataoutput:%"PRId64", numOfOutput:%d\n", *(int64_t *)dataOutput, *numOfOutput);
}
int sum_double_init(SUdfInit* buf) {
buf->maybe_null=1;
buf->ptr = malloc(sizeof(int));
printf("sum_double init\n");
buf->ptr = malloc(sizeof(int64_t));
// printf("sum_double init\n");
return 0;
}
void sum_double_destroy(SUdfInit* buf) {
free(buf->ptr);
printf("sum_double destroy\n");
// printf("sum_double destroy\n");
}
\ No newline at end of file
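/* Minimal standalone driver sketch (illustrative only, not part of the original
 * source; the SUM_DOUBLE_DEMO guard is a hypothetical flag so the file can still
 * be built as a UDF library): it feeds one block of int values through the entry
 * points above to show that the per-block pass accumulates the plain sum and
 * finalize doubles it, matching the test expectation sum_double(0..4) = 20. */
#ifdef SUM_DOUBLE_DEMO
int main(void) {
  int      input[5] = {0, 1, 2, 3, 4};
  int64_t  ts = 0;
  int64_t  out = 0;
  int      numOfOutput = 0;
  SUdfInit buf = {0};

  sum_double_init(&buf);
  // itype 4 is the int type code used by the checks above; otype/obytes are unused here
  sum_double((char *)input, 4, sizeof(int), 5, &ts, (char *)&out, NULL, NULL,
             &numOfOutput, 0, 0, &buf);
  sum_double_finalize((char *)&out, NULL, &numOfOutput, &buf);
  printf("sum_double over 0..4 = %" PRId64 "\n", out);  /* expected: 20 */
  sum_double_destroy(&buf);
  return 0;
}
#endif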