Unverified commit 4ba0e990 authored by H Hui Li, committed by GitHub

Merge pull request #8486 from taosdata/feature/d6

Merge master branch into develop
......@@ -201,7 +201,7 @@ pipeline {
stage('pre_build'){
agent{label 'master'}
options { skipDefaultCheckout() }
when{
when {
changeRequest()
}
steps {
......@@ -321,22 +321,10 @@ pipeline {
'''
sh '''
cd ${WKC}/src/connector/node-rest/
npm install
npm run build
npm run build:test
npm run test
'''
sh '''
cd ${WKC}/tests/examples/C#/taosdemo
mcs -out:taosdemo *.cs > /dev/null 2>&1
echo '' |./taosdemo -c /etc/taos
cd ${WKC}/tests/connectorTest/C#Test/nanosupport
mcs -out:nano *.cs > /dev/null 2>&1
echo '' |./nano
'''
sh '''
cd ${WKC}/tests/gotest
......
......@@ -213,7 +213,7 @@ else
exit 1
fi
make
make -j8
cd ${curr_dir}
......
......@@ -155,6 +155,7 @@ bool tscIsSessionWindowQuery(SQueryInfo* pQueryInfo);
bool tscIsSecondStageQuery(SQueryInfo* pQueryInfo);
bool tsIsArithmeticQueryOnAggResult(SQueryInfo* pQueryInfo);
bool tscGroupbyColumn(SQueryInfo* pQueryInfo);
bool tscGroupbyTag(SQueryInfo* pQueryInfo);
int32_t tscGetTopBotQueryExprIndex(SQueryInfo* pQueryInfo);
bool tscIsTopBotQuery(SQueryInfo* pQueryInfo);
bool hasTagValOutput(SQueryInfo* pQueryInfo);
......
......@@ -762,10 +762,11 @@ void tscSortRemoveDataBlockDupRowsRaw(STableDataBlocks *dataBuf) {
if (!dataBuf->ordered) {
char *pBlockData = pBlocks->data;
qsort(pBlockData, pBlocks->numOfRows, dataBuf->rowSize, rowDataCompar);
dataBuf->ordered = true;
if(tsClientMerge) {
int32_t i = 0;
int32_t j = 1;
while (j < pBlocks->numOfRows) {
TSKEY ti = *(TSKEY *)(pBlockData + dataBuf->rowSize * i);
TSKEY tj = *(TSKEY *)(pBlockData + dataBuf->rowSize * j);
......@@ -774,7 +775,6 @@ void tscSortRemoveDataBlockDupRowsRaw(STableDataBlocks *dataBuf) {
if (dataBuf->pTableMeta && dataBuf->pTableMeta->tableInfo.update != TD_ROW_DISCARD_UPDATE) {
memmove(pBlockData + dataBuf->rowSize * i, pBlockData + dataBuf->rowSize * j, dataBuf->rowSize);
}
++j;
continue;
}
......@@ -783,15 +783,12 @@ void tscSortRemoveDataBlockDupRowsRaw(STableDataBlocks *dataBuf) {
if (nextPos != j) {
memmove(pBlockData + dataBuf->rowSize * nextPos, pBlockData + dataBuf->rowSize * j, dataBuf->rowSize);
}
++j;
}
dataBuf->ordered = true;
pBlocks->numOfRows = i + 1;
dataBuf->size = sizeof(SSubmitBlk) + dataBuf->rowSize * pBlocks->numOfRows;
}
}
dataBuf->prevTS = INT64_MIN;
}
......@@ -836,7 +833,9 @@ int tscSortRemoveDataBlockDupRows(STableDataBlocks *dataBuf, SBlockKeyInfo *pBlk
if (!dataBuf->ordered) {
pBlkKeyTuple = pBlkKeyInfo->pKeyTuple;
qsort(pBlkKeyTuple, nRows, sizeof(SBlockKeyTuple), rowDataCompar);
dataBuf->ordered = true;
if(tsClientMerge) {
pBlkKeyTuple = pBlkKeyInfo->pKeyTuple;
int32_t i = 0;
int32_t j = 1;
......@@ -859,10 +858,9 @@ int tscSortRemoveDataBlockDupRows(STableDataBlocks *dataBuf, SBlockKeyInfo *pBlk
}
++j;
}
dataBuf->ordered = true;
pBlocks->numOfRows = i + 1;
}
}
dataBuf->size = sizeof(SSubmitBlk) + pBlocks->numOfRows * extendedRowSize;
dataBuf->prevTS = INT64_MIN;
......
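Both dedup paths above, now gated by the new tsClientMerge flag, follow the same two-pointer pattern: after qsort orders the rows by timestamp, index i marks the last row kept and j scans forward; a row whose key equals the key at i is either merged over it (update mode) or dropped, and the block is finally truncated to i + 1 rows. A minimal standalone sketch of that pattern, using a hypothetical key/value row instead of the real STableDataBlocks/SSubmitBlk layout:

#include <stdio.h>
#include <stdlib.h>

typedef struct { long long key; int val; } Row;   /* hypothetical row, not the TDengine layout */

static int rowCompar(const void *a, const void *b) {
  long long ka = ((const Row *)a)->key, kb = ((const Row *)b)->key;
  return (ka > kb) - (ka < kb);
}

/* Sort rows by key, then collapse duplicate keys in place.
 * overwrite == 1 mimics the "update" path (the later row wins);
 * overwrite == 0 mimics the "discard" path (the first row wins).
 * Returns the new number of rows (i + 1 in the original code). */
static int sortRemoveDupRows(Row *rows, int n, int overwrite) {
  if (n <= 1) return n;
  qsort(rows, n, sizeof(Row), rowCompar);
  int i = 0;
  for (int j = 1; j < n; ++j) {
    if (rows[j].key == rows[i].key) {
      if (overwrite) rows[i] = rows[j];   /* keep the later duplicate */
      continue;                           /* drop the duplicate slot either way */
    }
    rows[++i] = rows[j];                  /* shift the next distinct row up */
  }
  return i + 1;
}

int main(void) {
  Row rows[] = {{3, 30}, {1, 10}, {3, 31}, {2, 20}, {1, 11}};
  int n = sortRemoveDupRows(rows, 5, 1);
  for (int k = 0; k < n; ++k) printf("%lld -> %d\n", rows[k].key, rows[k].val);
  return 0;   /* prints 1 -> 11, 2 -> 20, 3 -> 31 */
}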
......@@ -5022,6 +5022,7 @@ static int32_t validateJoinExpr(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SCondExpr
const char* msg1 = "super table join requires tags column";
const char* msg2 = "timestamp join condition missing";
const char* msg3 = "condition missing for join query";
const char* msg4 = "only ts column join allowed";
if (!QUERY_IS_JOIN_QUERY(pQueryInfo->type)) {
if (pQueryInfo->numOfTables == 1) {
......@@ -5039,6 +5040,8 @@ static int32_t validateJoinExpr(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SCondExpr
if (pCondExpr->pJoinExpr == NULL) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
} else if ((!UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo)) && pCondExpr->pJoinExpr) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg4);
}
if (!pCondExpr->tsJoin) {
......@@ -5593,10 +5596,14 @@ int32_t validateFillNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSqlNo
const char* msg4 = "illegal value or data overflow";
const char* msg5 = "fill only available for interval query";
const char* msg6 = "not supported function now";
const char* msg7 = "join query not supported fill operation";
if ((!isTimeWindowQuery(pQueryInfo)) && (!tscIsPointInterpQuery(pQueryInfo))) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg5);
}
if(QUERY_IS_JOIN_QUERY(pQueryInfo->type)) {
return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg7);
}
/*
* fill options are set at the end position, when all columns are set properly
......@@ -5951,6 +5958,10 @@ int32_t validateOrderbyNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSq
return invalidOperationMsg(pMsgBuf, msg11);
}
if (udf) {
return invalidOperationMsg(pMsgBuf, msg11);
}
tVariantListItem* p1 = taosArrayGet(pSqlNode->pSortOrder, 0);
pQueryInfo->groupbyExpr.orderIndex = pSchema[index.columnIndex].colId;
pQueryInfo->groupbyExpr.orderType = p1->sortOrder;
......@@ -9212,6 +9223,7 @@ int32_t validateSqlNode(SSqlObj* pSql, SSqlNode* pSqlNode, SQueryInfo* pQueryInf
pQueryInfo->simpleAgg = isSimpleAggregateRv(pQueryInfo);
pQueryInfo->onlyTagQuery = onlyTagPrjFunction(pQueryInfo);
pQueryInfo->groupbyColumn = tscGroupbyColumn(pQueryInfo);
pQueryInfo->groupbyTag = tscGroupbyTag(pQueryInfo);
pQueryInfo->arithmeticOnAgg = tsIsArithmeticQueryOnAggResult(pQueryInfo);
pQueryInfo->orderProjectQuery = tscOrderedProjectionQueryOnSTable(pQueryInfo, 0);
......
......@@ -752,7 +752,7 @@ TAOS_STREAM *taos_open_stream(TAOS *taos, const char *sqlstr, void (*fp)(void *,
void taos_close_stream(TAOS_STREAM *handle) {
SSqlStream *pStream = (SSqlStream *)handle;
SSqlObj *pSql = (SSqlObj *)atomic_exchange_ptr(&pStream->pSql, 0);
SSqlObj *pSql = pStream->pSql;
if (pSql == NULL) {
return;
}
......@@ -763,13 +763,13 @@ void taos_close_stream(TAOS_STREAM *handle) {
*/
if (pSql->signature == pSql) {
tscRemoveFromStreamList(pStream, pSql);
pStream->pSql = NULL;
taosTmrStopA(&(pStream->pTimer));
tscDebug("0x%"PRIx64" stream:%p is closed", pSql->self, pStream);
// notify CQ to release the pStream object
pStream->fp(pStream->param, NULL, NULL);
pStream->pSql = NULL;
taos_free_result(pSql);
tfree(pStream);
......
......@@ -2876,7 +2876,7 @@ void tscHandleSubqueryError(SRetrieveSupport *trsupport, SSqlObj *pSql, int numO
SSqlObj *userSql = pParentSql->rootObj;
if ((code == TSDB_CODE_TDB_INVALID_TABLE_ID || code == TSDB_CODE_VND_INVALID_VGROUP_ID) && userSql->retry < userSql->maxRetry) {
if (userSql != pParentSql) {
if (userSql != pParentSql && pParentSql->freeParam != NULL) {
(*pParentSql->freeParam)(&pParentSql->param);
}
......@@ -3729,6 +3729,25 @@ static UNUSED_FUNC bool tscHasRemainDataInSubqueryResultSet(SSqlObj *pSql) {
return hasData;
}
void tscSetQuerySort(SQueryInfo* pQueryInfo, SQueryAttr* pQueryAttr) {
if (pQueryInfo->interval.interval <= 0) {
return;
}
if (pQueryInfo->pUpstream != NULL && taosArrayGetSize(pQueryInfo->pUpstream) > 0) {
size_t size = taosArrayGetSize(pQueryInfo->pUpstream);
for(int32_t i = 0; i < size; ++i) {
SQueryInfo* pq = taosArrayGetP(pQueryInfo->pUpstream, i);
if (pq->groupbyTag && pq->interval.interval > 0) {
pQueryAttr->needSort = true;
return;
}
}
}
}
void* createQInfoFromQueryNode(SQueryInfo* pQueryInfo, STableGroupInfo* pTableGroupInfo, SOperatorInfo* pSourceOperator,
char* sql, void* merger, int32_t stage, uint64_t qId) {
assert(pQueryInfo != NULL);
......@@ -3831,6 +3850,7 @@ void* createQInfoFromQueryNode(SQueryInfo* pQueryInfo, STableGroupInfo* pTableGr
SArray* pa = NULL;
if (stage == MASTER_SCAN) {
pQueryAttr->createFilterOperator = false; // no need for parent query
tscSetQuerySort(pQueryInfo, pQueryAttr);
pa = createExecOperatorPlan(pQueryAttr);
} else {
pa = createGlobalMergePlan(pQueryAttr);
......
......@@ -414,6 +414,19 @@ bool tscGroupbyColumn(SQueryInfo* pQueryInfo) {
return false;
}
bool tscGroupbyTag(SQueryInfo* pQueryInfo) {
SGroupbyExpr* pGroupbyExpr = &pQueryInfo->groupbyExpr;
for (int32_t k = 0; k < pGroupbyExpr->numOfGroupCols; ++k) {
SColIndex* pIndex = taosArrayGet(pGroupbyExpr->columnInfo, k);
if (TSDB_COL_IS_TAG(pIndex->flag)) { // group by tag
return true;
}
}
return false;
}
int32_t tscGetTopBotQueryExprIndex(SQueryInfo* pQueryInfo) {
size_t numOfExprs = tscNumOfExprs(pQueryInfo);
......@@ -1256,6 +1269,7 @@ static void createInputDataFilterInfo(SQueryInfo* px, int32_t numOfCol1, int32_t
}
*/
void handleDownstreamOperator(SSqlObj** pSqlObjList, int32_t numOfUpstream, SQueryInfo* px, SSqlObj* pSql) {
SSqlRes* pOutput = &pSql->res;
......
......@@ -339,7 +339,9 @@ static FORCE_INLINE void dataColReset(SDataCol *pDataCol) { pDataCol->len = 0; }
int tdAllocMemForCol(SDataCol *pCol, int maxPoints);
void dataColInit(SDataCol *pDataCol, STColumn *pCol, int maxPoints);
int dataColAppendVal(SDataCol *pCol, const void *value, int numOfRows, int maxPoints);
int dataColAppendVal(SDataCol *pCol, const void *value, int numOfRows, int maxPoints, int rowOffset);
void dataColSetOffset(SDataCol *pCol, int nEle);
bool isNEleNull(SDataCol *pCol, int nEle);
......@@ -670,7 +672,7 @@ static FORCE_INLINE char *memRowEnd(SMemRow row) {
#define memRowDeleted(r) TKEY_IS_DELETED(memRowTKey(r))
SMemRow tdMemRowDup(SMemRow row);
void tdAppendMemRowToDataCol(SMemRow row, STSchema *pSchema, SDataCols *pCols, bool forceSetNull);
void tdAppendMemRowToDataCol(SMemRow row, STSchema *pSchema, SDataCols *pCols, bool forceSetNull, int rowOffset);
// NOTE: the offset here includes the header size
static FORCE_INLINE void *tdGetMemRowDataOfCol(void *row, int16_t colId, int8_t colType, uint16_t offset) {
......
......@@ -217,6 +217,8 @@ extern int32_t wDebugFlag;
extern int32_t cqDebugFlag;
extern int32_t debugFlag;
extern int8_t tsClientMerge;
#ifdef TD_TSZ
// lossy
extern char lossyColumns[];
......@@ -232,6 +234,7 @@ extern int8_t tsDeadLockKillQuery;
// schemaless
extern char tsDefaultJSONStrType[];
typedef struct {
char dir[TSDB_FILENAME_LEN];
int level;
......
......@@ -239,9 +239,12 @@ void dataColInit(SDataCol *pDataCol, STColumn *pCol, int maxPoints) {
pDataCol->len = 0;
}
// value from timestamp should be TKEY here instead of TSKEY
int dataColAppendVal(SDataCol *pCol, const void *value, int numOfRows, int maxPoints) {
ASSERT(pCol != NULL && value != NULL);
/**
* value from timestamp should be TKEY here instead of TSKEY.
* - rowOffset: 0 for current row, -1 for previous row
*/
int dataColAppendVal(SDataCol *pCol, const void *value, int numOfRows, int maxPoints, int rowOffset) {
ASSERT(pCol != NULL && value != NULL && (rowOffset == 0 || rowOffset == -1));
if (isAllRowsNull(pCol)) {
if (isNull(value, pCol->type)) {
......@@ -257,6 +260,7 @@ int dataColAppendVal(SDataCol *pCol, const void *value, int numOfRows, int maxPo
}
if (IS_VAR_DATA_TYPE(pCol->type)) {
if (rowOffset == 0) {
// set offset
pCol->dataOff[numOfRows] = pCol->len;
// Copy data
......@@ -264,10 +268,22 @@ int dataColAppendVal(SDataCol *pCol, const void *value, int numOfRows, int maxPo
// Update the length
pCol->len += varDataTLen(value);
} else {
ASSERT(pCol->len == TYPE_BYTES[pCol->type] * numOfRows);
memcpy(POINTER_SHIFT(pCol->pData, pCol->len), value, pCol->bytes);
// Copy data
void *lastValue = POINTER_SHIFT(pCol->pData, pCol->dataOff[numOfRows]);
int lastValLen = varDataTLen(lastValue);
memcpy(lastValue, value, varDataTLen(value));
// Update the length
pCol->len -= lastValLen;
pCol->len += varDataTLen(value);
}
} else {
// rowOffset == 0: append the value and grow pCol->len; rowOffset == -1: overwrite the last row in place (partial update), keeping pCol->len and numOfRows
ASSERT(pCol->len == (TYPE_BYTES[pCol->type] * (numOfRows - rowOffset)));
memcpy(POINTER_SHIFT(pCol->pData, (pCol->len + rowOffset * TYPE_BYTES[pCol->type])), value, pCol->bytes);
if (rowOffset == 0) {
pCol->len += pCol->bytes;
}
}
return 0;
}
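For fixed-length columns the new rowOffset argument above simply shifts the write position: with rowOffset == 0 the value is appended at pCol->len and the length grows by one slot, while with rowOffset == -1 the previous row's slot is overwritten in place and the length stays put. A toy sketch of that arithmetic over a plain byte buffer, with hypothetical names rather than the real SDataCol:

#include <assert.h>
#include <stdio.h>
#include <string.h>

/* hypothetical fixed-width column: a flat byte buffer plus a length in bytes */
typedef struct {
  char buf[64];
  int  len;        /* bytes currently used */
  int  typeBytes;  /* width of one value */
} FixedCol;

/* rowOffset == 0 appends a new slot and grows len;
 * rowOffset == -1 overwrites the last slot in place and keeps len.
 * (Simplified: numOfRows here always equals the rows already stored; the
 * real code's bookkeeping differs slightly in the partial-update path.) */
static void colAppendVal(FixedCol *col, const void *value, int numOfRows, int rowOffset) {
  assert(rowOffset == 0 || rowOffset == -1);
  assert(col->len == col->typeBytes * numOfRows);
  memcpy(col->buf + col->len + rowOffset * col->typeBytes, value, col->typeBytes);
  if (rowOffset == 0) col->len += col->typeBytes;
}

int main(void) {
  FixedCol col = {.len = 0, .typeBytes = sizeof(int)};
  int v = 1, w = 2, fix = 7;
  colAppendVal(&col, &v, 0, 0);    /* append row 0          -> [1]    */
  colAppendVal(&col, &w, 1, 0);    /* append row 1          -> [1, 2] */
  colAppendVal(&col, &fix, 2, -1); /* partial-update row 1  -> [1, 7] */
  printf("rows=%d last=%d\n", col.len / col.typeBytes, ((int *)col.buf)[1]);
  return 0;   /* prints rows=2 last=7 */
}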
......@@ -441,7 +457,8 @@ void tdResetDataCols(SDataCols *pCols) {
}
}
static void tdAppendDataRowToDataCol(SDataRow row, STSchema *pSchema, SDataCols *pCols, bool forceSetNull) {
static void tdAppendDataRowToDataCol(SDataRow row, STSchema *pSchema, SDataCols *pCols, bool forceSetNull,
int rowOffset) {
ASSERT(pCols->numOfRows == 0 || dataColsKeyLast(pCols) < dataRowKey(row));
int rcol = 0;
......@@ -451,7 +468,7 @@ static void tdAppendDataRowToDataCol(SDataRow row, STSchema *pSchema, SDataCols
bool setCol = 0;
SDataCol *pDataCol = &(pCols->cols[dcol]);
if (rcol >= schemaNCols(pSchema)) {
dataColAppendVal(pDataCol, getNullValue(pDataCol->type), pCols->numOfRows, pCols->maxPoints);
dataColAppendVal(pDataCol, getNullValue(pDataCol->type), pCols->numOfRows, pCols->maxPoints, rowOffset);
dcol++;
continue;
}
......@@ -460,14 +477,14 @@ static void tdAppendDataRowToDataCol(SDataRow row, STSchema *pSchema, SDataCols
if (pRowCol->colId == pDataCol->colId) {
void *value = tdGetRowDataOfCol(row, pRowCol->type, pRowCol->offset + TD_DATA_ROW_HEAD_SIZE);
if(!isNull(value, pDataCol->type)) setCol = 1;
dataColAppendVal(pDataCol, value, pCols->numOfRows, pCols->maxPoints);
dataColAppendVal(pDataCol, value, pCols->numOfRows, pCols->maxPoints, rowOffset);
dcol++;
rcol++;
} else if (pRowCol->colId < pDataCol->colId) {
rcol++;
} else {
if(forceSetNull || setCol) {
dataColAppendVal(pDataCol, getNullValue(pDataCol->type), pCols->numOfRows, pCols->maxPoints);
dataColAppendVal(pDataCol, getNullValue(pDataCol->type), pCols->numOfRows, pCols->maxPoints, rowOffset);
}
dcol++;
}
......@@ -475,7 +492,7 @@ static void tdAppendDataRowToDataCol(SDataRow row, STSchema *pSchema, SDataCols
pCols->numOfRows++;
}
static void tdAppendKvRowToDataCol(SKVRow row, STSchema *pSchema, SDataCols *pCols, bool forceSetNull) {
static void tdAppendKvRowToDataCol(SKVRow row, STSchema *pSchema, SDataCols *pCols, bool forceSetNull, int rowOffset) {
ASSERT(pCols->numOfRows == 0 || dataColsKeyLast(pCols) < kvRowKey(row));
int rcol = 0;
......@@ -487,7 +504,7 @@ static void tdAppendKvRowToDataCol(SKVRow row, STSchema *pSchema, SDataCols *pCo
bool setCol = 0;
SDataCol *pDataCol = &(pCols->cols[dcol]);
if (rcol >= nRowCols || rcol >= schemaNCols(pSchema)) {
dataColAppendVal(pDataCol, getNullValue(pDataCol->type), pCols->numOfRows, pCols->maxPoints);
dataColAppendVal(pDataCol, getNullValue(pDataCol->type), pCols->numOfRows, pCols->maxPoints, rowOffset);
++dcol;
continue;
}
......@@ -497,14 +514,14 @@ static void tdAppendKvRowToDataCol(SKVRow row, STSchema *pSchema, SDataCols *pCo
if (colIdx->colId == pDataCol->colId) {
void *value = tdGetKvRowDataOfCol(row, colIdx->offset);
if(!isNull(value, pDataCol->type)) setCol = 1;
dataColAppendVal(pDataCol, value, pCols->numOfRows, pCols->maxPoints);
dataColAppendVal(pDataCol, value, pCols->numOfRows, pCols->maxPoints, rowOffset);
++dcol;
++rcol;
} else if (colIdx->colId < pDataCol->colId) {
++rcol;
} else {
if(forceSetNull || setCol) {
dataColAppendVal(pDataCol, getNullValue(pDataCol->type), pCols->numOfRows, pCols->maxPoints);
dataColAppendVal(pDataCol, getNullValue(pDataCol->type), pCols->numOfRows, pCols->maxPoints, rowOffset);
}
++dcol;
}
......@@ -512,11 +529,11 @@ static void tdAppendKvRowToDataCol(SKVRow row, STSchema *pSchema, SDataCols *pCo
pCols->numOfRows++;
}
void tdAppendMemRowToDataCol(SMemRow row, STSchema *pSchema, SDataCols *pCols, bool forceSetNull) {
void tdAppendMemRowToDataCol(SMemRow row, STSchema *pSchema, SDataCols *pCols, bool forceSetNull, int rowOffset) {
if (isDataRow(row)) {
tdAppendDataRowToDataCol(memRowDataBody(row), pSchema, pCols, forceSetNull);
tdAppendDataRowToDataCol(memRowDataBody(row), pSchema, pCols, forceSetNull, rowOffset);
} else if (isKvRow(row)) {
tdAppendKvRowToDataCol(memRowKvBody(row), pSchema, pCols, forceSetNull);
tdAppendKvRowToDataCol(memRowKvBody(row), pSchema, pCols, forceSetNull, rowOffset);
} else {
ASSERT(0);
}
......@@ -539,7 +556,7 @@ int tdMergeDataCols(SDataCols *target, SDataCols *source, int rowsToMerge, int *
for (int j = 0; j < source->numOfCols; j++) {
if (source->cols[j].len > 0 || target->cols[j].len > 0) {
dataColAppendVal(target->cols + j, tdGetColDataOfRow(source->cols + j, i + (*pOffset)), target->numOfRows,
target->maxPoints);
target->maxPoints, 0);
}
}
target->numOfRows++;
......@@ -583,7 +600,7 @@ static void tdMergeTwoDataCols(SDataCols *target, SDataCols *src1, int *iter1, i
ASSERT(target->cols[i].type == src1->cols[i].type);
if (src1->cols[i].len > 0 || target->cols[i].len > 0) {
dataColAppendVal(&(target->cols[i]), tdGetColDataOfRow(src1->cols + i, *iter1), target->numOfRows,
target->maxPoints);
target->maxPoints, 0);
}
}
......@@ -595,10 +612,10 @@ static void tdMergeTwoDataCols(SDataCols *target, SDataCols *src1, int *iter1, i
ASSERT(target->cols[i].type == src2->cols[i].type);
if (src2->cols[i].len > 0 && !isNull(src2->cols[i].pData, src2->cols[i].type)) {
dataColAppendVal(&(target->cols[i]), tdGetColDataOfRow(src2->cols + i, *iter2), target->numOfRows,
target->maxPoints);
target->maxPoints, 0);
} else if(!forceSetNull && key1 == key2 && src1->cols[i].len > 0) {
dataColAppendVal(&(target->cols[i]), tdGetColDataOfRow(src1->cols + i, *iter1), target->numOfRows,
target->maxPoints);
target->maxPoints, 0);
} else if(target->cols[i].len > 0) {
dataColSetNullAt(&target->cols[i], target->numOfRows);
}
......
......@@ -268,6 +268,8 @@ int32_t tsdbDebugFlag = 131;
int32_t cqDebugFlag = 131;
int32_t fsDebugFlag = 135;
int8_t tsClientMerge = 0;
#ifdef TD_TSZ
//
// lossy compress 6
......@@ -1642,6 +1644,16 @@ static void doInitGlobalConfig(void) {
cfg.unitType = TAOS_CFG_UTYPE_NONE;
taosInitConfigOption(cfg);
cfg.option = "clientMerge";
cfg.ptr = &tsClientMerge;
cfg.valType = TAOS_CFG_VTYPE_INT8;
cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_SHOW;
cfg.minValue = 0;
cfg.maxValue = 1;
cfg.ptrLength = 1;
cfg.unitType = TAOS_CFG_UTYPE_NONE;
taosInitConfigOption(cfg);
// default JSON string type option "binary"/"nchar"
cfg.option = "defaultJSONStrType";
cfg.ptr = tsDefaultJSONStrType;
......@@ -1715,9 +1727,9 @@ static void doInitGlobalConfig(void) {
cfg.ptrLength = 0;
cfg.unitType = TAOS_CFG_UTYPE_NONE;
taosInitConfigOption(cfg);
assert(tsGlobalConfigNum == TSDB_CFG_MAX_NUM);
assert(tsGlobalConfigNum < TSDB_CFG_MAX_NUM);
#else
assert(tsGlobalConfigNum == TSDB_CFG_MAX_NUM - 5);
assert(tsGlobalConfigNum < TSDB_CFG_MAX_NUM);
#endif
}
......
......@@ -121,7 +121,7 @@ public class Utils {
}
private static void findValuesClauseRangeSet(String preparedSql, RangeSet<Integer> clauseRangeSet) {
Matcher matcher = Pattern.compile("(values|,)\\s*(\\([^)]*\\))").matcher(preparedSql);
Matcher matcher = Pattern.compile("(values||,)\\s*(\\([^)]*\\))").matcher(preparedSql);
while (matcher.find()) {
int start = matcher.start(2);
int end = matcher.end(2);
......
......@@ -518,7 +518,7 @@ public class SQLTest {
@Test
public void testCase050() {
String sql = "select * from restful_test.t1, restful_test.t3 where t1.ts = t3.ts and t1.location = t3.location";
String sql = "select * from restful_test.t1, restful_test.t3 where t1.ts = t3.ts";
// when
ResultSet rs = executeQuery(connection, sql);
// then
......
......@@ -73,6 +73,48 @@ public class UtilsTest {
Assert.assertEquals(expected, actual);
}
@Test
public void multiValuesAndWhitespace() {
// given
String nativeSql = "INSERT INTO ? (TS,CURRENT,VOLTAGE,PHASE) USING METERS TAGS (?) VALUES (?,?,?,?) (?,?,?,?) (?,?,?,?)";
Object[] parameters = Stream.of("d1", 1, 100, 3.14, "abc", 4, 200, 3.1415, "xyz", 5, 300, 3.141592, "uvw", 6).toArray();
// when
String actual = Utils.getNativeSql(nativeSql, parameters);
// then
String expected = "INSERT INTO d1 (TS,CURRENT,VOLTAGE,PHASE) USING METERS TAGS (1) VALUES (100,3.14,'abc',4) (200,3.1415,'xyz',5) (300,3.141592,'uvw',6)";
Assert.assertEquals(expected, actual);
}
@Test
public void multiValuesNoSeparator() {
// given
String nativeSql = "INSERT INTO ? (TS,CURRENT,VOLTAGE,PHASE) USING METERS TAGS (?) VALUES (?,?,?,?)(?,?,?,?)(?,?,?,?)";
Object[] parameters = Stream.of("d1", 1, 100, 3.14, "abc", 4, 200, 3.1415, "xyz", 5, 300, 3.141592, "uvw", 6).toArray();
// when
String actual = Utils.getNativeSql(nativeSql, parameters);
// then
String expected = "INSERT INTO d1 (TS,CURRENT,VOLTAGE,PHASE) USING METERS TAGS (1) VALUES (100,3.14,'abc',4)(200,3.1415,'xyz',5)(300,3.141592,'uvw',6)";
Assert.assertEquals(expected, actual);
}
@Test
public void multiValuesMultiSeparator() {
// given
String nativeSql = "INSERT INTO ? (TS,CURRENT,VOLTAGE,PHASE) USING METERS TAGS (?) VALUES (?,?,?,?) (?,?,?,?), (?,?,?,?)";
Object[] parameters = Stream.of("d1", 1, 100, 3.14, "abc", 4, 200, 3.1415, "xyz", 5, 300, 3.141592, "uvw", 6).toArray();
// when
String actual = Utils.getNativeSql(nativeSql, parameters);
// then
String expected = "INSERT INTO d1 (TS,CURRENT,VOLTAGE,PHASE) USING METERS TAGS (1) VALUES (100,3.14,'abc',4) (200,3.1415,'xyz',5), (300,3.141592,'uvw',6)";
Assert.assertEquals(expected, actual);
}
@Test
public void lineTerminator() {
// given
......@@ -100,6 +142,32 @@ public class UtilsTest {
Assert.assertEquals(expected, actual);
}
@Test
public void lineTerminatorAndMultiValuesAndNoneOrMoreWhitespace() {
String nativeSql = "INSERT Into ? TAGS(?) VALUES(?,?,\r\n?,?),(?,? ,\r\n?,?) t? tags (?) Values (?,?,?\r\n,?) (?,?,?,?) t? Tags(?) values (?,?,?,?) , (?,?,?,?)";
Object[] parameters = Stream.of("t1", "abc", 100, 1.1, "xxx", "xxx", 200, 2.2, "xxx", "xxx", 2, "bcd", 300, 3.3, "xxx", "xxx", 400, 4.4, "xxx", "xxx", 3, "cde", 500, 5.5, "xxx", "xxx", 600, 6.6, "xxx", "xxx").toArray();
// when
String actual = Utils.getNativeSql(nativeSql, parameters);
// then
String expected = "INSERT Into t1 TAGS('abc') VALUES(100,1.1,\r\n'xxx','xxx'),(200,2.2 ,\r\n'xxx','xxx') t2 tags ('bcd') Values (300,3.3,'xxx'\r\n,'xxx') (400,4.4,'xxx','xxx') t3 Tags('cde') values (500,5.5,'xxx','xxx') , (600,6.6,'xxx','xxx')";
Assert.assertEquals(expected, actual);
}
@Test
public void multiValuesAndNoneOrMoreWhitespace() {
String nativeSql = "INSERT INTO ? USING traces TAGS (?, ?) VALUES (?, ?, ?, ?, ?, ?, ?) (?, ?, ?, ?, ?, ?, ?)";
Object[] parameters = Stream.of("t1", "t1", "t2", 1632968284000L, 111.111, 119.001, 0.4, 90, 99.1, "WGS84", 1632968285000L, 111.21109999999999, 120.001, 0.5, 91, 99.19999999999999, "WGS84").toArray();
// when
String actual = Utils.getNativeSql(nativeSql, parameters);
// then
String expected = "INSERT INTO t1 USING traces TAGS ('t1', 't2') VALUES (1632968284000, 111.111, 119.001, 0.4, 90, 99.1, 'WGS84') (1632968285000, 111.21109999999999, 120.001, 0.5, 91, 99.19999999999999, 'WGS84')";
Assert.assertEquals(expected, actual);
}
@Test
public void replaceNothing() {
// given
......
......@@ -26,6 +26,8 @@ ENDIF ()
SET_TARGET_PROPERTIES(shell PROPERTIES OUTPUT_NAME taos)
ELSEIF (TD_WINDOWS)
ADD_DEFINITIONS(-DUNICODE)
ADD_DEFINITIONS(-D_UNICODE)
LIST(APPEND SRC ./src/shellEngine.c)
LIST(APPEND SRC ./src/shellMain.c)
LIST(APPEND SRC ./src/shellWindows.c)
......
......@@ -95,6 +95,9 @@ SShellArguments args = {
*/
int main(int argc, char* argv[]) {
/*setlocale(LC_ALL, "en_US.UTF-8"); */
#ifdef WINDOWS
SetConsoleOutputCP(CP_UTF8);
#endif
if (!checkVersion()) {
exit(EXIT_FAILURE);
......
......@@ -272,13 +272,16 @@ int32_t shellReadCommand(TAOS *con, char command[]) {
cmd.command = (char *)calloc(1, MAX_COMMAND_SIZE);
// Read input.
char c;
void *console = GetStdHandle(STD_INPUT_HANDLE);
unsigned long read;
wchar_t c;
char mbStr[16];
while (1) {
c = getchar();
int ret = ReadConsole(console, &c, 1, &read, NULL);
int size = WideCharToMultiByte(CP_UTF8, 0, &c, read, mbStr, sizeof(mbStr), NULL, NULL);
mbStr[size] = 0;
switch (c) {
case '\n':
case '\r':
if (isReadyGo(&cmd)) {
sprintf(command, "%s%s", cmd.buffer, cmd.command);
free(cmd.buffer);
......@@ -291,8 +294,12 @@ int32_t shellReadCommand(TAOS *con, char command[]) {
updateBuffer(&cmd);
}
break;
case '\r':
break;
default:
insertChar(&cmd, c);
for (int i = 0; i < size; ++i) {
insertChar(&cmd, mbStr[i]);
}
}
}
......
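The shell change above pairs with the -DUNICODE/-D_UNICODE definitions added in the CMake hunk: ReadConsole then resolves to ReadConsoleW, so each keystroke arrives as a wchar_t and is converted to UTF-8 with WideCharToMultiByte before being inserted byte by byte into the command buffer. A minimal standalone sketch of that conversion (Windows only, hypothetical demo program, not the shell itself):

#include <stdio.h>
#include <windows.h>

int main(void) {
  SetConsoleOutputCP(CP_UTF8);                     /* so UTF-8 output prints correctly */
  HANDLE console = GetStdHandle(STD_INPUT_HANDLE);
  wchar_t c;
  DWORD   read = 0;
  char    mbStr[16];

  /* read one wide character and convert it to UTF-8 bytes */
  if (ReadConsoleW(console, &c, 1, &read, NULL) && read > 0) {
    int size = WideCharToMultiByte(CP_UTF8, 0, &c, (int)read, mbStr, sizeof(mbStr), NULL, NULL);
    mbStr[size] = '\0';
    printf("read %d UTF-8 byte(s): %s\n", size, mbStr);
  }
  return 0;
}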
......@@ -3567,8 +3567,10 @@ static int postProceSql(char *host, uint16_t port,
break;
received += bytes;
response_buf[RESP_BUF_LEN - 1] = '\0';
verbosePrint("%s() LN%d: received:%d resp_len:%d, response_buf:\n%s\n",
__func__, __LINE__, received, resp_len, response_buf);
response_buf[RESP_BUF_LEN - 1] = '\0';
if (strlen(response_buf)) {
verbosePrint("%s() LN%d: received:%d resp_len:%d, response_buf:\n%s\n",
__func__, __LINE__, received, resp_len, response_buf);
......@@ -6611,7 +6613,6 @@ static int getRowDataFromSample(
stbInfo->sampleDataBuf
+ stbInfo->lenOfOneRow * (*sampleUsePos));
}
dataLen += snprintf(dataBuf + dataLen, maxLen - dataLen, ")");
(*sampleUsePos)++;
......@@ -11211,7 +11212,6 @@ static void startMultiThreadInsertData(int threads, char* db_name,
pThreadInfo->start_time = pThreadInfo->start_time + rand_int() % 10000 - rand_tinyint();
}
*/
if (g_args.iface == REST_IFACE || ((stbInfo) && (stbInfo->iface == REST_IFACE))) {
#ifdef WINDOWS
WSADATA wsaData;
......@@ -11237,7 +11237,6 @@ static void startMultiThreadInsertData(int threads, char* db_name,
pThreadInfo->sockfd = sockfd;
}
tsem_init(&(pThreadInfo->lock_sem), 0, 0);
if (ASYNC_MODE == g_Dbs.asyncMode) {
pthread_create(pids + i, NULL, asyncWrite, pThreadInfo);
......
......@@ -229,6 +229,7 @@ typedef struct SQueryAttr {
bool stateWindow; // window State on sub/normal table
bool createFilterOperator; // if filter operator is needed
bool multigroupResult; // multigroup result can exist in one SSDataBlock
bool needSort; // need sort rowRes
int32_t interBufSize; // intermediate buffer sizse
int32_t havingNum; // having expr number
......
......@@ -153,6 +153,7 @@ typedef struct SQueryInfo {
int32_t havingFieldNum;
bool stableQuery;
bool groupbyColumn;
bool groupbyTag;
bool simpleAgg;
bool arithmeticOnAgg;
bool projectionQuery;
......
......@@ -1313,9 +1313,6 @@ static void projectApplyFunctions(SQueryRuntimeEnv *pRuntimeEnv, SQLFunctionCtx
if (pCtx[k].currentStage == MERGE_STAGE) {
pCtx[k].order = TSDB_ORDER_ASC;
}
pCtx[k].startTs = pQueryAttr->window.skey;
if (pCtx[k].functionId < 0) {
// load the script and exec
SUdfInfo* pUdfInfo = pRuntimeEnv->pUdfInfo;
......@@ -5991,6 +5988,18 @@ static SSDataBlock* doFilter(void* param, bool* newgroup) {
return NULL;
}
static int32_t resRowCompare(const void *r1, const void *r2) {
SResultRow *res1 = *(SResultRow **)r1;
SResultRow *res2 = *(SResultRow **)r2;
if (res1->win.skey == res2->win.skey) {
return 0;
} else {
return res1->win.skey > res2->win.skey ? 1 : -1;
}
}
static SSDataBlock* doIntervalAgg(void* param, bool* newgroup) {
SOperatorInfo* pOperator = (SOperatorInfo*) param;
if (pOperator->status == OP_EXEC_DONE) {
......@@ -6036,6 +6045,10 @@ static SSDataBlock* doIntervalAgg(void* param, bool* newgroup) {
pQueryAttr->window = win;
pOperator->status = OP_RES_TO_RETURN;
if (pIntervalInfo->resultRowInfo.size > 0 && pQueryAttr->needSort) {
qsort(pIntervalInfo->resultRowInfo.pResult, pIntervalInfo->resultRowInfo.size, POINTER_BYTES, resRowCompare);
}
closeAllResultRows(&pIntervalInfo->resultRowInfo);
setQueryStatus(pRuntimeEnv, QUERY_COMPLETED);
finalizeQueryResult(pOperator, pIntervalInfo->pCtx, &pIntervalInfo->resultRowInfo, pIntervalInfo->rowCellInfoOffset);
......
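When tscSetQuerySort marks needSort (an upstream subquery groups by a tag within an interval), doIntervalAgg qsorts the result rows by window start key using resRowCompare. Note that qsort hands the comparator pointers to the array elements; since resultRowInfo.pResult is an array of SResultRow pointers (hence the POINTER_BYTES element size), each argument is effectively an SResultRow**, which is why the comparator dereferences twice. A self-contained illustration of the same qsort-over-pointer-array pattern with a hypothetical struct:

#include <stdio.h>
#include <stdlib.h>

/* hypothetical stand-in for SResultRow: only the window start key matters here */
typedef struct { long long skey; } ResRow;

/* qsort passes pointers to the array elements; the elements are ResRow*,
 * so each argument is a ResRow** and must be dereferenced once more. */
static int resRowCompare(const void *r1, const void *r2) {
  ResRow *a = *(ResRow **)r1;
  ResRow *b = *(ResRow **)r2;
  if (a->skey == b->skey) return 0;
  return a->skey > b->skey ? 1 : -1;
}

int main(void) {
  ResRow r1 = {30}, r2 = {10}, r3 = {20};
  ResRow *rows[] = {&r1, &r2, &r3};               /* array of pointers */
  qsort(rows, 3, sizeof(ResRow *), resRowCompare);
  for (int i = 0; i < 3; ++i) printf("%lld ", rows[i]->skey);
  printf("\n");                                   /* prints 10 20 30 */
  return 0;
}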
......@@ -41,6 +41,7 @@ typedef struct STable {
int16_t restoreColumnNum;
bool hasRestoreLastColumn;
int lastColSVersion;
int16_t cacheLastConfigVersion;
T_REF_DECLARE()
} STable;
......
......@@ -80,7 +80,7 @@ struct STsdbRepo {
bool config_changed; // config changed flag
pthread_mutex_t save_mutex; // protect save config
uint8_t hasCachedLastColumn;
int16_t cacheLastConfigVersion;
STsdbAppH appH;
STsdbStat stat;
......@@ -111,7 +111,8 @@ int tsdbUnlockRepo(STsdbRepo* pRepo);
STsdbMeta* tsdbGetMeta(STsdbRepo* pRepo);
int tsdbCheckCommit(STsdbRepo* pRepo);
int tsdbRestoreInfo(STsdbRepo* pRepo);
int tsdbCacheLastData(STsdbRepo *pRepo, STsdbCfg* oldCfg);
UNUSED_FUNC int tsdbCacheLastData(STsdbRepo *pRepo, STsdbCfg* oldCfg);
int32_t tsdbLoadLastCache(STsdbRepo *pRepo, STable* pTable);
void tsdbGetRootDir(int repoid, char dirName[]);
void tsdbGetDataDir(int repoid, char dirName[]);
......
......@@ -1468,7 +1468,7 @@ static void tsdbLoadAndMergeFromCache(SDataCols *pDataCols, int *iter, SCommitIt
for (int i = 0; i < pDataCols->numOfCols; i++) {
//TODO: dataColAppendVal may fail
dataColAppendVal(pTarget->cols + i, tdGetColDataOfRow(pDataCols->cols + i, *iter), pTarget->numOfRows,
pTarget->maxPoints);
pTarget->maxPoints, 0);
}
pTarget->numOfRows++;
......@@ -1480,7 +1480,7 @@ static void tsdbLoadAndMergeFromCache(SDataCols *pDataCols, int *iter, SCommitIt
ASSERT(pSchema != NULL);
}
tdAppendMemRowToDataCol(row, pSchema, pTarget, true);
tdAppendMemRowToDataCol(row, pSchema, pTarget, true, 0);
tSkipListIterNext(pCommitIter->pIter);
} else {
......@@ -1489,7 +1489,7 @@ static void tsdbLoadAndMergeFromCache(SDataCols *pDataCols, int *iter, SCommitIt
for (int i = 0; i < pDataCols->numOfCols; i++) {
//TODO: dataColAppendVal may fail
dataColAppendVal(pTarget->cols + i, tdGetColDataOfRow(pDataCols->cols + i, *iter), pTarget->numOfRows,
pTarget->maxPoints);
pTarget->maxPoints, 0);
}
if(update == TD_ROW_DISCARD_UPDATE) pTarget->numOfRows++;
......@@ -1502,7 +1502,8 @@ static void tsdbLoadAndMergeFromCache(SDataCols *pDataCols, int *iter, SCommitIt
ASSERT(pSchema != NULL);
}
tdAppendMemRowToDataCol(row, pSchema, pTarget, update == TD_ROW_OVERWRITE_UPDATE);
tdAppendMemRowToDataCol(row, pSchema, pTarget, update == TD_ROW_OVERWRITE_UPDATE,
update != TD_ROW_PARTIAL_UPDATE ? 0 : -1);
}
(*iter)++;
tSkipListIterNext(pCommitIter->pIter);
......
......@@ -146,7 +146,9 @@ static void tsdbApplyRepoConfig(STsdbRepo *pRepo) {
if (oldCfg.cacheLastRow != pRepo->config.cacheLastRow) {
if (tsdbLockRepo(pRepo) < 0) return;
tsdbCacheLastData(pRepo, &oldCfg);
// tsdbCacheLastData(pRepo, &oldCfg);
// lazily load the last cache on query or update instead
++pRepo->cacheLastConfigVersion;
tsdbUnlockRepo(pRepo);
}
......
......@@ -576,7 +576,7 @@ static STsdbRepo *tsdbNewRepo(STsdbCfg *pCfg, STsdbAppH *pAppH) {
return NULL;
}
pRepo->config_changed = false;
atomic_store_8(&pRepo->hasCachedLastColumn, 0);
pRepo->cacheLastConfigVersion = 0;
code = tsem_init(&(pRepo->readyToCommit), 0, 1);
if (code != 0) {
......@@ -726,7 +726,7 @@ static int tsdbRestoreLastColumns(STsdbRepo *pRepo, STable *pTable, SReadH* pRea
loadStatisData = true;
}
}
TSDB_WLOCK_TABLE(pTable); // lock while updating pTable->lastCols[]
for (int16_t i = 0; i < numColumns && numColumns > pTable->restoreColumnNum; ++i) {
STColumn *pCol = schemaColAt(pSchema, i);
// ignore loaded columns
......@@ -775,6 +775,7 @@ static int tsdbRestoreLastColumns(STsdbRepo *pRepo, STable *pTable, SReadH* pRea
break;
}
}
TSDB_WUNLOCK_TABLE(pTable);
}
out:
......@@ -803,20 +804,33 @@ static int tsdbRestoreLastRow(STsdbRepo *pRepo, STable *pTable, SReadH* pReadh,
// Get the data in row
STSchema *pSchema = tsdbGetTableSchema(pTable);
pTable->lastRow = taosTMalloc(memRowMaxBytesFromSchema(pSchema));
if (pTable->lastRow == NULL) {
SMemRow lastRow = taosTMalloc(memRowMaxBytesFromSchema(pSchema));
if (lastRow == NULL) {
terrno = TSDB_CODE_TDB_OUT_OF_MEMORY;
return -1;
}
memRowSetType(pTable->lastRow, SMEM_ROW_DATA);
tdInitDataRow(memRowDataBody(pTable->lastRow), pSchema);
memRowSetType(lastRow, SMEM_ROW_DATA);
tdInitDataRow(memRowDataBody(lastRow), pSchema);
for (int icol = 0; icol < schemaNCols(pSchema); icol++) {
STColumn *pCol = schemaColAt(pSchema, icol);
SDataCol *pDataCol = pReadh->pDCols[0]->cols + icol;
tdAppendColVal(memRowDataBody(pTable->lastRow), tdGetColDataOfRow(pDataCol, pBlock->numOfRows - 1), pCol->type,
tdAppendColVal(memRowDataBody(lastRow), tdGetColDataOfRow(pDataCol, pBlock->numOfRows - 1), pCol->type,
pCol->offset);
}
TSKEY lastKey = memRowKey(lastRow);
// while the data was being loaded from file, new data may have been inserted and the last row already updated
TSDB_WLOCK_TABLE(pTable);
if (pTable->lastRow == NULL) {
pTable->lastKey = lastKey;
pTable->lastRow = lastRow;
TSDB_WUNLOCK_TABLE(pTable);
} else {
TSDB_WUNLOCK_TABLE(pTable);
taosTZfree(lastRow);
}
return 0;
}
......@@ -887,14 +901,105 @@ int tsdbRestoreInfo(STsdbRepo *pRepo) {
tsdbDestroyReadH(&readh);
if (CACHE_LAST_NULL_COLUMN(pCfg)) {
atomic_store_8(&pRepo->hasCachedLastColumn, 1);
// if (CACHE_LAST_NULL_COLUMN(pCfg)) {
// atomic_store_8(&pRepo->hasCachedLastColumn, 1);
// }
return 0;
}
int32_t tsdbLoadLastCache(STsdbRepo *pRepo, STable *pTable) {
SFSIter fsiter;
SReadH readh;
SDFileSet *pSet;
int cacheLastRowTableNum = 0;
int cacheLastColTableNum = 0;
bool cacheLastRow = CACHE_LAST_ROW(&(pRepo->config));
bool cacheLastCol = CACHE_LAST_NULL_COLUMN(&(pRepo->config));
tsdbDebug("tsdbLoadLastCache for %s, cacheLastRow:%d, cacheLastCol:%d", pTable->name->data, cacheLastRow, cacheLastCol);
pTable->cacheLastConfigVersion = pRepo->cacheLastConfigVersion;
if (!cacheLastRow && pTable->lastRow != NULL) {
taosTZfree(pTable->lastRow);
pTable->lastRow = NULL;
}
if (!cacheLastCol && pTable->lastCols != NULL) {
tsdbFreeLastColumns(pTable);
}
if (!cacheLastRow && !cacheLastCol) {
return 0;
}
cacheLastRowTableNum = (cacheLastRow && pTable->lastRow == NULL) ? 1 : 0;
cacheLastColTableNum = (cacheLastCol && pTable->lastCols == NULL) ? 1 : 0;
if (cacheLastRowTableNum == 0 && cacheLastColTableNum == 0) {
return 0;
}
if (tsdbInitReadH(&readh, pRepo) < 0) {
return -1;
}
tsdbRLockFS(REPO_FS(pRepo));
tsdbFSIterInit(&fsiter, REPO_FS(pRepo), TSDB_FS_ITER_BACKWARD);
while ((cacheLastRowTableNum > 0 || cacheLastColTableNum > 0) && (pSet = tsdbFSIterNext(&fsiter)) != NULL) {
if (tsdbSetAndOpenReadFSet(&readh, pSet) < 0) {
tsdbUnLockFS(REPO_FS(pRepo));
tsdbDestroyReadH(&readh);
return -1;
}
if (tsdbLoadBlockIdx(&readh) < 0) {
tsdbUnLockFS(REPO_FS(pRepo));
tsdbDestroyReadH(&readh);
return -1;
}
// tsdbDebug("tsdbRestoreInfo restore vgId:%d,table:%s", REPO_ID(pRepo), pTable->name->data);
if (tsdbSetReadTable(&readh, pTable) < 0) {
tsdbUnLockFS(REPO_FS(pRepo));
tsdbDestroyReadH(&readh);
return -1;
}
SBlockIdx *pIdx = readh.pBlkIdx;
if (pIdx && (cacheLastRowTableNum > 0) && (pTable->lastRow == NULL)) {
if (tsdbRestoreLastRow(pRepo, pTable, &readh, pIdx) != 0) {
tsdbUnLockFS(REPO_FS(pRepo));
tsdbDestroyReadH(&readh);
return -1;
}
cacheLastRowTableNum -= 1;
}
// restore NULL columns
if (pIdx && (cacheLastColTableNum > 0) && !pTable->hasRestoreLastColumn) {
if (tsdbRestoreLastColumns(pRepo, pTable, &readh) != 0) {
tsdbUnLockFS(REPO_FS(pRepo));
tsdbDestroyReadH(&readh);
return -1;
}
if (pTable->hasRestoreLastColumn) {
cacheLastColTableNum -= 1;
}
}
}
tsdbUnLockFS(REPO_FS(pRepo));
tsdbDestroyReadH(&readh);
return 0;
}
int tsdbCacheLastData(STsdbRepo *pRepo, STsdbCfg* oldCfg) {
UNUSED_FUNC int tsdbCacheLastData(STsdbRepo *pRepo, STsdbCfg* oldCfg) {
bool cacheLastRow = false, cacheLastCol = false;
SFSIter fsiter;
SReadH readh;
......@@ -928,9 +1033,9 @@ int tsdbCacheLastData(STsdbRepo *pRepo, STsdbCfg* oldCfg) {
// if the cacheLast option was turned off, the cached data needs to be freed
if (need_free_last_row || need_free_last_col) {
if (need_free_last_col) {
atomic_store_8(&pRepo->hasCachedLastColumn, 0);
}
// if (need_free_last_col) {
// atomic_store_8(&pRepo->hasCachedLastColumn, 0);
// }
tsdbInfo("free cache last data since cacheLast option changed");
for (int i = 1; i <= maxTableIdx; i++) {
STable *pTable = pMeta->tables[i];
......@@ -1008,9 +1113,9 @@ int tsdbCacheLastData(STsdbRepo *pRepo, STsdbCfg* oldCfg) {
tsdbDestroyReadH(&readh);
if (cacheLastCol) {
atomic_store_8(&pRepo->hasCachedLastColumn, 1);
}
// if (cacheLastCol) {
// atomic_store_8(&pRepo->hasCachedLastColumn, 1);
// }
return 0;
}
......@@ -594,7 +594,7 @@ static int tsdbAppendTableRowToCols(STable *pTable, SDataCols *pCols, STSchema *
}
}
tdAppendMemRowToDataCol(row, *ppSchema, pCols, true);
tdAppendMemRowToDataCol(row, *ppSchema, pCols, true, 0);
}
return 0;
......@@ -1001,7 +1001,8 @@ static void updateTableLatestColumn(STsdbRepo *pRepo, STable *pTable, SMemRow ro
if ((value == NULL) || isNull(value, pTCol->type)) {
continue;
}
// lock while updating the cached last column value
TSDB_WLOCK_TABLE(pTable);
SDataCol *pDataCol = &(pLatestCols[idx]);
if (pDataCol->pData == NULL) {
pDataCol->pData = malloc(pTCol->bytes);
......@@ -1017,6 +1018,8 @@ static void updateTableLatestColumn(STsdbRepo *pRepo, STable *pTable, SMemRow ro
memcpy(pDataCol->pData, value, bytes);
//tsdbInfo("updateTableLatestColumn vgId:%d cache column %d for %d,%s", REPO_ID(pRepo), j, pDataCol->bytes, (char*)pDataCol->pData);
pDataCol->ts = memRowKey(row);
// unlock
TSDB_WUNLOCK_TABLE(pTable);
}
}
......@@ -1063,5 +1066,8 @@ static int tsdbUpdateTableLatestInfo(STsdbRepo *pRepo, STable *pTable, SMemRow r
updateTableLatestColumn(pRepo, pTable, row);
}
}
pTable->cacheLastConfigVersion = pRepo->cacheLastConfigVersion;
return 0;
}
......@@ -639,18 +639,19 @@ int16_t tsdbGetLastColumnsIndexByColId(STable* pTable, int16_t colId) {
}
int tsdbInitColIdCacheWithSchema(STable* pTable, STSchema* pSchema) {
ASSERT(pTable->lastCols == NULL);
TSDB_WLOCK_TABLE(pTable);
if (pTable->lastCols == NULL) {
int16_t numOfColumn = pSchema->numOfCols;
pTable->lastCols = (SDataCol*)malloc(numOfColumn * sizeof(SDataCol));
pTable->lastCols = (SDataCol *)malloc(numOfColumn * sizeof(SDataCol));
if (pTable->lastCols == NULL) {
TSDB_WUNLOCK_TABLE(pTable);
return -1;
}
for (int16_t i = 0; i < numOfColumn; ++i) {
STColumn *pCol = schemaColAt(pSchema, i);
SDataCol* pDataCol = &(pTable->lastCols[i]);
SDataCol *pDataCol = &(pTable->lastCols[i]);
pDataCol->bytes = 0;
pDataCol->pData = NULL;
pDataCol->colId = pCol->colId;
......@@ -660,6 +661,8 @@ int tsdbInitColIdCacheWithSchema(STable* pTable, STSchema* pSchema) {
pTable->maxColNum = numOfColumn;
pTable->restoreColumnNum = 0;
pTable->hasRestoreLastColumn = false;
}
TSDB_WUNLOCK_TABLE(pTable);
return 0;
}
......@@ -809,6 +812,7 @@ static STable *tsdbNewTable() {
pTable->lastCols = NULL;
pTable->restoreColumnNum = 0;
pTable->cacheLastConfigVersion = 0;
pTable->maxColNum = 0;
pTable->hasRestoreLastColumn = false;
pTable->lastColSVersion = -1;
......
......@@ -157,6 +157,7 @@ typedef struct STableGroupSupporter {
static STimeWindow updateLastrowForEachGroup(STableGroupInfo *groupList);
static int32_t checkForCachedLastRow(STsdbQueryHandle* pQueryHandle, STableGroupInfo *groupList);
static int32_t checkForCachedLast(STsdbQueryHandle* pQueryHandle);
static int32_t lazyLoadCacheLast(STsdbQueryHandle* pQueryHandle);
static int32_t tsdbGetCachedLastRow(STable* pTable, SMemRow* pRes, TSKEY* lastKey);
static void changeQueryHandleForInterpQuery(TsdbQueryHandleT pHandle);
......@@ -591,6 +592,28 @@ void tsdbResetQueryHandleForNewTable(TsdbQueryHandleT queryHandle, STsdbQueryCon
pQueryHandle->next = doFreeColumnInfoData(pQueryHandle->next);
}
static int32_t lazyLoadCacheLast(STsdbQueryHandle* pQueryHandle) {
STsdbRepo* pRepo = pQueryHandle->pTsdb;
size_t numOfTables = taosArrayGetSize(pQueryHandle->pTableCheckInfo);
int32_t code = 0;
for (size_t i = 0; i < numOfTables; ++i) {
STableCheckInfo* pCheckInfo = taosArrayGet(pQueryHandle->pTableCheckInfo, i);
STable* pTable = pCheckInfo->pTableObj;
if (pTable->cacheLastConfigVersion == pRepo->cacheLastConfigVersion) {
continue;
}
code = tsdbLoadLastCache(pRepo, pTable);
if (code != 0) {
tsdbError("%p uid:%" PRId64 ", tid:%d, failed to load last cache since %s", pQueryHandle, pTable->tableId.uid,
pTable->tableId.tid, tstrerror(terrno));
break;
}
}
return code;
}
TsdbQueryHandleT tsdbQueryLastRow(STsdbRepo *tsdb, STsdbQueryCond *pCond, STableGroupInfo *groupList, uint64_t qId, SMemRef* pMemRef) {
pCond->twindow = updateLastrowForEachGroup(groupList);
......@@ -604,6 +627,8 @@ TsdbQueryHandleT tsdbQueryLastRow(STsdbRepo *tsdb, STsdbQueryCond *pCond, STable
return NULL;
}
lazyLoadCacheLast(pQueryHandle);
int32_t code = checkForCachedLastRow(pQueryHandle, groupList);
if (code != TSDB_CODE_SUCCESS) { // set the numOfTables to be 0
terrno = code;
......@@ -618,13 +643,14 @@ TsdbQueryHandleT tsdbQueryLastRow(STsdbRepo *tsdb, STsdbQueryCond *pCond, STable
return pQueryHandle;
}
TsdbQueryHandleT tsdbQueryCacheLast(STsdbRepo *tsdb, STsdbQueryCond *pCond, STableGroupInfo *groupList, uint64_t qId, SMemRef* pMemRef) {
STsdbQueryHandle *pQueryHandle = (STsdbQueryHandle*) tsdbQueryTables(tsdb, pCond, groupList, qId, pMemRef);
if (pQueryHandle == NULL) {
return NULL;
}
lazyLoadCacheLast(pQueryHandle);
int32_t code = checkForCachedLast(pQueryHandle);
if (code != TSDB_CODE_SUCCESS) { // set the numOfTables to be 0
terrno = code;
......@@ -2758,6 +2784,9 @@ static bool loadCachedLast(STsdbQueryHandle* pQueryHandle) {
}
int32_t i = 0, j = 0;
// lock pTable->lastCols[i] as it may be released during a schema update (tsdbUpdateLastColSchema)
TSDB_RLOCK_TABLE(pTable);
while(i < tgNumOfCols && j < numOfCols) {
pColInfo = taosArrayGet(pQueryHandle->pColumns, i);
if (pTable->lastCols[j].colId < pColInfo->info.colId) {
......@@ -2844,6 +2873,7 @@ static bool loadCachedLast(STsdbQueryHandle* pQueryHandle) {
i++;
j++;
}
TSDB_RUNLOCK_TABLE(pTable);
// leave the real ts column as the last row, because the last function (non-super-table only) uses the last row as its result
if (priKey != TSKEY_INITIAL_VAL) {
......@@ -3175,7 +3205,9 @@ int32_t checkForCachedLast(STsdbQueryHandle* pQueryHandle) {
int32_t code = 0;
if (pQueryHandle->pTsdb && atomic_load_8(&pQueryHandle->pTsdb->hasCachedLastColumn)){
STsdbRepo* pRepo = pQueryHandle->pTsdb;
if (pRepo && CACHE_LAST_NULL_COLUMN(&(pRepo->config))) {
pQueryHandle->cachelastrow = TSDB_CACHED_TYPE_LAST;
}
......
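The lazy-load scheme above replaces the eager tsdbCacheLastData call with a version stamp: tsdbApplyRepoConfig only bumps pRepo->cacheLastConfigVersion, and each query path compares the table's stamp against the repo's before deciding whether tsdbLoadLastCache needs to run. A minimal sketch of that version-stamp pattern with hypothetical types:

#include <stdio.h>

/* hypothetical stand-ins for STsdbRepo / STable: only the version stamps */
typedef struct { int cacheLastConfigVersion; } Repo;
typedef struct { int cacheLastConfigVersion; const char *name; } Table;

/* pretend reload of the last-row/last-column cache for one table */
static int loadLastCache(Repo *repo, Table *table) {
  printf("reloading last cache for %s\n", table->name);
  table->cacheLastConfigVersion = repo->cacheLastConfigVersion;  /* stamp the table */
  return 0;
}

/* called on the query path: reload only when the config changed since the last load */
static void lazyLoadCacheLast(Repo *repo, Table *table) {
  if (table->cacheLastConfigVersion == repo->cacheLastConfigVersion) return;
  loadLastCache(repo, table);
}

int main(void) {
  Repo repo = {0};
  Table t = {0, "t1"};
  lazyLoadCacheLast(&repo, &t);   /* stamps match (0 == 0): nothing to do     */
  repo.cacheLastConfigVersion++;  /* config change, e.g. cacheLast was altered */
  lazyLoadCacheLast(&repo, &t);   /* stamps differ -> reload once              */
  lazyLoadCacheLast(&repo, &t);   /* already up to date -> no work             */
  return 0;
}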
......@@ -20,7 +20,7 @@
extern "C" {
#endif
#define TSDB_CFG_MAX_NUM 125
#define TSDB_CFG_MAX_NUM 128
#define TSDB_CFG_PRINT_LEN 23
#define TSDB_CFG_OPTION_LEN 24
#define TSDB_CFG_VALUE_LEN 41
......
......@@ -346,7 +346,7 @@ int tsCompressBoolImp(const char *const input, const int nelements, char *const
/* t = (~((( uint8_t)1) << (7-i%BITS_PER_BYTE))); */
output[pos] |= t;
} else {
uError("Invalid compress bool value:%d", output[pos]);
uError("Invalid compress bool value:%d", input[i]);
return -1;
}
}
......
......@@ -144,7 +144,6 @@ void tSkipListPutBatchByIter(SSkipList *pSkipList, void *iter, iter_next_fn_t it
// search backward to find the position for the first record
hasDup = tSkipListGetPosToPut(pSkipList, backward, pData);
tSkipListPutImpl(pSkipList, pData, backward, false, hasDup);
for (int level = 0; level < pSkipList->maxLevel; level++) {
......@@ -163,7 +162,12 @@ void tSkipListPutBatchByIter(SSkipList *pSkipList, void *iter, iter_next_fn_t it
for (int i = 0; i < pSkipList->maxLevel; i++) {
forward[i] = SL_NODE_GET_BACKWARD_POINTER(pSkipList->pTail, i);
}
} else if(compare == 0) {
// equal keys need special handling
forward[0] = SL_NODE_GET_BACKWARD_POINTER(SL_NODE_GET_BACKWARD_POINTER(pSkipList->pTail,0),0);
hasDup = true;
} else {
SSkipListNode *p = NULL;
SSkipListNode *px = pSkipList->pHead;
for (int i = pSkipList->maxLevel - 1; i >= 0; --i) {
if (i < pSkipList->level) {
......@@ -175,19 +179,29 @@ void tSkipListPutBatchByIter(SSkipList *pSkipList, void *iter, iter_next_fn_t it
}
}
SSkipListNode *p = SL_NODE_GET_FORWARD_POINTER(px, i);
// if px is not the head node, the comparison must start from px
if(px == pSkipList->pHead) {
p = SL_NODE_GET_FORWARD_POINTER(px, i);
} else {
p = px;
}
while (p != pSkipList->pTail) {
pKey = SL_GET_NODE_KEY(pSkipList, p);
compare = pSkipList->comparFn(pKey, pDataKey);
if (compare >= 0) {
if (compare == 0 && !hasDup) hasDup = true;
if (compare == 0) {
hasDup = true;
forward[0] = SL_NODE_GET_BACKWARD_POINTER(p, 0);
}
break;
} else {
px = p;
p = SL_NODE_GET_FORWARD_POINTER(px, i);
}
}
// if a duplicate was found, break immediately; no need to keep looping to set the remaining forward[i] values
if(hasDup) break;
}
forward[i] = px;
......
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# -*- coding: utf-8 -*-
import os
import sys
sys.path.insert(0, os.getcwd())
from util.log import *
from util.sql import *
from util.dnodes import *
import taos
import threading
import subprocess
from random import choice
class TwoClients:
def initConnection(self):
self.host = "chenhaoran01"
self.user = "root"
self.password = "taosdata"
self.config = "/home/chr/cfg/single/"
self.port =6030
self.rowNum = 10
self.ts = 1537146000000
def getBuildPath(self):
selfPath = os.path.dirname(os.path.realpath(__file__))
if ("community" in selfPath):
projPath = selfPath[:selfPath.find("community")]
else:
projPath = selfPath[:selfPath.find("tests")]
for root, dirs, files in os.walk(projPath):
if ("taosd" in files):
rootRealPath = os.path.dirname(os.path.realpath(root))
if ("packaging" not in rootRealPath):
buildPath = root[:len(root)-len("/build/bin")]
break
return buildPath
def run(self):
buildPath = self.getBuildPath()
if (buildPath == ""):
tdLog.exit("taosd not found!")
else:
tdLog.info("taosd found in %s" % buildPath)
binPath = buildPath+ "/build/bin/"
walFilePath = "/var/lib/taos/mnode_bak/wal/"
# new taos client
conn1 = taos.connect(host=self.host, user=self.user, password=self.password, config=self.config )
print(conn1)
cur1 = conn1.cursor()
tdSql.init(cur1, True)
# create background db and tb
tdSql.execute("drop database if exists db1")
os.system("%staosdemo -f compress/insertDataDb1.json -y " % binPath)
# create foreground db and tb
tdSql.execute("drop database if exists foredb")
tdSql.execute("create database foredb")
tdSql.execute("use foredb")
print("123test")
tdSql.execute("create stable if not exists stb (ts timestamp, dataInt int, dataDouble double,dataStr nchar(200)) tags(loc nchar(50),t1 int)")
tdSql.execute("create table tb1 using stb tags('beijing1', 10)")
tdSql.execute("insert into tb1 values(1614218412000,8635,98.861,'qazwsxedcrfvtgbyhnujmikolp1')(1614218422000,8636,98.862,'qazwsxedcrfvtgbyhnujmikolp2')")
tdSql.execute("create table tb2 using stb tags('beijing2', 11)")
tdSql.execute("insert into tb2 values(1614218432000,8647,98.863,'qazwsxedcrfvtgbyhnujmikolp3')")
tdSql.execute("insert into tb2 values(1614218442000,8648,98.864,'qazwsxedcrfvtgbyhnujmikolp4')")
# check data correct
tdSql.execute("use db1")
tdSql.query("select count(tbname) from stb0")
tdSql.checkData(0, 0, 50000)
tdSql.query("select count(*) from stb0")
tdSql.checkData(0, 0, 5000000)
tdSql.execute("use foredb")
tdSql.query("select count (tbname) from stb")
tdSql.checkData(0, 0, 2)
tdSql.query("select count (*) from stb")
tdSql.checkData(0, 0, 4)
tdSql.query("select * from tb1 order by ts")
tdSql.checkData(0, 3, "qazwsxedcrfvtgbyhnujmikolp1")
tdSql.query("select * from tb2 order by ts")
tdSql.checkData(1, 3, "qazwsxedcrfvtgbyhnujmikolp4")
# delete useless file
testcaseFilename = os.path.split(__file__)[-1]
os.system("rm -rf ./insert_res.txt")
# os.system("rm -rf compress/%s.sql" % testcaseFilename )
clients = TwoClients()
clients.initConnection()
# clients.getBuildPath()
clients.run()
\ No newline at end of file
{
"filetype": "insert",
"cfgdir": "/etc/taos",
"host": "127.0.0.1",
"port": 6030,
"user": "root",
"password": "taosdata",
"thread_count": 4,
"thread_count_create_tbl": 4,
"result_file": "./insert_res.txt",
"confirm_parameter_prompt": "no",
"insert_interval": 0,
"interlace_rows": 10,
"num_of_records_per_req": 1000,
"max_sql_len": 1024000,
"databases": [{
"dbinfo": {
"name": "db1",
"drop": "yes",
"replica": 1,
"days": 10,
"cache": 50,
"blocks": 8,
"precision": "ms",
"keep": 3650,
"minRows": 100,
"maxRows": 4096,
"comp":2,
"walLevel":1,
"cachelast":0,
"quorum":1,
"fsync":3000,
"update": 0
},
"super_tables": [{
"name": "stb0",
"child_table_exists":"no",
"childtable_count": 50000,
"childtable_prefix": "stb00_",
"auto_create_table": "no",
"batch_create_tbl_num": 100,
"data_source": "rand",
"insert_mode": "taosc",
"insert_rows": 100,
"childtable_limit": 0,
"childtable_offset":0,
"interlace_rows": 0,
"insert_interval":0,
"max_sql_len": 1024000,
"disorder_ratio": 0,
"disorder_range": 1000,
"timestamp_step": 1,
"start_timestamp": "2020-10-01 00:00:00.000",
"sample_format": "csv",
"sample_file": "./sample.csv",
"tags_file": "",
"columns": [{"type": "INT"},{"type": "TINYINT"},{"type": "smallint"},{"type": "bool"},{"type": "bigint"},{"type": "float"},{"type": "double"}, {"type": "BINARY","len": 32}, {"type": "nchar","len": 32}],
"tags": [{"type": "TINYINT", "count":2}, {"type": "BINARY", "len": 16, "count":1}]
}]
}]
}
......@@ -273,6 +273,7 @@ python3 ./test.py -f query/queryStateWindow.py
# python3 ./test.py -f query/nestedQuery/queryWithOrderLimit.py
python3 ./test.py -f query/nestquery_last_row.py
python3 ./test.py -f query/nestedQuery/nestedQuery.py
python3 ./test.py -f query/nestedQuery/nestedQuery_datacheck.py
python3 ./test.py -f query/queryCnameDisplay.py
# python3 ./test.py -f query/operator_cost.py
# python3 ./test.py -f query/long_where_query.py
......
......@@ -61,6 +61,12 @@ class TDTestCase:
tdSql.query("select count(*) from stb")
tdSql.checkData(0, 0, 4096)
sql = "create table stb(ts timestamp, "
for i in range(15):
sql += "col%d binary(1022), " % (i + 1)
sql += "col1023 binary(1015))"
tdSql.error(sql)
endTime = time.time()
sql = "create table stb(ts timestamp, "
......
......@@ -1714,7 +1714,6 @@ class TDTestCase:
sql += " from regular_table_1 t1, regular_table_2 t2 where t1.ts = t2.ts and "
sql += "%s " % random.choice(q_u_where)
sql += "%s " % random.choice(session_u_where)
sql += "%s " % random.choice(fill_where)
sql += "%s " % random.choice(order_u_where)
sql += "%s " % random.choice(limit_u_where)
sql += ") "
......@@ -1731,7 +1730,6 @@ class TDTestCase:
sql += " from regular_table_1 t1, regular_table_2 t2 where t1.ts = t2.ts and "
sql += "%s " % random.choice(q_u_or_where)
sql += "%s " % random.choice(session_u_where)
sql += "%s " % random.choice(fill_where)
sql += "%s " % random.choice(order_u_where)
sql += "%s " % random.choice(limit_u_where)
sql += ") "
......@@ -1767,7 +1765,6 @@ class TDTestCase:
sql += " from table_1 t1, regular_table_2 t2 where t1.ts = t2.ts and "
sql += "%s " % random.choice(q_u_where)
sql += "%s " % random.choice(session_u_where)
sql += "%s " % random.choice(fill_where)
sql += "%s " % random.choice(order_u_where)
sql += "%s " % random.choice(limit_u_where)
sql += ") "
......@@ -1784,7 +1781,6 @@ class TDTestCase:
sql += " from table_1 t1, regular_table_2 t2 where t1.ts = t2.ts and "
sql += "%s " % random.choice(q_u_or_where)
sql += "%s " % random.choice(session_u_where)
sql += "%s " % random.choice(fill_where)
sql += "%s " % random.choice(order_u_where)
sql += "%s " % random.choice(limit_u_where)
sql += ") "
......@@ -1818,7 +1814,6 @@ class TDTestCase:
sql += " from stable_1 t1, stable_2 t2 where t1.ts = t2.ts and "
sql += "%s " % random.choice(t_join_where)
sql += "%s " % random.choice(session_u_where)
sql += "%s " % random.choice(fill_where)
sql += "%s " % random.choice(order_u_where)
sql += "%s " % random.choice(limit_u_where)
sql += ") "
......@@ -1835,7 +1830,6 @@ class TDTestCase:
sql += " from stable_1 t1, stable_2 t2 where t1.ts = t2.ts and "
sql += "%s " % random.choice(qt_u_or_where)
sql += "%s " % random.choice(session_u_where)
sql += "%s " % random.choice(fill_where)
sql += "%s " % random.choice(order_u_where)
sql += "%s " % random.choice(limit_u_where)
sql += ") "
......@@ -2015,7 +2009,6 @@ class TDTestCase:
sql += " from stable_1 t1 , stable_2 t2 where t1.ts = t2.ts and "
sql += "%s and " % random.choice(t_join_where)
sql += "%s " % random.choice(interp_where_j)
sql += "%s " % random.choice(fill_where)
sql += "%s " % random.choice(order_u_where)
sql += "%s " % random.choice(limit_u_where)
sql += ") "
......@@ -2032,7 +2025,6 @@ class TDTestCase:
sql += " from stable_1 t1 , stable_2 t2 where t1.ts = t2.ts and "
sql += "%s and " % random.choice(qt_u_or_where)
sql += "%s " % random.choice(interp_where_j)
sql += "%s " % random.choice(fill_where)
sql += "%s " % random.choice(order_u_where)
sql += "%s " % random.choice(limit_u_where)
sql += ") "
......@@ -2065,7 +2057,6 @@ class TDTestCase:
sql += " from table_0 t1, table_1 t2 where t1.ts = t2.ts and "
#sql += "%s and " % random.choice(t_join_where)
sql += "%s " % interp_where_j[random.randint(0,5)]
sql += "%s " % random.choice(fill_where)
sql += "%s " % random.choice(order_u_where)
sql += "%s " % random.choice(limit_u_where)
sql += ") "
......@@ -2116,7 +2107,6 @@ class TDTestCase:
sql += " from regular_table_1 t1, regular_table_2 t2 where t1.ts = t2.ts and "
#sql += "%s " % random.choice(interp_where_j)
sql += "%s " % interp_where_j[random.randint(0,5)]
sql += "%s " % random.choice(fill_where)
sql += "%s " % random.choice(order_u_where)
sql += "%s " % random.choice(limit_u_where)
sql += ") "
......
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# -*- coding: utf-8 -*-
import random
import string
import os
import sys
import time
from util.log import tdLog
from util.cases import tdCases
from util.sql import tdSql
from util.dnodes import tdDnodes
from util.dnodes import *
class TDTestCase:
updatecfgDict={'maxSQLLength':1048576}
def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor(), logSql)
os.system("rm -rf query/nestedQuery/nestedQuery_datacheck.py.sql")
now = time.time()
self.ts = 1630000000000
self.num = 100
self.fornum = 3
def get_random_string(self, length):
letters = string.ascii_lowercase
result_str = ''.join(random.choice(letters) for i in range(length))
return result_str
def getBuildPath(self):
selfPath = os.path.dirname(os.path.realpath(__file__))
if ("community" in selfPath):
projPath = selfPath[:selfPath.find("community")]
else:
projPath = selfPath[:selfPath.find("tests")]
for root, dirs, files in os.walk(projPath):
if ("taosd" in files):
rootRealPath = os.path.dirname(os.path.realpath(root))
if ("packaging" not in rootRealPath):
buildPath = root[:len(root)-len("/build/bin")]
break
return buildPath
def restartDnodes(self):
tdDnodes.stop(1)
tdDnodes.start(1)
def dropandcreateDB(self,n):
for i in range(n):
tdSql.execute('''drop database if exists db ;''')
tdSql.execute('''create database db keep 36500;''')
tdSql.execute('''use db;''')
tdSql.execute('''create stable stable_1
(ts timestamp , q_int int , q_bigint bigint , q_smallint smallint , q_tinyint tinyint,
q_bool bool , q_binary binary(20) , q_nchar nchar(20) ,q_float float , q_double double , q_ts timestamp)
tags(loc nchar(20) , t_int int , t_bigint bigint , t_smallint smallint , t_tinyint tinyint,
t_bool bool , t_binary binary(20) , t_nchar nchar(20) ,t_float float , t_double double , t_ts timestamp);''')
tdSql.execute('''create stable stable_2
(ts timestamp , q_int int , q_bigint bigint , q_smallint smallint , q_tinyint tinyint,
q_bool bool , q_binary binary(20) , q_nchar nchar(20) ,q_float float , q_double double , q_ts timestamp)
tags(loc nchar(20) , t_int int , t_bigint bigint , t_smallint smallint , t_tinyint tinyint,
t_bool bool , t_binary binary(20) , t_nchar nchar(20) ,t_float float , t_double double , t_ts timestamp);''')
tdSql.execute('''create table table_0 using stable_1
tags('table_0' , '0' , '0' , '0' , '0' , 0 , '0' , '0' , '0' , '0' ,'0')''')
tdSql.execute('''create table table_1 using stable_1
tags('table_1' , '2147483647' , '9223372036854775807' , '32767' , '127' , 1 ,
'binary1' , 'nchar1' , '1' , '11' , \'1999-09-09 09:09:09.090\')''')
tdSql.execute('''create table table_2 using stable_1
tags('table_2' , '-2147483647' , '-9223372036854775807' , '-32767' , '-127' , false ,
'binary2' , 'nchar2nchar2' , '-2.2' , '-22.22' , \'2099-09-09 09:09:09.090\')''')
tdSql.execute('''create table table_21 using stable_2
tags('table_21' , '0' , '0' , '0' , '0' , 0 , '0' , '0' , '0' , '0' ,'0')''')
#regular table
tdSql.execute('''create table regular_table_1
(ts timestamp , q_int int , q_bigint bigint , q_smallint smallint , q_tinyint tinyint,
q_bool bool , q_binary binary(20) , q_nchar nchar(20) ,q_float float , q_double double , q_ts timestamp) ;''')
tdSql.execute('''create table regular_table_2
(ts timestamp , q_int int , q_bigint bigint , q_smallint smallint , q_tinyint tinyint,
q_bool bool , q_binary binary(20) , q_nchar nchar(20) ,q_float float , q_double double , q_ts timestamp) ;''')
for i in range(self.num):
tdSql.execute('''insert into table_0 values(%d, %d, %d, %d, %d, 0, 'binary.%s', 'nchar.%s', %f, %f, %d)'''
% (self.ts + i*10000000, i, i, i, i, i, i, i, i, self.ts + i))
tdSql.execute('''insert into table_1 values(%d, %d, %d, %d, %d, 1, 'binary1.%s', 'nchar1.%s', %f, %f, %d)'''
% (self.ts + i*10000000+1, 2147483647-i, 9223372036854775807-i, 32767-i, 127-i,
i, i, i, i, 1262304000001 + i))
tdSql.execute('''insert into table_2 values(%d, %d, %d, %d, %d, true, 'binary2.%s', 'nchar2nchar2.%s', %f, %f, %d)'''
% (self.ts + i*10000000+2, -2147483647+i, -9223372036854775807+i, -32767+i, -127+i,
i, i, -i, -i, 1577836800001 + i))
tdSql.execute('''insert into table_21 values(%d, %d, %d, %d, %d, 0, 'binary.%s', 'nchar.%s', %f, %f, %d)'''
% (self.ts + i*10000000, i, i, i, i, i, i, i, i, self.ts + i))
tdSql.execute('''insert into regular_table_1 values(%d, %d, %d, %d, %d, 0, 'binary.%s', 'nchar.%s', %f, %f, %d)'''
% (self.ts + i*10000000, i, i, i, i, i, i, i, i, self.ts + i))
tdSql.execute('''insert into regular_table_2 values(%d, %d, %d, %d, %d, 1, 'binary1.%s', 'nchar1.%s', %f, %f, %d)'''
% (self.ts + i*10000000 , 2147483647-i, 9223372036854775807-i, 32767-i, 127-i,
i, i, i, i, 1262304000001 + i))
tdSql.execute('''insert into regular_table_2 values(%d, %d, %d, %d, %d, true, 'binary2.%s', 'nchar2nchar2.%s', %f, %f, %d)'''
% (self.ts + i*10000000 +2, -2147483647+i, -9223372036854775807+i, -32767+i, -127+i,
i, i, -i, -i, 1577836800001 + i))
def regular1_checkall_0(self,sql):
tdLog.info(sql)
tdSql.query(sql)
tdSql.checkData(0,0,'2021-08-27 01:46:40.000')
tdSql.checkData(0,1,0)
tdSql.checkData(0,2,0)
tdSql.checkData(0,3,0)
tdSql.checkData(0,4,0)
tdSql.checkData(0,5,'False')
tdSql.checkData(0,6,'binary.0')
tdSql.checkData(0,7,'nchar.0')
tdSql.checkData(0,8,0)
tdSql.checkData(0,9,0)
tdSql.checkData(0,10,'2021-08-27 01:46:40.000')
def regular1_checkall_100(self,sql):
tdLog.info(sql)
tdSql.query(sql)
tdSql.checkData(99,0,'2021-09-07 12:46:40.000')
tdSql.checkData(99,1,99)
tdSql.checkData(99,2,99)
tdSql.checkData(99,3,99)
tdSql.checkData(99,4,99)
tdSql.checkData(99,5,'False')
tdSql.checkData(99,6,'binary.99')
tdSql.checkData(99,7,'nchar.99')
tdSql.checkData(99,8,99)
tdSql.checkData(99,9,99)
tdSql.checkData(99,10,'2021-08-27 01:46:40.099')
def regular_join_checkall_0(self,sql):
self.regular1_checkall_0(sql)
tdSql.checkData(0,11,'2021-08-27 01:46:40.000')
tdSql.checkData(0,12,2147483647)
tdSql.checkData(0,13,9223372036854775807)
tdSql.checkData(0,14,32767)
tdSql.checkData(0,15,127)
tdSql.checkData(0,16,'True')
tdSql.checkData(0,17,'binary1.0')
tdSql.checkData(0,18,'nchar1.0')
tdSql.checkData(0,19,0)
tdSql.checkData(0,20,0)
tdSql.checkData(0,21,'2010-01-01 08:00:00.001')
def regular_join_checkall_100(self,sql):
self.regular1_checkall_100(sql)
tdSql.checkData(99,11,'2021-09-07 12:46:40.000')
tdSql.checkData(99,12,2147483548)
tdSql.checkData(99,13,9223372036854775708)
tdSql.checkData(99,14,32668)
tdSql.checkData(99,15,28)
tdSql.checkData(99,16,'True')
tdSql.checkData(99,17,'binary1.99')
tdSql.checkData(99,18,'nchar1.99')
tdSql.checkData(99,19,99)
tdSql.checkData(99,20,99)
tdSql.checkData(99,21,'2010-01-01 08:00:00.100')
def stable1_checkall_0(self,sql):
self.regular1_checkall_0(sql)
def stable1_checkall_300(self,sql):
tdLog.info(sql)
tdSql.query(sql)
tdSql.checkData(299,0,'2021-09-07 12:46:40.002')
tdSql.checkData(299,1,-2147483548)
tdSql.checkData(299,2,-9223372036854775708)
tdSql.checkData(299,3,-32668)
tdSql.checkData(299,4,-28)
tdSql.checkData(299,5,'True')
tdSql.checkData(299,6,'binary2.99')
tdSql.checkData(299,7,'nchar2nchar2.99')
tdSql.checkData(299,8,-99)
tdSql.checkData(299,9,-99)
tdSql.checkData(299,10,'2010-01-01 08:00:00.100')
def stable_join_checkall_0(self,sql):
self.regular1_checkall_0(sql)
tdSql.checkData(0,22,'2021-08-27 01:46:40.000')
tdSql.checkData(0,23,0)
tdSql.checkData(0,24,0)
tdSql.checkData(0,25,0)
tdSql.checkData(0,26,0)
tdSql.checkData(0,27,'False')
tdSql.checkData(0,28,'binary.0')
tdSql.checkData(0,29,'nchar.0')
tdSql.checkData(0,30,0)
tdSql.checkData(0,31,0)
tdSql.checkData(0,32,'2021-08-27 01:46:40.000')
def stable_join_checkall_100(self,sql):
tdSql.checkData(99,0,'2021-09-07 12:46:40.000')
tdSql.checkData(99,1,99)
tdSql.checkData(99,2,99)
tdSql.checkData(99,3,99)
tdSql.checkData(99,4,99)
tdSql.checkData(99,5,'False')
tdSql.checkData(99,6,'binary.99')
tdSql.checkData(99,7,'nchar.99')
tdSql.checkData(99,8,99)
tdSql.checkData(99,9,99)
tdSql.checkData(99,10,'2021-08-27 01:46:40.099')
tdSql.checkData(99,22,'2021-09-07 12:46:40.000')
tdSql.checkData(99,23,99)
tdSql.checkData(99,24,99)
tdSql.checkData(99,25,99)
tdSql.checkData(99,26,99)
tdSql.checkData(99,27,'False')
tdSql.checkData(99,28,'binary.99')
tdSql.checkData(99,29,'nchar.99')
tdSql.checkData(99,30,99)
tdSql.checkData(99,31,99)
tdSql.checkData(99,32,'2021-08-27 01:46:40.099')
def run(self):
tdSql.prepare()
# test case for https://jira.taosdata.com:18080/browse/TD-5665
os.system("rm -rf nestedQuery.py.sql")
startTime = time.time()
dcDB = self.dropandcreateDB(1)
# regular column select
q_select= ['ts' , '*' , 'q_int', 'q_bigint' , 'q_bigint' , 'q_smallint' , 'q_tinyint' , 'q_bool' , 'q_binary' , 'q_nchar' ,'q_float' , 'q_double' ,'q_ts ']
# tag column select
t_select= ['*' , 'loc' ,'t_int', 't_bigint' , 't_bigint' , 't_smallint' , 't_tinyint' , 't_bool' , 't_binary' , 't_nchar' ,'t_float' , 't_double' ,'t_ts ']
# regular and tag column select
qt_select= q_select + t_select
# distinct regular column select
dq_select= ['distinct q_int', 'distinct q_bigint' , 'distinct q_smallint' , 'distinct q_tinyint' ,
'distinct q_bool' , 'distinct q_binary' , 'distinct q_nchar' ,'distinct q_float' , 'distinct q_double' ,'distinct q_ts ']
# distinct tag column select
dt_select= ['distinct loc', 'distinct t_int', 'distinct t_bigint' , 'distinct t_smallint' , 'distinct t_tinyint' ,
'distinct t_bool' , 'distinct t_binary' , 'distinct t_nchar' ,'distinct t_float' , 'distinct t_double' ,'distinct t_ts ']
# distinct regular and tag column select
dqt_select= dq_select + dt_select
# special column select
s_r_select= ['_c0', '_C0' ]
s_s_select= ['tbname' , '_c0', '_C0' ]
# regular column where
q_where = ['ts < now +1s','q_bigint >= -9223372036854775807 and q_bigint <= 9223372036854775807', 'q_int <= 2147483647 and q_int >= -2147483647',
'q_smallint >= -32767 and q_smallint <= 32767','q_tinyint >= -127 and q_tinyint <= 127','q_float >= -100000 and q_float <= 100000',
'q_double >= -1000000000 and q_double <= 1000000000', 'q_binary like \'binary%\' or q_binary = \'0\' ' , 'q_nchar like \'nchar%\' or q_nchar = \'0\' ' ,
'q_bool = true or q_bool = false' , 'q_bool in (0 , 1)' , 'q_bool in ( true , false)' , 'q_bool = 0 or q_bool = 1',
'q_bigint between -9223372036854775807 and 9223372036854775807',' q_int between -2147483647 and 2147483647','q_smallint between -32767 and 32767',
'q_tinyint between -127 and 127 ','q_float between -100000 and 100000','q_double between -1000000000 and 1000000000']
#TD-6201 ,'q_bool between 0 and 1'
# regular column where clauses for union/join tests
q_u_where = ['t1.ts < now +1s' , 't2.ts < now +1s','t1.q_bigint >= -9223372036854775807 and t1.q_bigint <= 9223372036854775807 and t2.q_bigint >= -9223372036854775807 and t2.q_bigint <= 9223372036854775807',
't1.q_int <= 2147483647 and t1.q_int >= -2147483647 and t2.q_int <= 2147483647 and t2.q_int >= -2147483647',
't1.q_smallint >= -32767 and t1.q_smallint <= 32767 and t2.q_smallint >= -32767 and t2.q_smallint <= 32767',
't1.q_tinyint >= -127 and t1.q_tinyint <= 127 and t2.q_tinyint >= -127 and t2.q_tinyint <= 127',
't1.q_float >= -100000 and t1.q_float <= 100000 and t2.q_float >= -100000 and t2.q_float <= 100000',
't1.q_double >= -1000000000 and t1.q_double <= 1000000000 and t2.q_double >= -1000000000 and t2.q_double <= 1000000000',
't1.q_binary like \'binary%\' and t2.q_binary like \'binary%\' ' ,
't1.q_nchar like \'nchar%\' and t2.q_nchar like \'nchar%\' ' ,
't1.q_bool in (0 , 1) and t2.q_bool in (0 , 1)' , 't1.q_bool in ( true , false) and t2.q_bool in ( true , false)' ,
't1.q_bigint between -9223372036854775807 and 9223372036854775807 and t2.q_bigint between -9223372036854775807 and 9223372036854775807',
't1.q_int between -2147483647 and 2147483647 and t2.q_int between -2147483647 and 2147483647',
't1.q_smallint between -32767 and 32767 and t2.q_smallint between -32767 and 32767',
't1.q_tinyint between -127 and 127 and t2.q_tinyint between -127 and 127 ','t1.q_float between -100000 and 100000 and t2.q_float between -100000 and 100000',
't1.q_double between -1000000000 and 1000000000 and t2.q_double between -1000000000 and 1000000000']
#TD-6201 ,'t1.q_bool between 0 and 1 or t2.q_bool between 0 and 1']
#'t1.q_bool = true and t1.q_bool = false and t2.q_bool = true and t2.q_bool = false' , 't1.q_bool = 0 and t1.q_bool = 1 and t2.q_bool = 0 and t2.q_bool = 1' ,
q_u_or_where = ['t1.q_binary like \'binary%\' or t1.q_binary = \'0\' or t2.q_binary like \'binary%\' or t2.q_binary = \'0\' ' ,
't1.q_nchar like \'nchar%\' or t1.q_nchar = \'0\' or t2.q_nchar like \'nchar%\' or t2.q_nchar = \'0\' ' , 't1.q_bool = true or t1.q_bool = false or t2.q_bool = true or t2.q_bool = false' ,
't1.q_bool in (0 , 1) or t2.q_bool in (0 , 1)' , 't1.q_bool in ( true , false) or t2.q_bool in ( true , false)' , 't1.q_bool = 0 or t1.q_bool = 1 or t2.q_bool = 0 or t2.q_bool = 1' ,
't1.q_bigint between -9223372036854775807 and 9223372036854775807 or t2.q_bigint between -9223372036854775807 and 9223372036854775807',
't1.q_int between -2147483647 and 2147483647 or t2.q_int between -2147483647 and 2147483647',
't1.q_smallint between -32767 and 32767 or t2.q_smallint between -32767 and 32767',
't1.q_tinyint between -127 and 127 or t2.q_tinyint between -127 and 127 ','t1.q_float between -100000 and 100000 or t2.q_float between -100000 and 100000',
't1.q_double between -1000000000 and 1000000000 or t2.q_double between -1000000000 and 1000000000']
# tag column where
t_where = ['ts < now +1s','t_bigint >= -9223372036854775807 and t_bigint <= 9223372036854775807','t_int <= 2147483647 and t_int >= -2147483647',
't_smallint >= -32767 and t_smallint <= 32767','t_tinyint >= -127 and t_tinyint <= 127','t_float >= -100000 and t_float <= 100000',
't_double >= -1000000000 and t_double <= 1000000000', 't_binary like \'binary%\' or t_binary = \'0\' ' , 't_nchar like \'nchar%\' or t_nchar = \'0\'' ,
't_bool = true or t_bool = false' , 't_bool in (0 , 1)' , 't_bool in ( true , false)' , 't_bool = 0 or t_bool = 1',
't_bigint between -9223372036854775807 and 9223372036854775807',' t_int between -2147483647 and 2147483647','t_smallint between -32767 and 32767',
't_tinyint between -127 and 127 ','t_float between -100000 and 100000','t_double between -1000000000 and 1000000000']
#TD-6201,'t_bool between 0 and 1'
# tag column where clauses for union/join tests | not supported yet
t_u_where = ['t1.ts < now +1s' , 't2.ts < now +1s','t1.t_bigint >= -9223372036854775807 and t1.t_bigint <= 9223372036854775807 and t2.t_bigint >= -9223372036854775807 and t2.t_bigint <= 9223372036854775807',
't1.t_int <= 2147483647 and t1.t_int >= -2147483647 and t2.t_int <= 2147483647 and t2.t_int >= -2147483647',
't1.t_smallint >= -32767 and t1.t_smallint <= 32767 and t2.t_smallint >= -32767 and t2.t_smallint <= 32767',
't1.t_tinyint >= -127 and t1.t_tinyint <= 127 and t2.t_tinyint >= -127 and t2.t_tinyint <= 127',
't1.t_float >= -100000 and t1.t_float <= 100000 and t2.t_float >= -100000 and t2.t_float <= 100000',
't1.t_double >= -1000000000 and t1.t_double <= 1000000000 and t2.t_double >= -1000000000 and t2.t_double <= 1000000000',
't1.t_binary like \'binary%\' or t1.t_binary = \'0\' or t2.t_binary like \'binary%\' or t2.t_binary = \'0\' ' ,
't1.t_nchar like \'nchar%\' or t1.t_nchar = \'0\' or t2.t_nchar like \'nchar%\' or t2.t_nchar = \'0\' ' , 't1.t_bool = true or t1.t_bool = false or t2.t_bool = true or t2.t_bool = false' ,
't1.t_bool in (0 , 1) and t2.t_bool in (0 , 1)' , 't1.t_bool in ( true , false) and t2.t_bool in ( true , false)' , 't1.t_bool = 0 or t1.t_bool = 1 or t2.t_bool = 0 or t2.t_bool = 1',
't1.t_bigint between -9223372036854775807 and 9223372036854775807 and t2.t_bigint between -9223372036854775807 and 9223372036854775807',
't1.t_int between -2147483647 and 2147483647 and t2.t_int between -2147483647 and 2147483647',
't1.t_smallint between -32767 and 32767 and t2.t_smallint between -32767 and 32767',
't1.t_tinyint between -127 and 127 and t2.t_tinyint between -127 and 127 ','t1.t_float between -100000 and 100000 and t2.t_float between -100000 and 100000',
't1.t_double between -1000000000 and 1000000000 and t2.t_double between -1000000000 and 1000000000']
#TD-6201,'t1.t_bool between 0 and 1 or t2.q_bool between 0 and 1']
t_u_or_where = ['t1.t_binary like \'binary%\' or t1.t_binary = \'0\' or t2.t_binary like \'binary%\' or t2.t_binary = \'0\' ' ,
't1.t_nchar like \'nchar%\' or t1.t_nchar = \'0\' or t2.t_nchar like \'nchar%\' or t2.t_nchar = \'0\' ' , 't1.t_bool = true or t1.t_bool = false or t2.t_bool = true or t2.t_bool = false' ,
't1.t_bool in (0 , 1) or t2.t_bool in (0 , 1)' , 't1.t_bool in ( true , false) or t2.t_bool in ( true , false)' , 't1.t_bool = 0 or t1.t_bool = 1 or t2.t_bool = 0 or t2.t_bool = 1',
't1.t_bigint between -9223372036854775807 and 9223372036854775807 or t2.t_bigint between -9223372036854775807 and 9223372036854775807',
't1.t_int between -2147483647 and 2147483647 or t2.t_int between -2147483647 and 2147483647',
't1.t_smallint between -32767 and 32767 or t2.t_smallint between -32767 and 32767',
't1.t_tinyint between -127 and 127 or t2.t_tinyint between -127 and 127 ','t1.t_float between -100000 and 100000 or t2.t_float between -100000 and 100000',
't1.t_double between -1000000000 and 1000000000 or t2.t_double between -1000000000 and 1000000000']
# regular and tag column where
qt_where = q_where + t_where
qt_u_where = q_u_where + t_u_where
# currently, qt_u_or_where is not supported
qt_u_or_where = q_u_or_where + t_u_or_where
# tag column join conditions for super table join tests | this is supported; 't1.t_bool = t2.t_bool' is still in question
t_join_where = ['t1.t_bigint = t2.t_bigint ', 't1.t_int = t2.t_int ', 't1.t_smallint = t2.t_smallint ', 't1.t_tinyint = t2.t_tinyint ',
't1.t_float = t2.t_float ', 't1.t_double = t2.t_double ', 't1.t_binary = t2.t_binary ' , 't1.t_nchar = t2.t_nchar ' ]
# session && fill
session_where = ['session(ts,10a)' , 'session(ts,10s)', 'session(ts,10m)' , 'session(ts,10h)','session(ts,10d)' , 'session(ts,10w)']
session_u_where = ['session(t1.ts,10a)' , 'session(t1.ts,10s)', 'session(t1.ts,10m)' , 'session(t1.ts,10h)','session(t1.ts,10d)' , 'session(t1.ts,10w)',
'session(t2.ts,10a)' , 'session(t2.ts,10s)', 'session(t2.ts,10m)' , 'session(t2.ts,10h)','session(t2.ts,10d)' , 'session(t2.ts,10w)']
fill_where = ['FILL(NONE)','FILL(PREV)','FILL(NULL)','FILL(LINEAR)','FILL(NEXT)','FILL(VALUE, 1.23)']
state_window = ['STATE_WINDOW(q_tinyint)','STATE_WINDOW(q_bigint)','STATE_WINDOW(q_int)','STATE_WINDOW(q_bool)','STATE_WINDOW(q_smallint)']
state_u_window = ['STATE_WINDOW(t1.q_tinyint)','STATE_WINDOW(t1.q_bigint)','STATE_WINDOW(t1.q_int)','STATE_WINDOW(t1.q_bool)','STATE_WINDOW(t1.q_smallint)',
'STATE_WINDOW(t2.q_tinyint)','STATE_WINDOW(t2.q_bigint)','STATE_WINDOW(t2.q_int)','STATE_WINDOW(t2.q_bool)','STATE_WINDOW(t2.q_smallint)']
# order by where
order_where = ['order by ts' , 'order by ts asc']
order_u_where = ['order by t1.ts' , 'order by t1.ts asc' , 'order by t2.ts' , 'order by t2.ts asc']
order_desc_where = ['order by ts' , 'order by ts asc' , 'order by ts desc' ]
orders_desc_where = ['order by ts' , 'order by ts asc' , 'order by ts desc' , 'order by loc' , 'order by loc asc' , 'order by loc desc']
# group by clauses, excluding null tags
group_where = ['group by tbname , loc' , 'group by tbname', 'group by tbname, t_bigint', 'group by tbname,t_int', 'group by tbname, t_smallint', 'group by tbname,t_tinyint',
'group by tbname,t_float', 'group by tbname,t_double' , 'group by tbname,t_binary', 'group by tbname,t_nchar', 'group by tbname,t_bool' ,'group by tbname ,loc ,t_bigint',
'group by tbname,t_binary ,t_nchar ,t_bool' , 'group by tbname,t_int ,t_smallint ,t_tinyint' , 'group by tbname,t_float ,t_double ' ]
having_support = ['having count(q_int) > 0','having count(q_bigint) > 0','having count(q_smallint) > 0','having count(q_tinyint) > 0','having count(q_float) > 0','having count(q_double) > 0','having count(q_bool) > 0',
'having avg(q_int) > 0','having avg(q_bigint) > 0','having avg(q_smallint) > 0','having avg(q_tinyint) > 0','having avg(q_float) > 0','having avg(q_double) > 0',
'having sum(q_int) > 0','having sum(q_bigint) > 0','having sum(q_smallint) > 0','having sum(q_tinyint) > 0','having sum(q_float) > 0','having sum(q_double) > 0',
'having STDDEV(q_int) > 0','having STDDEV(q_bigint) > 0','having STDDEV(q_smallint) > 0','having STDDEV(q_tinyint) > 0','having STDDEV(q_float) > 0','having STDDEV(q_double) > 0',
'having TWA(q_int) > 0','having TWA(q_bigint) > 0','having TWA(q_smallint) > 0','having TWA(q_tinyint) > 0','having TWA(q_float) > 0','having TWA(q_double) > 0',
'having IRATE(q_int) > 0','having IRATE(q_bigint) > 0','having IRATE(q_smallint) > 0','having IRATE(q_tinyint) > 0','having IRATE(q_float) > 0','having IRATE(q_double) > 0',
'having MIN(q_int) > 0','having MIN(q_bigint) > 0','having MIN(q_smallint) > 0','having MIN(q_tinyint) > 0','having MIN(q_float) > 0','having MIN(q_double) > 0',
'having MAX(q_int) > 0','having MAX(q_bigint) > 0','having MAX(q_smallint) > 0','having MAX(q_tinyint) > 0','having MAX(q_float) > 0','having MAX(q_double) > 0',
'having FIRST(q_int) > 0','having FIRST(q_bigint) > 0','having FIRST(q_smallint) > 0','having FIRST(q_tinyint) > 0','having FIRST(q_float) > 0','having FIRST(q_double) > 0',
'having LAST(q_int) > 0','having LAST(q_bigint) > 0','having LAST(q_smallint) > 0','having LAST(q_tinyint) > 0','having LAST(q_float) > 0','having LAST(q_double) > 0',
'having APERCENTILE(q_int,10) > 0','having APERCENTILE(q_bigint,10) > 0','having APERCENTILE(q_smallint,10) > 0','having APERCENTILE(q_tinyint,10) > 0','having APERCENTILE(q_float,10) > 0','having APERCENTILE(q_double,10) > 0']
having_not_support = ['having TOP(q_int,10) > 0','having TOP(q_bigint,10) > 0','having TOP(q_smallint,10) > 0','having TOP(q_tinyint,10) > 0','having TOP(q_float,10) > 0','having TOP(q_double,10) > 0','having TOP(q_bool,10) > 0',
'having BOTTOM(q_int,10) > 0','having BOTTOM(q_bigint,10) > 0','having BOTTOM(q_smallint,10) > 0','having BOTTOM(q_tinyint,10) > 0','having BOTTOM(q_float,10) > 0','having BOTTOM(q_double,10) > 0','having BOTTOM(q_bool,10) > 0',
'having LEASTSQUARES(q_int) > 0','having LEASTSQUARES(q_bigint) > 0','having LEASTSQUARES(q_smallint) > 0','having LEASTSQUARES(q_tinyint) > 0','having LEASTSQUARES(q_float) > 0','having LEASTSQUARES(q_double) > 0','having LEASTSQUARES(q_bool) > 0',
'having FIRST(q_bool) > 0','having IRATE(q_bool) > 0','having PERCENTILE(q_bool,10) > 0','having avg(q_bool) > 0','having LAST_ROW(q_bool) > 0','having sum(q_bool) > 0','having STDDEV(q_bool) > 0','having APERCENTILE(q_bool,10) > 0','having TWA(q_bool) > 0','having LAST(q_bool) > 0',
'having PERCENTILE(q_int,10) > 0','having PERCENTILE(q_bigint,10) > 0','having PERCENTILE(q_smallint,10) > 0','having PERCENTILE(q_tinyint,10) > 0','having PERCENTILE(q_float,10) > 0','having PERCENTILE(q_double,10) > 0']
having_tagnot_support = ['having LAST_ROW(q_int) > 0','having LAST_ROW(q_bigint) > 0','having LAST_ROW(q_smallint) > 0','having LAST_ROW(q_tinyint) > 0','having LAST_ROW(q_float) > 0','having LAST_ROW(q_double) > 0']
# limit offset where
limit_where = ['limit 1 offset 1' , 'limit 1' , 'limit 2 offset 1' , 'limit 2', 'limit 12 offset 1' , 'limit 20', 'limit 20 offset 10' , 'limit 200']
limit1_where = ['limit 1 offset 1' , 'limit 1' ]
limit_u_where = ['limit 100 offset 10' , 'limit 50' , 'limit 100' , 'limit 10' ]
# slimit soffset where
slimit_where = ['slimit 1 soffset 1' , 'slimit 1' , 'slimit 2 soffset 1' , 'slimit 2']
slimit1_where = ['slimit 2 soffset 1' , 'slimit 1' ]
# aggregate functions include [all: count(*)\avg\sum\stddev || regular: twa\irate\leastsquares || group by tbname: twa\irate]
# select functions include [all: min\max\first(*)\last(*)\top\bottom\apercentile\last_row(*)(not with interval)\interp(*)(FILL) || regular: percentile]
# calculation functions include [all: spread\+-*/ || regular: diff\derivative || group by tbname: diff\derivative]
# expressions marked **_ns_** are not supported on super tables, so they are tested separately from regular tables
# calc_select_all calc_select_regular calc_select_in_ts calc_select_fill calc_select_not_interval
# calc_aggregate_all calc_aggregate_regular calc_aggregate_groupbytbname
# calc_calculate_all calc_calculate_regular calc_calculate_groupbytbname
# calc_select_all calc_select_regular calc_select_in_ts calc_select_fill calc_select_not_interval
# select functions include [all: min\max\first(*)\last(*)\top\bottom\apercentile\last_row(*)(not with interval)\interp(*)(FILL) || regular: percentile]
calc_select_all = ['bottom(q_int,20)' , 'bottom(q_bigint,20)' , 'bottom(q_smallint,20)' , 'bottom(q_tinyint,20)' ,'bottom(q_float,20)' , 'bottom(q_double,20)' ,
'top(q_int,20)' , 'top(q_bigint,20)' , 'top(q_smallint,20)' ,'top(q_tinyint,20)' ,'top(q_float,20)' ,'top(q_double,20)' ,
'first(q_int)' , 'first(q_bigint)' , 'first(q_smallint)' , 'first(q_tinyint)' , 'first(q_float)' ,'first(q_double)' ,'first(q_binary)' ,'first(q_nchar)' ,'first(q_bool)' ,'first(q_ts)' ,
'last(q_int)' , 'last(q_bigint)' , 'last(q_smallint)' , 'last(q_tinyint)' , 'last(q_float)' ,'last(q_double)' , 'last(q_binary)' ,'last(q_nchar)' ,'last(q_bool)' ,'last(q_ts)' ,
'min(q_int)' , 'min(q_bigint)' , 'min(q_smallint)' , 'min(q_tinyint)' , 'min(q_float)' ,'min(q_double)' ,
'max(q_int)' , 'max(q_bigint)' , 'max(q_smallint)' , 'max(q_tinyint)' ,'max(q_float)' ,'max(q_double)' ,
'apercentile(q_int,20)' , 'apercentile(q_bigint,20)' ,'apercentile(q_smallint,20)' ,'apercentile(q_tinyint,20)' ,'apercentile(q_float,20)' ,'apercentile(q_double,20)' ,
'last_row(q_int)' , 'last_row(q_bigint)' , 'last_row(q_smallint)' , 'last_row(q_tinyint)' , 'last_row(q_float)' ,
'last_row(q_double)' , 'last_row(q_bool)' ,'last_row(q_binary)' ,'last_row(q_nchar)' ,'last_row(q_ts)']
calc_select_in_ts = ['bottom(q_int,20)' , 'bottom(q_bigint,20)' , 'bottom(q_smallint,20)' , 'bottom(q_tinyint,20)' ,'bottom(q_float,20)' , 'bottom(q_double,20)' ,
'top(q_int,20)' , 'top(q_bigint,20)' , 'top(q_smallint,20)' ,'top(q_tinyint,20)' ,'top(q_float,20)' ,'top(q_double,20)' ,
'first(q_int)' , 'first(q_bigint)' , 'first(q_smallint)' , 'first(q_tinyint)' , 'first(q_float)' ,'first(q_double)' ,'first(q_binary)' ,'first(q_nchar)' ,'first(q_bool)' ,'first(q_ts)' ,
'last(q_int)' , 'last(q_bigint)' , 'last(q_smallint)' , 'last(q_tinyint)' , 'last(q_float)' ,'last(q_double)' , 'last(q_binary)' ,'last(q_nchar)' ,'last(q_bool)' ,'last(q_ts)' ]
calc_select_in = ['min(q_int)' , 'min(q_bigint)' , 'min(q_smallint)' , 'min(q_tinyint)' , 'min(q_float)' ,'min(q_double)' ,
'max(q_int)' , 'max(q_bigint)' , 'max(q_smallint)' , 'max(q_tinyint)' ,'max(q_float)' ,'max(q_double)' ,
'apercentile(q_int,20)' , 'apercentile(q_bigint,20)' ,'apercentile(q_smallint,20)' ,'apercentile(q_tinyint,20)' ,'apercentile(q_float,20)' ,'apercentile(q_double,20)' ,
'last_row(q_int)' , 'last_row(q_bigint)' , 'last_row(q_smallint)' , 'last_row(q_tinyint)' , 'last_row(q_float)' ,
'last_row(q_double)' , 'last_row(q_bool)' ,'last_row(q_binary)' ,'last_row(q_nchar)' ,'last_row(q_ts)']
calc_select_regular = [ 'PERCENTILE(q_int,10)' ,'PERCENTILE(q_bigint,20)' , 'PERCENTILE(q_smallint,30)' ,'PERCENTILE(q_tinyint,40)' ,'PERCENTILE(q_float,50)' ,'PERCENTILE(q_double,60)']
calc_select_fill = ['INTERP(q_bool)' ,'INTERP(q_binary)' ,'INTERP(q_nchar)' ,'INTERP(q_ts)', 'INTERP(q_int)' ,'INTERP(*)' ,'INTERP(q_bigint)' ,'INTERP(q_smallint)' ,'INTERP(q_tinyint)', 'INTERP(q_float)' ,'INTERP(q_double)']
interp_where = ['ts = now' , 'ts = \'2020-09-13 20:26:40.000\'' , 'ts = \'2020-09-13 20:26:40.009\'' ,'tbname in (\'table_1\') and ts = now' ,'tbname in (\'table_0\' ,\'table_1\',\'table_2\',\'table_3\',\'table_4\',\'table_5\') and ts = \'2020-09-13 20:26:40.000\'','tbname like \'table%\' and ts = \'2020-09-13 20:26:40.002\'']
#two table join
calc_select_in_ts_j = ['bottom(t1.q_int,20)' , 'bottom(t1.q_bigint,20)' , 'bottom(t1.q_smallint,20)' , 'bottom(t1.q_tinyint,20)' ,'bottom(t1.q_float,20)' , 'bottom(t1.q_double,20)' ,
'top(t1.q_int,20)' , 'top(t1.q_bigint,20)' , 'top(t1.q_smallint,20)' ,'top(t1.q_tinyint,20)' ,'top(t1.q_float,20)' ,'top(t1.q_double,20)' ,
'first(t1.q_int)' , 'first(t1.q_bigint)' , 'first(t1.q_smallint)' , 'first(t1.q_tinyint)' , 'first(t1.q_float)' ,'first(t1.q_double)' ,'first(t1.q_binary)' ,'first(t1.q_nchar)' ,'first(t1.q_bool)' ,'first(t1.q_ts)' ,
'last(t1.q_int)' , 'last(t1.q_bigint)' , 'last(t1.q_smallint)' , 'last(t1.q_tinyint)' , 'last(t1.q_float)' ,'last(t1.q_double)' , 'last(t1.q_binary)' ,'last(t1.q_nchar)' ,'last(t1.q_bool)' ,'last(t1.q_ts)' ,
'bottom(t2.q_int,20)' , 'bottom(t2.q_bigint,20)' , 'bottom(t2.q_smallint,20)' , 'bottom(t2.q_tinyint,20)' ,'bottom(t2.q_float,20)' , 'bottom(t2.q_double,20)' ,
'top(t2.q_int,20)' , 'top(t2.q_bigint,20)' , 'top(t2.q_smallint,20)' ,'top(t2.q_tinyint,20)' ,'top(t2.q_float,20)' ,'top(t2.q_double,20)' ,
'first(t2.q_int)' , 'first(t2.q_bigint)' , 'first(t2.q_smallint)' , 'first(t2.q_tinyint)' , 'first(t2.q_float)' ,'first(t2.q_double)' ,'first(t2.q_binary)' ,'first(t2.q_nchar)' ,'first(t2.q_bool)' ,'first(t2.q_ts)' ,
'last(t2.q_int)' , 'last(t2.q_bigint)' , 'last(t2.q_smallint)' , 'last(t2.q_tinyint)' , 'last(t2.q_float)' ,'last(t2.q_double)' , 'last(t2.q_binary)' ,'last(t2.q_nchar)' ,'last(t2.q_bool)' ,'last(t2.q_ts)']
calc_select_in_j = ['min(t1.q_int)' , 'min(t1.q_bigint)' , 'min(t1.q_smallint)' , 'min(t1.q_tinyint)' , 'min(t1.q_float)' ,'min(t1.q_double)' ,
'max(t1.q_int)' , 'max(t1.q_bigint)' , 'max(t1.q_smallint)' , 'max(t1.q_tinyint)' ,'max(t1.q_float)' ,'max(t1.q_double)' ,
'apercentile(t1.q_int,20)' , 'apercentile(t1.q_bigint,20)' ,'apercentile(t1.q_smallint,20)' ,'apercentile(t1.q_tinyint,20)' ,'apercentile(t1.q_float,20)' ,'apercentile(t1.q_double,20)' ,
'last_row(t1.q_int)' , 'last_row(t1.q_bigint)' , 'last_row(t1.q_smallint)' , 'last_row(t1.q_tinyint)' , 'last_row(t1.q_float)' ,
'last_row(t1.q_double)' , 'last_row(t1.q_bool)' ,'last_row(t1.q_binary)' ,'last_row(t1.q_nchar)' ,'last_row(t1.q_ts)' ,
'min(t2.q_int)' , 'min(t2.q_bigint)' , 'min(t2.q_smallint)' , 'min(t2.q_tinyint)' , 'min(t2.q_float)' ,'min(t2.q_double)' ,
'max(t2.q_int)' , 'max(t2.q_bigint)' , 'max(t2.q_smallint)' , 'max(t2.q_tinyint)' ,'max(t2.q_float)' ,'max(t2.q_double)' ,
'apercentile(t2.q_int,20)' , 'apercentile(t2.q_bigint,20)' ,'apercentile(t2.q_smallint,20)' ,'apercentile(t2.q_tinyint,20)' ,'apercentile(t2.q_float,20)' ,'apercentile(t2.q_double,20)' ,
'last_row(t2.q_int)' , 'last_row(t2.q_bigint)' , 'last_row(t2.q_smallint)' , 'last_row(t2.q_tinyint)' , 'last_row(t2.q_float)' ,
'last_row(t2.q_double)' , 'last_row(t2.q_bool)' ,'last_row(t2.q_binary)' ,'last_row(t2.q_nchar)' ,'last_row(t2.q_ts)']
calc_select_all_j = calc_select_in_ts_j + calc_select_in_j
calc_select_regular_j = [ 'PERCENTILE(t1.q_int,10)' ,'PERCENTILE(t1.q_bigint,20)' , 'PERCENTILE(t1.q_smallint,30)' ,'PERCENTILE(t1.q_tinyint,40)' ,'PERCENTILE(t1.q_float,50)' ,'PERCENTILE(t1.q_double,60)' ,
'PERCENTILE(t2.q_int,10)' ,'PERCENTILE(t2.q_bigint,20)' , 'PERCENTILE(t2.q_smallint,30)' ,'PERCENTILE(t2.q_tinyint,40)' ,'PERCENTILE(t2.q_float,50)' ,'PERCENTILE(t2.q_double,60)']
calc_select_fill_j = ['INTERP(t1.q_bool)' ,'INTERP(t1.q_binary)' ,'INTERP(t1.q_nchar)' ,'INTERP(t1.q_ts)', 'INTERP(t1.q_int)' ,'INTERP(t1.*)' ,'INTERP(t1.q_bigint)' ,'INTERP(t1.q_smallint)' ,'INTERP(t1.q_tinyint)', 'INTERP(t1.q_float)' ,'INTERP(t1.q_double)' ,
'INTERP(t2.q_bool)' ,'INTERP(t2.q_binary)' ,'INTERP(t2.q_nchar)' ,'INTERP(t2.q_ts)', 'INTERP(t2.q_int)' ,'INTERP(t2.*)' ,'INTERP(t2.q_bigint)' ,'INTERP(t2.q_smallint)' ,'INTERP(t2.q_tinyint)', 'INTERP(t2.q_float)' ,'INTERP(t2.q_double)']
interp_where_j = ['t1.ts = now' , 't1.ts = \'2020-09-13 20:26:40.000\'' , 't1.ts = \'2020-09-13 20:26:40.009\'' ,'t2.ts = now' , 't2.ts = \'2020-09-13 20:26:40.000\'' , 't2.ts = \'2020-09-13 20:26:40.009\'' ,
't1.tbname in (\'table_1\') and t1.ts = now' ,'t1.tbname in (\'table_0\' ,\'table_1\',\'table_2\',\'table_3\',\'table_4\',\'table_5\') and t1.ts = \'2020-09-13 20:26:40.000\'','t1.tbname like \'table%\' and t1.ts = \'2020-09-13 20:26:40.002\'',
't2.tbname in (\'table_1\') and t2.ts = now' ,'t2.tbname in (\'table_0\' ,\'table_1\',\'table_2\',\'table_3\',\'table_4\',\'table_5\') and t2.ts = \'2020-09-13 20:26:40.000\'','t2.tbname like \'table%\' and t2.ts = \'2020-09-13 20:26:40.002\'']
# calc_aggregate_all calc_aggregate_regular calc_aggregate_groupbytbname APERCENTILE\PERCENTILE
# aggregate functions include [all: count(*)\avg\sum\stddev || regular: twa\irate\leastsquares || group by tbname: twa\irate]
calc_aggregate_all = ['count(*)' , 'count(q_int)' ,'count(q_bigint)' , 'count(q_smallint)' ,'count(q_tinyint)' ,'count(q_float)' ,
'count(q_double)' ,'count(q_binary)' ,'count(q_nchar)' ,'count(q_bool)' ,'count(q_ts)' ,
'avg(q_int)' ,'avg(q_bigint)' , 'avg(q_smallint)' ,'avg(q_tinyint)' ,'avg(q_float)' ,'avg(q_double)' ,
'sum(q_int)' ,'sum(q_bigint)' , 'sum(q_smallint)' ,'sum(q_tinyint)' ,'sum(q_float)' ,'sum(q_double)' ,
'STDDEV(q_int)' ,'STDDEV(q_bigint)' , 'STDDEV(q_smallint)' ,'STDDEV(q_tinyint)' ,'STDDEV(q_float)' ,'STDDEV(q_double)',
'APERCENTILE(q_int,10)' ,'APERCENTILE(q_bigint,20)' , 'APERCENTILE(q_smallint,30)' ,'APERCENTILE(q_tinyint,40)' ,'APERCENTILE(q_float,50)' ,'APERCENTILE(q_double,60)']
calc_aggregate_regular = ['twa(q_int)' ,'twa(q_bigint)' , 'twa(q_smallint)' ,'twa(q_tinyint)' ,'twa (q_float)' ,'twa(q_double)' ,
'IRATE(q_int)' ,'IRATE(q_bigint)' , 'IRATE(q_smallint)' ,'IRATE(q_tinyint)' ,'IRATE (q_float)' ,'IRATE(q_double)' ,
'LEASTSQUARES(q_int,15,3)' , 'LEASTSQUARES(q_bigint,10,1)' , 'LEASTSQUARES(q_smallint,20,3)' ,'LEASTSQUARES(q_tinyint,10,4)' ,'LEASTSQUARES(q_float,6,4)' ,'LEASTSQUARES(q_double,3,1)' ,
'PERCENTILE(q_int,10)' ,'PERCENTILE(q_bigint,20)' , 'PERCENTILE(q_smallint,30)' ,'PERCENTILE(q_tinyint,40)' ,'PERCENTILE(q_float,50)' ,'PERCENTILE(q_double,60)']
calc_aggregate_groupbytbname = ['twa(q_int)' ,'twa(q_bigint)' , 'twa(q_smallint)' ,'twa(q_tinyint)' ,'twa (q_float)' ,'twa(q_double)' ,
'IRATE(q_int)' ,'IRATE(q_bigint)' , 'IRATE(q_smallint)' ,'IRATE(q_tinyint)' ,'IRATE (q_float)' ,'IRATE(q_double)' ]
#two table join
calc_aggregate_all_j = ['count(t1.*)' , 'count(t1.q_int)' ,'count(t1.q_bigint)' , 'count(t1.q_smallint)' ,'count(t1.q_tinyint)' ,'count(t1.q_float)' ,
'count(t1.q_double)' ,'count(t1.q_binary)' ,'count(t1.q_nchar)' ,'count(t1.q_bool)' ,'count(t1.q_ts)' ,
'avg(t1.q_int)' ,'avg(t1.q_bigint)' , 'avg(t1.q_smallint)' ,'avg(t1.q_tinyint)' ,'avg(t1.q_float)' ,'avg(t1.q_double)' ,
'sum(t1.q_int)' ,'sum(t1.q_bigint)' , 'sum(t1.q_smallint)' ,'sum(t1.q_tinyint)' ,'sum(t1.q_float)' ,'sum(t1.q_double)' ,
'STDDEV(t1.q_int)' ,'STDDEV(t1.q_bigint)' , 'STDDEV(t1.q_smallint)' ,'STDDEV(t1.q_tinyint)' ,'STDDEV(t1.q_float)' ,'STDDEV(t1.q_double)',
'APERCENTILE(t1.q_int,10)' ,'APERCENTILE(t1.q_bigint,20)' , 'APERCENTILE(t1.q_smallint,30)' ,'APERCENTILE(t1.q_tinyint,40)' ,'APERCENTILE(t1.q_float,50)' ,'APERCENTILE(t1.q_double,60)' ,
'count(t2.*)' , 'count(t2.q_int)' ,'count(t2.q_bigint)' , 'count(t2.q_smallint)' ,'count(t2.q_tinyint)' ,'count(t2.q_float)' ,
'count(t2.q_double)' ,'count(t2.q_binary)' ,'count(t2.q_nchar)' ,'count(t2.q_bool)' ,'count(t2.q_ts)' ,
'avg(t2.q_int)' ,'avg(t2.q_bigint)' , 'avg(t2.q_smallint)' ,'avg(t2.q_tinyint)' ,'avg(t2.q_float)' ,'avg(t2.q_double)' ,
'sum(t2.q_int)' ,'sum(t2.q_bigint)' , 'sum(t2.q_smallint)' ,'sum(t2.q_tinyint)' ,'sum(t2.q_float)' ,'sum(t2.q_double)' ,
'STDDEV(t2.q_int)' ,'STDDEV(t2.q_bigint)' , 'STDDEV(t2.q_smallint)' ,'STDDEV(t2.q_tinyint)' ,'STDDEV(t2.q_float)' ,'STDDEV(t2.q_double)',
'APERCENTILE(t2.q_int,10)' ,'APERCENTILE(t2.q_bigint,20)' , 'APERCENTILE(t2.q_smallint,30)' ,'APERCENTILE(t2.q_tinyint,40)' ,'APERCENTILE(t2.q_float,50)' ,'APERCENTILE(t2.q_double,60)']
calc_aggregate_regular_j = ['twa(t1.q_int)' ,'twa(t1.q_bigint)' , 'twa(t1.q_smallint)' ,'twa(t1.q_tinyint)' ,'twa (t1.q_float)' ,'twa(t1.q_double)' ,
'IRATE(t1.q_int)' ,'IRATE(t1.q_bigint)' , 'IRATE(t1.q_smallint)' ,'IRATE(t1.q_tinyint)' ,'IRATE (t1.q_float)' ,'IRATE(t1.q_double)' ,
'LEASTSQUARES(t1.q_int,15,3)' , 'LEASTSQUARES(t1.q_bigint,10,1)' , 'LEASTSQUARES(t1.q_smallint,20,3)' ,'LEASTSQUARES(t1.q_tinyint,10,4)' ,'LEASTSQUARES(t1.q_float,6,4)' ,'LEASTSQUARES(t1.q_double,3,1)' ,
'PERCENTILE(t1.q_int,10)' ,'PERCENTILE(t1.q_bigint,20)' , 'PERCENTILE(t1.q_smallint,30)' ,'PERCENTILE(t1.q_tinyint,40)' ,'PERCENTILE(t1.q_float,50)' ,'PERCENTILE(t1.q_double,60)' ,
'twa(t2.q_int)' ,'twa(t2.q_bigint)' , 'twa(t2.q_smallint)' ,'twa(t2.q_tinyint)' ,'twa (t2.q_float)' ,'twa(t2.q_double)' ,
'IRATE(t2.q_int)' ,'IRATE(t2.q_bigint)' , 'IRATE(t2.q_smallint)' ,'IRATE(t2.q_tinyint)' ,'IRATE (t2.q_float)' ,'IRATE(t2.q_double)',
'LEASTSQUARES(t2.q_int,15,3)' , 'LEASTSQUARES(t2.q_bigint,10,1)' , 'LEASTSQUARES(t2.q_smallint,20,3)' ,'LEASTSQUARES(t2.q_tinyint,10,4)' ,'LEASTSQUARES(t2.q_float,6,4)' ,'LEASTSQUARES(t2.q_double,3,1)' ,
'PERCENTILE(t2.q_int,10)' ,'PERCENTILE(t2.q_bigint,20)' , 'PERCENTILE(t2.q_smallint,30)' ,'PERCENTILE(t2.q_tinyint,40)' ,'PERCENTILE(t2.q_float,50)' ,'PERCENTILE(t2.q_double,60)']
calc_aggregate_groupbytbname_j = ['twa(t1.q_int)' ,'twa(t1.q_bigint)' , 'twa(t1.q_smallint)' ,'twa(t1.q_tinyint)' ,'twa (t1.q_float)' ,'twa(t1.q_double)' ,
'IRATE(t1.q_int)' ,'IRATE(t1.q_bigint)' , 'IRATE(t1.q_smallint)' ,'IRATE(t1.q_tinyint)' ,'IRATE (t1.q_float)' ,'IRATE(t1.q_double)' ,
'twa(t2.q_int)' ,'twa(t2.q_bigint)' , 'twa(t2.q_smallint)' ,'twa(t2.q_tinyint)' ,'twa (t2.q_float)' ,'twa(t2.q_double)' ,
'IRATE(t2.q_int)' ,'IRATE(t2.q_bigint)' , 'IRATE(t2.q_smallint)' ,'IRATE(t2.q_tinyint)' ,'IRATE (t2.q_float)' ,'IRATE(t2.q_double)' ]
# calc_calculate_all calc_calculate_regular calc_calculate_groupbytbname
# calculation functions include [all: spread\+-*/ || regular: diff\derivative || group by tbname: diff\derivative]
calc_calculate_all = ['SPREAD(ts)' , 'SPREAD(q_ts)' , 'SPREAD(q_int)' ,'SPREAD(q_bigint)' , 'SPREAD(q_smallint)' ,'SPREAD(q_tinyint)' ,'SPREAD(q_float)' ,'SPREAD(q_double)' ,
'(SPREAD(q_int) + SPREAD(q_bigint))' , '(SPREAD(q_smallint) - SPREAD(q_float))', '(SPREAD(q_double) * SPREAD(q_tinyint))' , '(SPREAD(q_double) / SPREAD(q_float))']
calc_calculate_regular = ['DIFF(q_int)' ,'DIFF(q_bigint)' , 'DIFF(q_smallint)' ,'DIFF(q_tinyint)' ,'DIFF(q_float)' ,'DIFF(q_double)' ,
'DERIVATIVE(q_int,15s,0)' , 'DERIVATIVE(q_bigint,10s,1)' , 'DERIVATIVE(q_smallint,20s,0)' ,'DERIVATIVE(q_tinyint,10s,1)' ,'DERIVATIVE(q_float,6s,0)' ,'DERIVATIVE(q_double,3s,1)' ]
calc_calculate_groupbytbname = calc_calculate_regular
#two table join
calc_calculate_all_j = ['SPREAD(t1.ts)' , 'SPREAD(t1.q_ts)' , 'SPREAD(t1.q_int)' ,'SPREAD(t1.q_bigint)' , 'SPREAD(t1.q_smallint)' ,'SPREAD(t1.q_tinyint)' ,'SPREAD(t1.q_float)' ,'SPREAD(t1.q_double)' ,
'SPREAD(t2.ts)' , 'SPREAD(t2.q_ts)' , 'SPREAD(t2.q_int)' ,'SPREAD(t2.q_bigint)' , 'SPREAD(t2.q_smallint)' ,'SPREAD(t2.q_tinyint)' ,'SPREAD(t2.q_float)' ,'SPREAD(t2.q_double)' ,
'(SPREAD(t1.q_int) + SPREAD(t1.q_bigint))' , '(SPREAD(t1.q_tinyint) - SPREAD(t1.q_float))', '(SPREAD(t1.q_double) * SPREAD(t1.q_tinyint))' , '(SPREAD(t1.q_double) / SPREAD(t1.q_tinyint))',
'(SPREAD(t2.q_int) + SPREAD(t2.q_bigint))' , '(SPREAD(t2.q_smallint) - SPREAD(t2.q_float))', '(SPREAD(t2.q_double) * SPREAD(t2.q_tinyint))' , '(SPREAD(t2.q_double) / SPREAD(t2.q_tinyint))',
'(SPREAD(t1.q_int) + SPREAD(t1.q_smallint))' , '(SPREAD(t2.q_smallint) - SPREAD(t2.q_float))', '(SPREAD(t1.q_double) * SPREAD(t1.q_tinyint))' , '(SPREAD(t1.q_double) / SPREAD(t1.q_float))']
calc_calculate_regular_j = ['DIFF(t1.q_int)' ,'DIFF(t1.q_bigint)' , 'DIFF(t1.q_smallint)' ,'DIFF(t1.q_tinyint)' ,'DIFF(t1.q_float)' ,'DIFF(t1.q_double)' ,
'DERIVATIVE(t1.q_int,15s,0)' , 'DERIVATIVE(t1.q_bigint,10s,1)' , 'DERIVATIVE(t1.q_smallint,20s,0)' ,'DERIVATIVE(t1.q_tinyint,10s,1)' ,'DERIVATIVE(t1.q_float,6s,0)' ,'DERIVATIVE(t1.q_double,3s,1)' ,
'DIFF(t2.q_int)' ,'DIFF(t2.q_bigint)' , 'DIFF(t2.q_smallint)' ,'DIFF(t2.q_tinyint)' ,'DIFF(t2.q_float)' ,'DIFF(t2.q_double)' ,
'DERIVATIVE(t2.q_int,15s,0)' , 'DERIVATIVE(t2.q_bigint,10s,1)' , 'DERIVATIVE(t2.q_smallint,20s,0)' ,'DERIVATIVE(t2.q_tinyint,10s,1)' ,'DERIVATIVE(t2.q_float,6s,0)' ,'DERIVATIVE(t2.q_double,3s,1)' ]
calc_calculate_groupbytbname_j = calc_calculate_regular_j
# interval && calc_aggregate_all\calc_aggregate_regular\calc_select_all
interval_sliding = ['interval(4w) sliding(1w) ','interval(1w) sliding(1d) ','interval(1d) sliding(1h) ' ,
'interval(1h) sliding(1m) ','interval(1m) sliding(1s) ','interval(1s) sliding(10a) ',
'interval(1y) ','interval(1n) ','interval(1w) ','interval(1d) ','interval(1h) ','interval(1m) ','interval(1s) ' ,'interval(10a)',
'interval(1y,1n) ','interval(1n,1w) ','interval(1w,1d) ','interval(1d,1h) ','interval(1h,1m) ','interval(1m,1s) ','interval(1s,10a) ' ,'interval(100a,30a)']
for i in range(self.fornum):
tdSql.query("select 1-1 from table_0;")
sql = "select count(*) from (select count(*) from stable_1 where ts>= 1620000000000 interval(1d) group by tbname) interval(1d);"
tdLog.info(sql)
tdSql.query(sql)
tdSql.checkData(0,0,'2021-08-27 00:00:00.000')
tdSql.checkData(0,1,3)
tdSql.checkData(1,0,'2021-08-28 00:00:00.000')
tdSql.checkData(1,1,3)
tdSql.checkData(2,0,'2021-08-29 00:00:00.000')
tdSql.checkRows(12)
#sql = "select * from ( select * from regular_table_1 where q_tinyint >= -127 and q_tinyint <= 127 order by ts );"
tdSql.query("select 1-2 from table_0;")
sql = "select * from ( select * from regular_table_1 where "
sql += "%s );" % random.choice(q_where)
datacheck = self.regular1_checkall_0(sql)
tdSql.checkRows(100)
datacheck = self.regular1_checkall_100(sql)
#sql = "select * from ( select * from regular_table_1 ) where q_binary like 'binary%' or q_binary = '0' order by ts asc ;"
tdSql.query("select 1-3 from table_0;")
sql = "select * from ( select * from regular_table_1 ) where "
sql += "%s ;" % random.choice(q_where)
datacheck = self.regular1_checkall_0(sql)
tdSql.checkRows(100)
datacheck = self.regular1_checkall_100(sql)
#sql = select * from ( select * from regular_table_1 t1 , regular_table_2 t2 where t1.ts = t2.ts and t1.q_double >= -1000000000 and t1.q_double <= 1000000000 and t2.q_double >= -1000000000 and t2.q_double <= 1000000000 order by t2.ts asc );;
tdSql.query("select 1-4 from table_0;")
sql = "select * from ( select * from regular_table_1 t1 , regular_table_2 t2 where t1.ts = t2.ts and "
sql += "%s )" % random.choice(q_u_where)
datacheck = self.regular_join_checkall_0(sql)
tdSql.checkRows(100)
datacheck = self.regular_join_checkall_100(sql)
#sql = "select * from ( select * from stable_1 where q_tinyint >= -127 and q_tinyint <= 127 order by ts );"
tdSql.query("select 2-1 from stable_1;")
sql = "select * from ( select * from stable_1 where "
sql += "%s );" % random.choice(qt_where)
datacheck = self.stable1_checkall_0(sql)
tdSql.checkRows(300)
datacheck = self.stable1_checkall_300(sql)
#sql = "select * from ( select * from stable_1 ) order by ts asc ;"
tdSql.query("select 2-2 from stable_1;")
sql = "select * from ( select * from stable_1 ) where "
sql += "%s ;" % random.choice(qt_where)
datacheck = self.stable1_checkall_0(sql)
tdSql.checkRows(300)
datacheck = self.stable1_checkall_300(sql)
#sql = "select * from ( select * from table_0 ) where q_binary like 'binary%' or q_binary = '0' order by ts asc ;"
tdSql.query("select 2-3 from stable_1;")
sql = "select * from ( select * from table_0 ) where "
sql += "%s ;" % random.choice(q_where)
datacheck = self.regular1_checkall_0(sql)
tdSql.checkRows(100)
datacheck = self.regular1_checkall_100(sql)
#sql = "select * from ( select * from table_0 where q_binary like 'binary%' or q_binary = '0' order by ts asc );"
tdSql.query("select 2-4 from stable_1;")
sql = "select * from ( select * from table_0 where "
sql += "%s );" % random.choice(q_where)
datacheck = self.regular1_checkall_0(sql)
tdSql.checkRows(100)
datacheck = self.regular1_checkall_100(sql)
#sql = select * from ( select * from stable_1 t1 , stable_2 t2 where t1.ts = t2.ts and t1.t_int = t2.t_int ) ;;
tdSql.query("select 2-5 from stable_1;")
sql = "select * from ( select * from stable_1 t1 , stable_2 t2 where t1.ts = t2.ts and "
sql += "%s )" % random.choice(t_join_where)
datacheck = self.stable_join_checkall_0(sql)
tdSql.checkRows(100)
datacheck = self.stable_join_checkall_100(sql)
endTime = time.time()
print("total time %ds" % (endTime - startTime))
def stop(self):
tdSql.close()
tdLog.success("%s successfully executed" % __file__)
tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())
\ No newline at end of file
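The nested-query test above drives its SQL generation off the category lists defined in run(): a random WHERE clause, aggregate function, or interval/sliding window is picked from each list and spliced into an inner query, which is then wrapped in an outer query. Below is a trimmed-down sketch of that composition pattern; the short lists are copied from the much larger ones in the test, and the print call stands in for tdSql.query.

import random

# A few entries copied from the category lists defined in the test above;
# the real test draws from far larger lists.
q_where = ['ts < now +1s', 'q_int <= 2147483647 and q_int >= -2147483647']
calc_aggregate_all = ['count(*)', 'avg(q_int)', 'sum(q_double)']
interval_sliding = ['interval(1d) sliding(1h) ', 'interval(1h) sliding(1m) ']

# Inner per-table aggregation wrapped in an outer aggregation over the
# subquery result -- the same shape as the fixed query checked at the top of run().
inner = "select %s from stable_1 where %s %s group by tbname" % (
    random.choice(calc_aggregate_all), random.choice(q_where),
    random.choice(interval_sliding))
sql = "select count(*) from ( %s ) interval(1d);" % inner
print(sql)  # the real test would hand this to tdSql.query(sql) and check the rows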
......@@ -103,7 +103,6 @@ class TDTestCase:
select count(*) as count, loc from st where ts between 1600000000000 and 1600000000010 group by loc''')
tdSql.checkRows(6)
def stop(self):
tdSql.close()
tdLog.success("%s successfully executed" % __file__)
......
......@@ -63,6 +63,7 @@ class TDTestCase:
tdLog.sleep(3)
# test case for https://jira.taosdata.com:18080/browse/TS-402
tdLog.info("test case for update option 1")
tdSql.execute("create database test update 1")
tdSql.execute("use test")
......@@ -75,7 +76,39 @@ class TDTestCase:
tdSql.checkData(0, 2, None)
tdSql.checkData(0, 3, 9)
tdSql.execute("drop table if exists tb")
tdSql.execute("create table tb (ts timestamp, c1 int, c2 int, c3 int)")
tdSql.execute("insert into tb values(%d, 1, 2, 3)(%d, null, 4, 5)(%d, 6, null, 7)" % (self.ts, self.ts, self.ts))
tdSql.query("select * from tb")
tdSql.checkRows(1)
tdSql.checkData(0, 1, 6)
tdSql.checkData(0, 2, None)
tdSql.checkData(0, 3, 7)
# https://jira.taosdata.com:18080/browse/TS-424
tdLog.info("test case for update option 2")
tdSql.execute("create database db2 update 2")
tdSql.execute("use db2")
tdSql.execute("create table tb (ts timestamp, c1 int, c2 int, c3 int)")
tdSql.execute("insert into tb values(%d, 1, 2, 3)(%d, null, null, 9)" % (self.ts, self.ts))
tdSql.query("select * from tb")
tdSql.checkRows(1)
tdSql.checkData(0, 1, 1)
tdSql.checkData(0, 2, 2)
tdSql.checkData(0, 3, 9)
tdSql.execute("drop table if exists tb")
tdSql.execute("create table tb (ts timestamp, c1 int, c2 int, c3 int)")
tdSql.execute("insert into tb values(%d, 1, 2, 3)(%d, null, 4, 5)(%d, 6, null, 7)" % (self.ts, self.ts, self.ts))
tdSql.query("select * from tb")
tdSql.checkRows(1)
tdSql.checkData(0, 1, 6)
tdSql.checkData(0, 2, 4)
tdSql.checkData(0, 3, 7)
def stop(self):
......
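The update-option hunks above encode two different merge rules for rows that share a timestamp: with update 1 the incoming row replaces the stored row wholesale (NULLs included), while with update 2 only non-NULL columns overwrite. A minimal pure-Python sketch of that reading of the assertions, using the same c1/c2/c3 values the test inserts:

def merge_rows(old, new, update_mode):
    # Model of the merge rule implied by the checkData() calls above
    # (an assumption drawn from the expected values, not from the server code).
    if update_mode == 1:
        return list(new)                    # whole-row overwrite, NULLs win too
    return [n if n is not None else o       # update 2: NULL keeps the stored value
            for o, n in zip(old, new)]

rows = [[1, 2, 3], [None, 4, 5], [6, None, 7]]   # the three inserts sharing one timestamp

merged1 = rows[0]
merged2 = rows[0]
for r in rows[1:]:
    merged1 = merge_rows(merged1, r, 1)
    merged2 = merge_rows(merged2, r, 2)

assert merged1 == [6, None, 7]   # update 1 expectation: checkData 6 / None / 7
assert merged2 == [6, 4, 7]      # update 2 expectation: checkData 6 / 4 / 7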
......@@ -445,7 +445,7 @@ if $rows != $val then
endi
print ================>TD-5600
sql select first(join_tb0.c8),first(join_tb0.c9) from join_tb1 , join_tb0 where join_tb1.ts = join_tb0.ts and join_tb1.ts <= 100002 and join_tb1.ts>=100000 interval(1s) fill(linear);
sql select first(join_tb0.c8),first(join_tb0.c9) from join_tb1 , join_tb0 where join_tb1.ts = join_tb0.ts and join_tb1.ts <= 100002 and join_tb1.ts>=100000 interval(1s);
#===============================================================
......
......@@ -549,4 +549,381 @@ if $data11 != 2.000000000 then
return -1
endi
sql create database test2;
sql use test2;
sql create table meters (ts TIMESTAMP,a INT,b INT) TAGS (area INT);
sql CREATE TABLE t0 USING meters TAGS (0);
sql CREATE TABLE t1 USING meters TAGS (1);
sql CREATE TABLE t2 USING meters TAGS (1);
sql CREATE TABLE t3 USING meters TAGS (0);
sql insert into t0 values ('2021-09-30 15:00:00.00',0,0);
sql insert into t0 values ('2021-09-30 15:00:01.00',1,1);
sql insert into t0 values ('2021-09-30 15:00:03.00',3,3);
sql insert into t0 values ('2021-09-30 15:00:05.00',5,5);
sql insert into t0 values ('2021-09-30 15:00:07.00',7,7);
sql insert into t0 values ('2021-09-30 15:00:09.00',9,9);
sql insert into t1 values ('2021-09-30 15:00:00.00',0,0);
sql insert into t1 values ('2021-09-30 15:00:02.00',2,2);
sql insert into t1 values ('2021-09-30 15:00:04.00',4,4);
sql insert into t1 values ('2021-09-30 15:00:06.00',6,6);
sql insert into t1 values ('2021-09-30 15:00:08.00',8,8);
sql insert into t1 values ('2021-09-30 15:00:10.00',10,10);
sql insert into t2 values ('2021-09-30 15:00:00.00',0,0);
sql insert into t2 values ('2021-09-30 15:00:01.00',11,11);
sql insert into t2 values ('2021-09-30 15:00:02.00',22,22);
sql insert into t2 values ('2021-09-30 15:00:03.00',33,33);
sql insert into t2 values ('2021-09-30 15:00:04.00',44,44);
sql insert into t2 values ('2021-09-30 15:00:05.00',55,55);
sql insert into t3 values ('2021-09-30 15:00:00.00',0,0);
sql insert into t3 values ('2021-09-30 15:00:01.00',11,11);
sql insert into t3 values ('2021-09-30 15:00:02.00',22,22);
sql insert into t3 values ('2021-09-30 15:00:03.00',33,33);
sql insert into t3 values ('2021-09-30 15:00:04.00',44,44);
sql insert into t3 values ('2021-09-30 15:00:05.00',55,55);
sql select count(*) from meters interval(1s) group by tbname;
if $rows != 24 then
return -1
endi
sql select count(*) from (select count(*) from meters interval(1s) group by tbname) interval(1s);
if $rows != 11 then
return -1
endi
if $data00 != @21-09-30 15:00:00.000@ then
return -1
endi
if $data01 != 4 then
return -1
endi
if $data10 != @21-09-30 15:00:01.000@ then
return -1
endi
if $data11 != 3 then
return -1
endi
if $data20 != @21-09-30 15:00:02.000@ then
return -1
endi
if $data21 != 3 then
return -1
endi
if $data30 != @21-09-30 15:00:03.000@ then
return -1
endi
if $data31 != 3 then
return -1
endi
if $data40 != @21-09-30 15:00:04.000@ then
return -1
endi
if $data41 != 3 then
return -1
endi
if $data50 != @21-09-30 15:00:05.000@ then
return -1
endi
if $data51 != 3 then
return -1
endi
if $data60 != @21-09-30 15:00:06.000@ then
return -1
endi
if $data61 != 1 then
return -1
endi
if $data70 != @21-09-30 15:00:07.000@ then
return -1
endi
if $data71 != 1 then
return -1
endi
if $data80 != @21-09-30 15:00:08.000@ then
return -1
endi
if $data81 != 1 then
return -1
endi
if $data90 != @21-09-30 15:00:09.000@ then
return -1
endi
if $data91 != 1 then
return -1
endi
sql select count(*) from (select count(*) from meters interval(1s) group by area) interval(1s);
if $rows != 11 then
return -1
endi
if $data00 != @21-09-30 15:00:00.000@ then
return -1
endi
if $data01 != 2 then
return -1
endi
if $data10 != @21-09-30 15:00:01.000@ then
return -1
endi
if $data11 != 2 then
return -1
endi
if $data20 != @21-09-30 15:00:02.000@ then
return -1
endi
if $data21 != 2 then
return -1
endi
if $data30 != @21-09-30 15:00:03.000@ then
return -1
endi
if $data31 != 2 then
return -1
endi
if $data40 != @21-09-30 15:00:04.000@ then
return -1
endi
if $data41 != 2 then
return -1
endi
if $data50 != @21-09-30 15:00:05.000@ then
return -1
endi
if $data51 != 2 then
return -1
endi
if $data60 != @21-09-30 15:00:06.000@ then
return -1
endi
if $data61 != 1 then
return -1
endi
if $data70 != @21-09-30 15:00:07.000@ then
return -1
endi
if $data71 != 1 then
return -1
endi
if $data80 != @21-09-30 15:00:08.000@ then
return -1
endi
if $data81 != 1 then
return -1
endi
if $data90 != @21-09-30 15:00:09.000@ then
return -1
endi
if $data91 != 1 then
return -1
endi
sql select sum(sa) from (select sum(a) as sa from meters interval(1s) group by tbname) interval(1s);
if $rows != 11 then
return -1
endi
if $data00 != @21-09-30 15:00:00.000@ then
return -1
endi
if $data01 != 0 then
return -1
endi
if $data10 != @21-09-30 15:00:01.000@ then
return -1
endi
if $data11 != 23 then
return -1
endi
if $data20 != @21-09-30 15:00:02.000@ then
return -1
endi
if $data21 != 46 then
return -1
endi
if $data30 != @21-09-30 15:00:03.000@ then
return -1
endi
if $data31 != 69 then
return -1
endi
if $data40 != @21-09-30 15:00:04.000@ then
return -1
endi
if $data41 != 92 then
return -1
endi
if $data50 != @21-09-30 15:00:05.000@ then
return -1
endi
if $data51 != 115 then
return -1
endi
if $data60 != @21-09-30 15:00:06.000@ then
return -1
endi
if $data61 != 6 then
return -1
endi
if $data70 != @21-09-30 15:00:07.000@ then
return -1
endi
if $data71 != 7 then
return -1
endi
if $data80 != @21-09-30 15:00:08.000@ then
return -1
endi
if $data81 != 8 then
return -1
endi
if $data90 != @21-09-30 15:00:09.000@ then
return -1
endi
if $data91 != 9 then
return -1
endi
sql select sum(sa) from (select sum(a) as sa from meters interval(1s) group by area) interval(1s);
if $rows != 11 then
return -1
endi
if $data00 != @21-09-30 15:00:00.000@ then
return -1
endi
if $data01 != 0 then
return -1
endi
if $data10 != @21-09-30 15:00:01.000@ then
return -1
endi
if $data11 != 23 then
return -1
endi
if $data20 != @21-09-30 15:00:02.000@ then
return -1
endi
if $data21 != 46 then
return -1
endi
if $data30 != @21-09-30 15:00:03.000@ then
return -1
endi
if $data31 != 69 then
return -1
endi
if $data40 != @21-09-30 15:00:04.000@ then
return -1
endi
if $data41 != 92 then
return -1
endi
if $data50 != @21-09-30 15:00:05.000@ then
return -1
endi
if $data51 != 115 then
return -1
endi
if $data60 != @21-09-30 15:00:06.000@ then
return -1
endi
if $data61 != 6 then
return -1
endi
if $data70 != @21-09-30 15:00:07.000@ then
return -1
endi
if $data71 != 7 then
return -1
endi
if $data80 != @21-09-30 15:00:08.000@ then
return -1
endi
if $data81 != 8 then
return -1
endi
if $data90 != @21-09-30 15:00:09.000@ then
return -1
endi
if $data91 != 9 then
return -1
endi
sql select count(*) from (select count(*) from meters interval(1s)) interval(1s);
if $rows != 11 then
return -1
endi
if $data00 != @21-09-30 15:00:00.000@ then
return -1
endi
if $data01 != 1 then
return -1
endi
if $data10 != @21-09-30 15:00:01.000@ then
return -1
endi
if $data11 != 1 then
return -1
endi
if $data20 != @21-09-30 15:00:02.000@ then
return -1
endi
if $data21 != 1 then
return -1
endi
if $data30 != @21-09-30 15:00:03.000@ then
return -1
endi
if $data31 != 1 then
return -1
endi
if $data40 != @21-09-30 15:00:04.000@ then
return -1
endi
if $data41 != 1 then
return -1
endi
if $data50 != @21-09-30 15:00:05.000@ then
return -1
endi
if $data51 != 1 then
return -1
endi
if $data60 != @21-09-30 15:00:06.000@ then
return -1
endi
if $data61 != 1 then
return -1
endi
if $data70 != @21-09-30 15:00:07.000@ then
return -1
endi
if $data71 != 1 then
return -1
endi
if $data80 != @21-09-30 15:00:08.000@ then
return -1
endi
if $data81 != 1 then
return -1
endi
if $data90 != @21-09-30 15:00:09.000@ then
return -1
endi
if $data91 != 1 then
return -1
endi
sql select count(*) from (select count(*) from meters interval(1s) group by tbname);
if $rows != 1 then
return -1
endi
if $data00 != 24 then
return -1
endi
system sh/exec.sh -n dnode1 -s stop -x SIGINT
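The expected values in the checks above follow directly from the rows inserted into t0 through t3: for each one-second bucket, the outer count(*) equals the number of child tables that produced an inner row in that second, and the outer sum(sa) is the sum of column a across those tables. A stand-alone Python sketch that recomputes those expectations from the same data (it only reproduces the arithmetic; it does not talk to taosd):

from collections import defaultdict

# Second offsets from 15:00:00 at which each child table has a row, mapped to column a.
rows = {
    't0': {0: 0, 1: 1, 3: 3, 5: 5, 7: 7, 9: 9},
    't1': {0: 0, 2: 2, 4: 4, 6: 6, 8: 8, 10: 10},
    't2': {0: 0, 1: 11, 2: 22, 3: 33, 4: 44, 5: 55},
    't3': {0: 0, 1: 11, 2: 22, 3: 33, 4: 44, 5: 55},
}

per_second_count = defaultdict(int)   # -> outer count(*) over the group-by-tbname subquery
per_second_sum = defaultdict(int)     # -> outer sum(sa) over the group-by-tbname subquery
for table, points in rows.items():
    for sec, a in points.items():
        per_second_count[sec] += 1
        per_second_sum[sec] += a

assert len(per_second_count) == 11                                # the "$rows != 11" checks
assert [per_second_count[s] for s in range(6)] == [4, 3, 3, 3, 3, 3]
assert [per_second_sum[s] for s in range(1, 6)] == [23, 46, 69, 92, 115]
assert sum(len(p) for p in rows.values()) == 24                   # total rows, i.e. the final count of 24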