Commit 1782ecd5 authored by dengyihao

Merge branch 'fix/shared_link_rocksdb' into enh/addCompileError

......@@ -77,6 +77,12 @@ ELSEIF (TD_DARWIN_64)
ENDIF ()
ENDIF ()
option(
BUILD_GEOS
"If build geos on Windows"
ON
)
option(
BUILD_SHARED_LIBS
""
......
......@@ -269,8 +269,11 @@ if(${BUILD_WITH_ROCKSDB})
option(WITH_BENCHMARK_TOOLS "" OFF)
option(WITH_TOOLS "" OFF)
option(WITH_LIBURING "" OFF)
IF (TD_LINUX)
option(ROCKSDB_BUILD_SHARED "Build shared versions of the RocksDB libraries" ON)
ELSE()
option(ROCKSDB_BUILD_SHARED "Build shared versions of the RocksDB libraries" OFF)
ENDIF()
add_subdirectory(rocksdb EXCLUDE_FROM_ALL)
target_include_directories(
rocksdb
......
This diff is collapsed.
......@@ -1008,8 +1008,7 @@ SAMPLE(expr, k)
**More explanations**:
This function cannot be used in expression calculation.
- Must be used with `PARTITION BY tbname` when it's used on a STable to force the result on each single timeline
- This function cannot be used in expression calculation.
### TAIL
......@@ -1088,7 +1087,6 @@ CSUM(expr)
- Arithmetic operation can't be performed on the result of `csum` function
- Can only be used with aggregate functions This function can be used with supertables and standard tables.
- Must be used with `PARTITION BY tbname` when it's used on a STable to force the result on each single timeline
### DERIVATIVE
......@@ -1112,7 +1110,6 @@ ignore_negative: {
**More explanation**:
- It can be used together with `PARTITION BY tbname` against a STable.
- It can be used together with a selected column. For example: select \_rowts, DERIVATIVE() from.
### DIFF
......@@ -1175,7 +1172,6 @@ MAVG(expr, k)
- Arithmetic operation can't be performed on the result of `MAVG`.
- Can only be used with data columns, can't be used with tags. - Can't be used with aggregate functions.
- Must be used with `PARTITION BY tbname` when it's used on a STable to force the result on each single timeline
### STATECOUNT
......@@ -1201,7 +1197,6 @@ STATECOUNT(expr, oper, val)
**More explanations**:
- Must be used together with `PARTITION BY tbname` when it's used on a STable to force the result into each single timeline
- Can't be used with window operation, like interval/state_window/session_window
......@@ -1229,7 +1224,6 @@ STATEDURATION(expr, oper, val, unit)
**More explanations**:
- Must be used together with `PARTITION BY tbname` when it's used on a STable to force the result into each single timeline
- Can't be used with window operation, like interval/state_window/session_window
......@@ -1247,7 +1241,6 @@ TWA(expr)
**Applicable table types**: standard tables and supertables
- Must be used together with `PARTITION BY tbname` to force the result into each single timeline.
## System Information Functions
......
......@@ -111,7 +111,7 @@ The parameters described in this document by the effect that they have on the sy
| Attribute | Description |
| ------------- | ---------------------------------------------- |
| Applicable | Client/Server |
| Meaning | The maximum waiting time to get avaliable conn |
| Meaning | The maximum waiting time to get available conn |
| Value Range | 10-50000000(ms) |
| Default Value | 500000 |
......
This diff is collapsed.
......@@ -1001,7 +1001,6 @@ SAMPLE(expr, k)
**使用说明**
- 不能参与表达式计算;该函数可以应用在普通表和超级表上;
- 使用在超级表上的时候,需要搭配 PARTITION by tbname 使用,将结果强制规约到单个时间线。
### TAIL
......@@ -1080,7 +1079,6 @@ CSUM(expr)
- 不支持 +、-、*、/ 运算,如 csum(col1) + csum(col2)。
- 只能与聚合(Aggregation)函数一起使用。 该函数可以应用在普通表和超级表上。
- 使用在超级表上的时候,需要搭配 PARTITION BY tbname使用,将结果强制规约到单个时间线。
### DERIVATIVE
......@@ -1104,7 +1102,6 @@ ignore_negative: {
**使用说明**:
- DERIVATIVE 函数可以在由 PARTITION BY 划分出单独时间线的情况下用于超级表(也即 PARTITION BY tbname)。
- 可以与选择相关联的列一起使用。 例如: select \_rowts, DERIVATIVE() from。
### DIFF
......@@ -1167,7 +1164,6 @@ MAVG(expr, k)
- 不支持 +、-、*、/ 运算,如 mavg(col1, k1) + mavg(col2, k1);
- 只能与普通列,选择(Selection)、投影(Projection)函数一起使用,不能与聚合(Aggregation)函数一起使用;
- 使用在超级表上的时候,需要搭配 PARTITION BY tbname使用,将结果强制规约到单个时间线。
### STATECOUNT
......@@ -1193,7 +1189,6 @@ STATECOUNT(expr, oper, val)
**使用说明**
- 该函数可以应用在普通表上,在由 PARTITION BY 划分出单独时间线的情况下用于超级表(也即 PARTITION BY tbname)
- 不能和窗口操作一起使用,例如 interval/state_window/session_window。
......@@ -1221,7 +1216,6 @@ STATEDURATION(expr, oper, val, unit)
**使用说明**
- 该函数可以应用在普通表上,在由 PARTITION BY 划分出单独时间线的情况下用于超级表(也即 PARTITION BY tbname)
- 不能和窗口操作一起使用,例如 interval/state_window/session_window。
......@@ -1239,8 +1233,6 @@ TWA(expr)
**适用于**:表和超级表。
**使用说明**: TWA 函数可以在由 PARTITION BY 划分出单独时间线的情况下用于超级表(也即 PARTITION BY tbname)。
## 系统信息函数
......
......@@ -180,6 +180,7 @@ extern int32_t tsRpcRetryInterval;
extern bool tsDisableStream;
extern int64_t tsStreamBufferSize;
extern int64_t tsCheckpointInterval;
extern bool tsFilterScalarMode;
// #define NEEDTO_COMPRESSS_MSG(size) (tsCompressMsgSize != -1 && (size) > tsCompressMsgSize)
......
......@@ -149,6 +149,7 @@ struct SWalReader {
TdFilePtr pIdxFile;
int64_t curFileFirstVer;
int64_t curVersion;
int64_t skipToVersion; // skip data and jump to destination version, usually used by stream resume ignoring untreated data
int64_t capacity;
TdThreadMutex mutex;
SWalFilterCond cond;
......@@ -200,6 +201,8 @@ int32_t walReaderSeekVer(SWalReader *pRead, int64_t ver);
int32_t walNextValidMsg(SWalReader *pRead);
int64_t walReaderGetCurrentVer(const SWalReader *pReader);
int64_t walReaderGetValidFirstVer(const SWalReader *pReader);
int64_t walReaderGetSkipToVersion(SWalReader *pReader);
void walReaderSetSkipToVersion(SWalReader *pReader, int64_t ver);
void walReaderValidVersionRange(SWalReader *pReader, int64_t *sver, int64_t *ever);
void walReaderVerifyOffset(SWalReader *pWalReader, STqOffsetVal* pOffset);
......
......@@ -208,6 +208,7 @@ char tsUdfdLdLibPath[512] = "";
bool tsDisableStream = false;
int64_t tsStreamBufferSize = 128 * 1024 * 1024;
int64_t tsCheckpointInterval = 3 * 60 * 60 * 1000;
bool tsFilterScalarMode = false;
#ifndef _STORAGE
int32_t taosSetTfsCfg(SConfig *pCfg) {
......@@ -522,6 +523,8 @@ static int32_t taosAddServerCfg(SConfig *pCfg) {
if (cfgAddInt32(pCfg, "cacheLazyLoadThreshold", tsCacheLazyLoadThreshold, 0, 100000, 0) != 0) return -1;
if (cfgAddBool(pCfg, "filterScalarMode", tsFilterScalarMode, 0) != 0) return -1;
GRANT_CFG_ADD;
return 0;
}
......@@ -898,6 +901,8 @@ static int32_t taosSetServerCfg(SConfig *pCfg) {
tsStreamBufferSize = cfgGetItem(pCfg, "streamBufferSize")->i64;
tsCheckpointInterval = cfgGetItem(pCfg, "checkpointInterval")->i64;
tsFilterScalarMode = cfgGetItem(pCfg, "filterScalarMode")->bval;
GRANT_CFG_GET;
return 0;
}
......
......@@ -87,6 +87,28 @@ target_include_directories(
PUBLIC "${TD_SOURCE_DIR}/include/libs/scalar"
PUBLIC "${TD_SOURCE_DIR}/contrib/rocksdb/include"
)
IF (TD_LINUX)
target_link_libraries(
vnode
PUBLIC os
PUBLIC util
PUBLIC common
PUBLIC tfs
PUBLIC wal
PUBLIC qworker
PUBLIC sync
PUBLIC executor
PUBLIC scheduler
PUBLIC tdb
# PUBLIC bdb
# PUBLIC scalar
PUBLIC rocksdb-shared
PUBLIC transport
PUBLIC stream
PUBLIC index
)
ELSE()
target_link_libraries(
vnode
PUBLIC os
......@@ -107,6 +129,7 @@ target_link_libraries(
PUBLIC stream
PUBLIC index
)
ENDIF()
IF (TD_GRANT)
TARGET_LINK_LIBRARIES(vnode PUBLIC grant)
......
......@@ -1304,19 +1304,22 @@ int32_t tqProcessTaskPauseReq(STQ* pTq, int64_t sversion, char* msg, int32_t msg
int32_t tqProcessTaskResumeReq(STQ* pTq, int64_t sversion, char* msg, int32_t msgLen) {
SVResumeStreamTaskReq* pReq = (SVResumeStreamTaskReq*)msg;
int32_t vgId = pTq->pStreamMeta->vgId;
SStreamTask* pTask = streamMetaAcquireTask(pTq->pStreamMeta, pReq->taskId);
if (pTask) {
atomic_store_8(&pTask->status.taskStatus, pTask->status.keepTaskStatus);
// no lock needs to secure the access of the version
if (pReq->igUntreated && pTask->taskLevel == TASK_LEVEL__SOURCE) { // discard all the data when the stream task is suspended.
pTask->chkInfo.currentVer = sversion;
walReaderSeekVer(pTask->exec.pWalReader, sversion);
tqDebug("vgId:%d s-task:%s resume to normal from the latest version:%" PRId64 ", vnode ver:%" PRId64 ", schedStatus:%d", pTq->pStreamMeta->vgId,
pTask->id.idStr, pTask->chkInfo.currentVer, sversion, pTask->status.schedStatus);
if (pReq->igUntreated && pTask->taskLevel == TASK_LEVEL__SOURCE) {
// discard all the data when the stream task is suspended.
walReaderSetSkipToVersion(pTask->exec.pWalReader, sversion);
tqDebug("vgId:%d s-task:%s resume to exec, prev paused version:%" PRId64 ", start from vnode ver:%" PRId64
", schedStatus:%d",
vgId, pTask->id.idStr, pTask->chkInfo.currentVer, sversion, pTask->status.schedStatus);
} else { // from the previous paused version and go on
tqDebug("vgId:%d s-task:%s resume to normal from paused ver:%" PRId64 ", vnode ver:%" PRId64 ", schedStatus:%d", pTq->pStreamMeta->vgId,
pTask->id.idStr, pTask->chkInfo.currentVer, sversion, pTask->status.schedStatus);
tqDebug("vgId:%d s-task:%s resume to exec, from paused ver:%" PRId64 ", vnode ver:%" PRId64 ", schedStatus:%d",
vgId, pTask->id.idStr, pTask->chkInfo.currentVer, sversion, pTask->status.schedStatus);
}
if (pTask->taskLevel == TASK_LEVEL__SOURCE && taosQueueItemSize(pTask->inputQueue->queue) == 0) {
......@@ -1325,6 +1328,8 @@ int32_t tqProcessTaskResumeReq(STQ* pTq, int64_t sversion, char* msg, int32_t ms
streamSchedExec(pTask);
}
streamMetaReleaseTask(pTq->pStreamMeta, pTask);
} else {
tqError("vgId:%d failed to find the s-task:0x%x for resume stream task", vgId, pReq->taskId);
}
return 0;
......@@ -1433,7 +1438,7 @@ int32_t tqStartStreamTasks(STQ* pTq) {
int32_t numOfTasks = taosArrayGetSize(pMeta->pTaskList);
if (numOfTasks == 0) {
tqInfo("vgId:%d no stream tasks exist", vgId);
taosWUnLockLatch(&pTq->pStreamMeta->lock);
taosWUnLockLatch(&pMeta->lock);
return 0;
}
......@@ -1441,7 +1446,7 @@ int32_t tqStartStreamTasks(STQ* pTq) {
if (pMeta->walScanCounter > 1) {
tqDebug("vgId:%d wal read task has been launched, remain scan times:%d", vgId, pMeta->walScanCounter);
taosWUnLockLatch(&pTq->pStreamMeta->lock);
taosWUnLockLatch(&pMeta->lock);
return 0;
}
......@@ -1449,7 +1454,7 @@ int32_t tqStartStreamTasks(STQ* pTq) {
if (pRunReq == NULL) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
tqError("vgId:%d failed to create msg to start wal scanning to launch stream tasks, code:%s", vgId, terrstr());
taosWUnLockLatch(&pTq->pStreamMeta->lock);
taosWUnLockLatch(&pMeta->lock);
return -1;
}
......@@ -1460,7 +1465,7 @@ int32_t tqStartStreamTasks(STQ* pTq) {
SRpcMsg msg = {.msgType = TDMT_STREAM_TASK_RUN, .pCont = pRunReq, .contLen = sizeof(SStreamTaskRunReq)};
tmsgPutToQueue(&pTq->pVnode->msgCb, STREAM_QUEUE, &msg);
taosWUnLockLatch(&pTq->pStreamMeta->lock);
taosWUnLockLatch(&pMeta->lock);
return 0;
}
......@@ -87,6 +87,16 @@ static int32_t doSetOffsetForWalReader(SStreamTask *pTask, int32_t vgId) {
}
}
int64_t skipToVer = walReaderGetSkipToVersion(pTask->exec.pWalReader);
if (skipToVer != 0 && skipToVer > pTask->chkInfo.currentVer) {
int32_t code = walReaderSeekVer(pTask->exec.pWalReader, skipToVer);
if (code != TSDB_CODE_SUCCESS) { // no data in wal, quit
return code;
}
tqDebug("vgId:%d s-task:%s wal reader jump to ver:%" PRId64, vgId, pTask->id.idStr, skipToVer);
}
return TSDB_CODE_SUCCESS;
}
......
......@@ -573,8 +573,18 @@ void extractQualifiedTupleByFilterResult(SSDataBlock* pBlock, const SColumnInfoD
if (colDataIsNull_var(pDst, j)) {
colDataSetNull_var(pDst, numOfRows);
} else {
// fix address sanitizer error. p1 may point to memory that will change during realloc of colDataSetVal, first copy it to p2
char* p1 = colDataGetVarData(pDst, j);
colDataSetVal(pDst, numOfRows, p1, false);
int32_t len = 0;
if (pDst->info.type == TSDB_DATA_TYPE_JSON) {
len = getJsonValueLen(p1);
} else {
len = varDataTLen(p1);
}
char* p2 = taosMemoryMalloc(len);
memcpy(p2, p1, len);
colDataSetVal(pDst, numOfRows, p2, false);
taosMemoryFree(p2);
}
numOfRows += 1;
j += 1;
......
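The fix above copies the variable-length value before handing it to `colDataSetVal`, because `colDataSetVal` may realloc the destination column's data area and `p1` points into that same area. The following is a minimal standalone sketch of this copy-before-append pattern, assuming a hypothetical `GrowBuf` type and helper rather than the real TDengine column APIs:

```c
// Minimal standalone sketch of the copy-before-append pattern (hypothetical
// GrowBuf stand-in, not a TDengine API). Appending may realloc the buffer,
// which invalidates any pointer previously taken into it.
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

typedef struct {
  char  *data;
  size_t len;
  size_t cap;
} GrowBuf;

static void growbufAppend(GrowBuf *buf, const char *src, size_t n) {
  if (buf->len + n > buf->cap) {
    buf->cap  = (buf->len + n) * 2;
    buf->data = realloc(buf->data, buf->cap);  // may move the buffer
  }
  memcpy(buf->data + buf->len, src, n);
  buf->len += n;
}

int main(void) {
  GrowBuf buf = {.data = malloc(4), .len = 0, .cap = 4};
  growbufAppend(&buf, "abcd", 4);

  // UNSAFE: a pointer into buf.data would dangle if the append reallocates:
  //   char *p1 = buf.data;
  //   growbufAppend(&buf, p1, 4);   // use-after-realloc reported by ASAN
  // SAFE: copy the source bytes out first, then append from the copy.
  char *p2 = malloc(4);
  memcpy(p2, buf.data, 4);
  growbufAppend(&buf, p2, 4);
  free(p2);

  printf("len=%zu\n", buf.len);  // prints len=8
  free(buf.data);
  return 0;
}
```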
......@@ -600,9 +600,9 @@ SUdf *udfdGetOrCreateUdf(const char *udfName) {
return udf;
} else {
(*pUdfHash)->expired = true;
taosHashRemove(global.udfsHash, udfName, strlen(udfName));
fnInfo("udfd expired, check for new version. existing udf %s udf version %d, udf created time %" PRIx64,
(*pUdfHash)->name, (*pUdfHash)->version, (*pUdfHash)->createdTime);
taosHashRemove(global.udfsHash, udfName, strlen(udfName));
}
}
......
......@@ -227,8 +227,10 @@ typedef struct SFltTreeStat {
SFilterInfo *info;
} SFltTreeStat;
typedef struct SFltScalarCtx {
SNode *node;
SArray* fltSclRange;
} SFltScalarCtx;
typedef struct SFltBuildGroupCtx {
......@@ -237,6 +239,11 @@ typedef struct SFltBuildGroupCtx {
int32_t code;
} SFltBuildGroupCtx;
typedef struct {
SColumnNode *colNode;
SArray *points;
} SFltSclColumnRange;
struct SFilterInfo {
bool scalarMode;
SFltScalarCtx sclCtx;
......
This diff is collapsed.
......@@ -1791,7 +1791,11 @@ void vectorNotMatch(SScalarParam *pLeft, SScalarParam *pRight, SScalarParam *pOu
void vectorIsNull(SScalarParam *pLeft, SScalarParam *pRight, SScalarParam *pOut, int32_t _ord) {
for (int32_t i = 0; i < pLeft->numOfRows; ++i) {
int8_t v = IS_HELPER_NULL(pLeft->columnData, i) ? 1 : 0;
if (v) {
++pOut->numOfQualified;
}
colDataSetInt8(pOut->columnData, i, &v);
colDataClearNull_f(pOut->columnData->nullbitmap, i);
}
pOut->numOfRows = pLeft->numOfRows;
}
......@@ -1799,7 +1803,11 @@ void vectorIsNull(SScalarParam *pLeft, SScalarParam *pRight, SScalarParam *pOut,
void vectorNotNull(SScalarParam *pLeft, SScalarParam *pRight, SScalarParam *pOut, int32_t _ord) {
for (int32_t i = 0; i < pLeft->numOfRows; ++i) {
int8_t v = IS_HELPER_NULL(pLeft->columnData, i) ? 0 : 1;
if (v) {
++pOut->numOfQualified;
}
colDataSetInt8(pOut->columnData, i, &v);
colDataClearNull_f(pOut->columnData->nullbitmap, i);
}
pOut->numOfRows = pLeft->numOfRows;
}
......@@ -1812,6 +1820,13 @@ void vectorIsTrue(SScalarParam *pLeft, SScalarParam *pRight, SScalarParam *pOut,
colDataSetInt8(pOut->columnData, i, &v);
colDataClearNull_f(pOut->columnData->nullbitmap, i);
}
{
bool v = false;
GET_TYPED_DATA(v, bool, pOut->columnData->info.type, colDataGetData(pOut->columnData, i));
if (v) {
++pOut->numOfQualified;
}
}
}
pOut->columnData->hasNull = false;
}
......@@ -1851,7 +1866,9 @@ void vectorJsonContains(SScalarParam *pLeft, SScalarParam *pRight, SScalarParam
char *pLeftData = colDataGetVarData(pLeft->columnData, i);
getJsonValue(pLeftData, jsonKey, &isExist);
}
if (isExist) {
++pOut->numOfQualified;
}
colDataSetVal(pOutputCol, i, (const char *)(&isExist), false);
}
taosMemoryFree(jsonKey);
......
......@@ -6,13 +6,23 @@ target_include_directories(
PRIVATE "${CMAKE_CURRENT_SOURCE_DIR}/inc"
)
if(${BUILD_WITH_ROCKSDB})
IF (TD_LINUX)
target_link_libraries(
stream
PUBLIC rocksdb-shared tdb
PRIVATE os util transport qcom executor wal index
)
ELSE()
target_link_libraries(
stream
PUBLIC rocksdb tdb
PRIVATE os util transport qcom executor wal index
)
ENDIF()
target_include_directories(
stream
PUBLIC "${TD_SOURCE_DIR}/contrib/rocksdb/include"
......
......@@ -108,6 +108,14 @@ int32_t walNextValidMsg(SWalReader *pReader) {
int64_t walReaderGetCurrentVer(const SWalReader *pReader) { return pReader->curVersion; }
int64_t walReaderGetValidFirstVer(const SWalReader *pReader) { return walGetFirstVer(pReader->pWal); }
void walReaderSetSkipToVersion(SWalReader *pReader, int64_t ver) { atomic_store_64(&pReader->skipToVersion, ver); }
// this function is NOT multi-thread safe, and no need to be.
int64_t walReaderGetSkipToVersion(SWalReader *pReader) {
int64_t newVersion = pReader->skipToVersion;
pReader->skipToVersion = 0;
return newVersion;
}
void walReaderValidVersionRange(SWalReader *pReader, int64_t *sver, int64_t *ever) {
*sver = walGetFirstVer(pReader->pWal);
......
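Taken together, `walReaderSetSkipToVersion` stores the jump target atomically from the resume path, while `walReaderGetSkipToVersion` is called only from the single reader thread: it returns the pending target and resets it to 0, so the jump is consumed at most once before the next read (see `doSetOffsetForWalReader` above). Below is a standalone sketch of this set-once/consume-once hand-off, using a hypothetical `ToyReader` rather than the real `SWalReader` API:

```c
// Standalone sketch of the skipToVersion hand-off (hypothetical ToyReader,
// not the real SWalReader): the resume path stores a target version, and the
// reader consumes it exactly once before the next read and seeks there.
#include <inttypes.h>
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

typedef struct {
  _Atomic int64_t skipToVersion;  // 0 means no pending jump
  int64_t         curVersion;
} ToyReader;

// Called from the resume request path, possibly on another thread.
static void toySetSkipToVersion(ToyReader *r, int64_t ver) {
  atomic_store(&r->skipToVersion, ver);
}

// Called only from the single reader thread: read and clear, so the jump
// happens at most once.
static int64_t toyTakeSkipToVersion(ToyReader *r) {
  int64_t ver = atomic_load(&r->skipToVersion);
  atomic_store(&r->skipToVersion, 0);
  return ver;
}

int main(void) {
  ToyReader r;
  atomic_init(&r.skipToVersion, 0);
  r.curVersion = 100;

  toySetSkipToVersion(&r, 250);  // resume with igUntreated: jump past old data

  int64_t target = toyTakeSkipToVersion(&r);
  if (target != 0 && target > r.curVersion) {
    r.curVersion = target;       // stands in for walReaderSeekVer()
  }

  printf("cur=%" PRId64 ", pending=%" PRId64 "\n", r.curVersion,
         toyTakeSkipToVersion(&r));  // cur=250, pending=0
  return 0;
}
```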
......@@ -1274,6 +1274,7 @@
,,y,script,./test.sh -f tsim/parser/columnValue_tinyint.sim
,,y,script,./test.sh -f tsim/parser/columnValue_unsign.sim
,,y,script,./test.sh -f tsim/parser/condition.sim
,,y,script,./test.sh -f tsim/parser/condition_scl.sim
,,y,script,./test.sh -f tsim/parser/constCol.sim
,,y,script,./test.sh -f tsim/parser/create_db.sim
,,y,script,./test.sh -f tsim/parser/create_mt.sim
......@@ -1353,6 +1354,7 @@
,,y,script,./test.sh -f tsim/query/event.sim
,,y,script,./test.sh -f tsim/query/forceFill.sim
,,y,script,./test.sh -f tsim/query/emptyTsRange.sim
,,y,script,./test.sh -f tsim/query/emptyTsRange_scl.sim
,,y,script,./test.sh -f tsim/query/partitionby.sim
,,y,script,./test.sh -f tsim/query/tableCount.sim
,,y,script,./test.sh -f tsim/query/tag_scan.sim
......
system sh/stop_dnodes.sh
system sh/deploy.sh -n dnode1 -i 1
system sh/cfg.sh -n dnode1 -c filterScalarMode -v 1
system sh/exec.sh -n dnode1 -s start
sql connect
sql drop database if exists cdb
sql create database if not exists cdb
sql use cdb
sql create table stb1 (ts timestamp, c1 int, c2 float, c3 bigint, c4 smallint, c5 tinyint, c6 double, c7 bool, c8 binary(10), c9 nchar(9)) TAGS(t1 int, t2 binary(10), t3 double)
sql create table tb1 using stb1 tags(1,'1',1.0)
sql create table tb2 using stb1 tags(2,'2',2.0)
sql create table tb3 using stb1 tags(3,'3',3.0)
sql create table tb4 using stb1 tags(4,'4',4.0)
sql create table tb5 using stb1 tags(5,'5',5.0)
sql create table tb6 using stb1 tags(6,'6',6.0)
sql insert into tb1 values ('2021-05-05 18:19:00',1,1.0,1,1,1,1.0,true ,'1','1')
sql insert into tb1 values ('2021-05-05 18:19:01',2,2.0,2,2,2,2.0,true ,'2','2')
sql insert into tb1 values ('2021-05-05 18:19:02',3,3.0,3,3,3,3.0,false,'3','3')
sql insert into tb1 values ('2021-05-05 18:19:03',4,4.0,4,4,4,4.0,false,'4','4')
sql insert into tb1 values ('2021-05-05 18:19:04',11,11.0,11,11,11,11.0,true ,'11','11')
sql insert into tb1 values ('2021-05-05 18:19:05',12,12.0,12,12,12,12.0,true ,'12','12')
sql insert into tb1 values ('2021-05-05 18:19:06',13,13.0,13,13,13,13.0,false,'13','13')
sql insert into tb1 values ('2021-05-05 18:19:07',14,14.0,14,14,14,14.0,false,'14','14')
sql insert into tb2 values ('2021-05-05 18:19:08',21,21.0,21,21,21,21.0,true ,'21','21')
sql insert into tb2 values ('2021-05-05 18:19:09',22,22.0,22,22,22,22.0,true ,'22','22')
sql insert into tb2 values ('2021-05-05 18:19:10',23,23.0,23,23,23,23.0,false,'23','23')
sql insert into tb2 values ('2021-05-05 18:19:11',24,24.0,24,24,24,24.0,false,'24','24')
sql insert into tb3 values ('2021-05-05 18:19:12',31,31.0,31,31,31,31.0,true ,'31','31')
sql insert into tb3 values ('2021-05-05 18:19:13',32,32.0,32,32,32,32.0,true ,'32','32')
sql insert into tb3 values ('2021-05-05 18:19:14',33,33.0,33,33,33,33.0,false,'33','33')
sql insert into tb3 values ('2021-05-05 18:19:15',34,34.0,34,34,34,34.0,false,'34','34')
sql insert into tb4 values ('2021-05-05 18:19:16',41,41.0,41,41,41,41.0,true ,'41','41')
sql insert into tb4 values ('2021-05-05 18:19:17',42,42.0,42,42,42,42.0,true ,'42','42')
sql insert into tb4 values ('2021-05-05 18:19:18',43,43.0,43,43,43,43.0,false,'43','43')
sql insert into tb4 values ('2021-05-05 18:19:19',44,44.0,44,44,44,44.0,false,'44','44')
sql insert into tb5 values ('2021-05-05 18:19:20',51,51.0,51,51,51,51.0,true ,'51','51')
sql insert into tb5 values ('2021-05-05 18:19:21',52,52.0,52,52,52,52.0,true ,'52','52')
sql insert into tb5 values ('2021-05-05 18:19:22',53,53.0,53,53,53,53.0,false,'53','53')
sql insert into tb5 values ('2021-05-05 18:19:23',54,54.0,54,54,54,54.0,false,'54','54')
sql insert into tb6 values ('2021-05-05 18:19:24',61,61.0,61,61,61,61.0,true ,'61','61')
sql insert into tb6 values ('2021-05-05 18:19:25',62,62.0,62,62,62,62.0,true ,'62','62')
sql insert into tb6 values ('2021-05-05 18:19:26',63,63.0,63,63,63,63.0,false,'63','63')
sql insert into tb6 values ('2021-05-05 18:19:27',64,64.0,64,64,64,64.0,false,'64','64')
sql insert into tb6 values ('2021-05-05 18:19:28',NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL)
sql create table stb2 (ts timestamp, u1 int unsigned, u2 bigint unsigned, u3 smallint unsigned, u4 tinyint unsigned, ts2 timestamp) TAGS(t1 int unsigned, t2 bigint unsigned, t3 timestamp, t4 int)
sql create table tb2_1 using stb2 tags(1,1,'2021-05-05 18:38:38',1)
sql create table tb2_2 using stb2 tags(2,2,'2021-05-05 18:58:58',2)
sql insert into tb2_1 values ('2021-05-05 18:19:00',1,2,3,4,'2021-05-05 18:28:01')
sql insert into tb2_1 values ('2021-05-05 18:19:01',5,6,7,8,'2021-05-05 18:28:02')
sql insert into tb2_1 values ('2021-05-05 18:19:02',2,2,3,4,'2021-05-05 18:28:03')
sql insert into tb2_1 values ('2021-05-05 18:19:03',5,6,7,8,'2021-05-05 18:28:04')
sql insert into tb2_1 values ('2021-05-05 18:19:04',3,2,3,4,'2021-05-05 18:28:05')
sql insert into tb2_1 values ('2021-05-05 18:19:05',5,6,7,8,'2021-05-05 18:28:06')
sql insert into tb2_1 values ('2021-05-05 18:19:06',4,2,3,4,'2021-05-05 18:28:07')
sql insert into tb2_1 values ('2021-05-05 18:19:07',5,6,7,8,'2021-05-05 18:28:08')
sql insert into tb2_1 values ('2021-05-05 18:19:08',5,2,3,4,'2021-05-05 18:28:09')
sql insert into tb2_1 values ('2021-05-05 18:19:09',5,6,7,8,'2021-05-05 18:28:10')
sql insert into tb2_1 values ('2021-05-05 18:19:10',6,2,3,4,'2021-05-05 18:28:11')
sql insert into tb2_2 values ('2021-05-05 18:19:11',5,6,7,8,'2021-05-05 18:28:12')
sql insert into tb2_2 values ('2021-05-05 18:19:12',7,2,3,4,'2021-05-05 18:28:13')
sql insert into tb2_2 values ('2021-05-05 18:19:13',5,6,7,8,'2021-05-05 18:28:14')
sql insert into tb2_2 values ('2021-05-05 18:19:14',8,2,3,4,'2021-05-05 18:28:15')
sql insert into tb2_2 values ('2021-05-05 18:19:15',5,6,7,8,'2021-05-05 18:28:16')
sql create table stb3 (ts timestamp, c1 int, c2 float, c3 bigint, c4 smallint, c5 tinyint, c6 double, c7 bool, c8 binary(10), c9 nchar(9)) TAGS(t1 int, t2 binary(10), t3 double)
sql create table tb3_1 using stb3 tags(1,'1',1.0)
sql create table tb3_2 using stb3 tags(2,'2',2.0)
sql insert into tb3_1 values ('2021-01-05 18:19:00',1,1.0,1,1,1,1.0,true ,'1','1')
sql insert into tb3_1 values ('2021-02-05 18:19:01',2,2.0,2,2,2,2.0,true ,'2','2')
sql insert into tb3_1 values ('2021-03-05 18:19:02',3,3.0,3,3,3,3.0,false,'3','3')
sql insert into tb3_1 values ('2021-04-05 18:19:03',4,4.0,4,4,4,4.0,false,'4','4')
sql insert into tb3_1 values ('2021-05-05 18:19:28',5,NULL,5,NULL,5,NULL,true,NULL,'5')
sql insert into tb3_1 values ('2021-06-05 18:19:28',NULL,6.0,NULL,6,NULL,6.0,NULL,'6',NULL)
sql insert into tb3_1 values ('2021-07-05 18:19:28',NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL)
sql insert into tb3_2 values ('2021-01-06 18:19:00',11,11.0,11,11,11,11.0,true ,'11','11')
sql insert into tb3_2 values ('2021-02-06 18:19:01',12,12.0,12,12,12,12.0,true ,'12','12')
sql insert into tb3_2 values ('2021-03-06 18:19:02',13,13.0,13,13,13,13.0,false,'13','13')
sql insert into tb3_2 values ('2021-04-06 18:19:03',14,14.0,14,14,14,14.0,false,'14','14')
sql insert into tb3_2 values ('2021-05-06 18:19:28',15,NULL,15,NULL,15,NULL,true,NULL,'15')
sql insert into tb3_2 values ('2021-06-06 18:19:28',NULL,16.0,NULL,16,NULL,16.0,NULL,'16',NULL)
sql insert into tb3_2 values ('2021-07-06 18:19:28',NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL)
sql create table stb4 (ts timestamp, c1 int, c2 float, c3 bigint, c4 smallint, c5 tinyint, c6 double, c7 bool, c8 binary(10), c9 nchar(9),c10 binary(16300)) TAGS(t1 int, t2 binary(10), t3 double)
sql create table tb4_0 using stb4 tags(0,'0',0.0)
sql create table tb4_1 using stb4 tags(1,'1',1.0)
sql create table tb4_2 using stb4 tags(2,'2',2.0)
sql create table tb4_3 using stb4 tags(3,'3',3.0)
sql create table tb4_4 using stb4 tags(4,'4',4.0)
$i = 0
$ts0 = 1625850000000
$blockNum = 5
$delta = 0
$tbname0 = tb4_
$a = 0
$b = 200
$c = 400
while $i < $blockNum
$x = 0
$rowNum = 1200
while $x < $rowNum
$ts = $ts0 + $x
$a = $a + 1
$b = $b + 1
$c = $c + 1
$d = $x / 10
$tin = $rowNum
$binary = 'binary . $c
$binary = $binary . '
$nchar = 'nchar . $c
$nchar = $nchar . '
$tbname = 'tb4_ . $i
$tbname = $tbname . '
sql insert into $tbname values ( $ts , $a , $b , $c , $d , $d , $c , true, $binary , $nchar , $binary )
$x = $x + 1
endw
$i = $i + 1
$ts0 = $ts0 + 259200000
endw
run tsim/parser/condition_query.sim
print ================== restart server to commit data into disk
system sh/exec.sh -n dnode1 -s stop -x SIGINT
system sh/exec.sh -n dnode1 -s start
print ================== server restart completed
sql connect
run tsim/parser/condition_query.sim
system sh/stop_dnodes.sh
system sh/deploy.sh -n dnode1 -i 1
system sh/cfg.sh -n dnode1 -c filterScalarMode -v 1
system sh/exec.sh -n dnode1 -s start
sql connect
sql drop database if exists db1;
sql create database if not exists db1;
sql use db1;
sql create stable sta (ts timestamp, f1 double, f2 binary(200)) tags(t1 int);
sql create table tba1 using sta tags(1);
sql insert into tba1 values ('2022-04-26 15:15:01', 1.0, "a");
sql insert into tba1 values ('2022-04-26 15:15:02', 2.0, "b");
sql insert into tba1 values ('2022-04-26 15:15:04', 4.0, "b");
sql insert into tba1 values ('2022-04-26 15:15:05', 5.0, "b");
sql select last_row(*) from sta where ts >= 1678901803783 and ts <= 1678901803783 and _c0 <= 1678901803782 interval(10d,8d) fill(linear) order by _wstart desc;
if $rows != 0 then
return -1
endi
system sh/exec.sh -n dnode1 -s stop -x SIGINT