Commit 77048d5a authored by: S Shengliang Guan

Merge from develop into feature/http

......@@ -100,7 +100,7 @@ IF (TD_LINUX)
ENDIF ()
SET(DEBUG_FLAGS "-O0 -DDEBUG")
SET(RELEASE_FLAGS "-O0")
SET(RELEASE_FLAGS "-O0 -Wno-unused-variable -Wunused-but-set-variable")
IF (${COVER} MATCHES "true")
MESSAGE(STATUS "Test coverage mode, add extra flags")
......
......@@ -78,6 +78,18 @@ taos> SELECT SUM(current) FROM meters INTERVAL(1s);
2018-10-03 14:38:16.000 | 36.000000000 |
Query OK, 5 row(s) in set (0.001538s)
```
Down-sampling also supports a time offset. For example, to sum the current collected by all smart meters every second, but with each time window starting at the 500-millisecond mark:
```mysql
taos> SELECT SUM(current) FROM meters INTERVAL(1s, 500a);
ts | sum(current) |
======================================================
2018-10-03 14:38:04.500 | 11.189999809 |
2018-10-03 14:38:05.500 | 31.900000572 |
2018-10-03 14:38:06.500 | 11.600000000 |
2018-10-03 14:38:15.500 | 12.300000381 |
2018-10-03 14:38:16.500 | 35.000000000 |
Query OK, 5 row(s) in set (0.001521s)
```
In IoT scenarios, the collection times of different data collection points are hard to synchronize, yet many analysis algorithms (FFT, for example) require the collected data to be aligned at strictly equal time intervals. In many systems the application has to implement this alignment itself; with TDengine's down-sampling it is handled easily. If no data was collected within a given time interval, TDengine also provides interpolation, as illustrated below.
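A hedged illustration of interpolating empty windows (the time range and the FILL(LINEAR) mode are chosen here only for demonstration; output omitted):
```mysql
SELECT AVG(current) FROM meters
  WHERE ts >= '2018-10-03 14:38:00.000' AND ts <= '2018-10-03 14:38:20.000'
  INTERVAL(1s)
  FILL(LINEAR);
```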
......
......@@ -148,7 +148,7 @@ INSERT INTO <tb1_name> USING <stb1_name> TAGS (<tag1_value1>, ...) VALUES (<fiel
SELECT function<field_name>,…
FROM <stable_name>
WHERE <tag_name> <[=|<=|>=|<>] values..> ([AND|OR] …)
INTERVAL (<time range>)
INTERVAL (<interval> [, offset])
GROUP BY <tag_name>, <tag_name>…
ORDER BY <tag_name> <asc|desc>
SLIMIT <group_limit>
......
......@@ -33,8 +33,7 @@ taos> DESCRIBE meters;
- The built-in function now returns the current time of the server
- When inserting a record, if the timestamp is now, the server's current time is used for that record
- Epoch Time: a timestamp can also be a long integer, the number of milliseconds since 1970-01-01 08:00:00.000
- Timestamps can be added and subtracted, e.g. now-2h means the query time pushed back 2 hours (the last 2 hours). The time units that may follow a number are: a (milliseconds), s (seconds), m (minutes), h (hours), d (days), w (weeks), n (months), y (years). For example, select * from t1 where ts > now-2w and ts <= now-1w queries exactly one week of data from two weeks ago
- TDengine does not yet support splitting time windows by natural year or natural month. Time window units in a WHERE condition are converted as follows: interval(1y) is equivalent to interval(365d), interval(1n) to interval(30d), and interval(1w) to interval(7d)
- Timestamps can be added and subtracted, e.g. now-2h means the query time pushed back 2 hours (the last 2 hours). The time units that may follow a number are a (milliseconds), s (seconds), m (minutes), h (hours), d (days) and w (weeks). For example, select * from t1 where ts > now-2w and ts <= now-1w queries exactly one week of data from two weeks ago. When specifying the time window (interval) of a down-sampling operation, the units n (natural month) and y (natural year) may also be used; see the example below.
TDengine's default timestamp precision is millisecond; microsecond precision can be enabled via the configuration parameter enableMicrosecond.
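A hedged illustration of these rules, reusing the meters table from the earlier examples (results depend on the stored data):
```mysql
-- the last 2 hours of raw data
SELECT * FROM meters WHERE ts > now-2h;

-- exactly one week of data from two weeks ago
SELECT COUNT(*) FROM meters WHERE ts > now-2w AND ts <= now-1w;

-- down-sampling by natural month; the unit n is only valid inside INTERVAL
SELECT AVG(current) FROM meters INTERVAL(1n);
```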
......@@ -299,7 +298,7 @@ TDengine缺省的时间戳是毫秒精度,但通过修改配置参数enableMic
SELECT select_expr [, select_expr ...]
FROM {tb_name_list}
[WHERE where_condition]
[INTERVAL [interval_offset,] interval_val]
[INTERVAL (interval_val [, interval_offset])]
[FILL fill_val]
[SLIDING fill_val]
[GROUP BY col_list]
......@@ -972,17 +971,17 @@ TDengine支持按时间段进行聚合,可以将表中数据按照时间段进
```mysql
SELECT function_list FROM tb_name
[WHERE where_condition]
INTERVAL (interval)
INTERVAL (interval [, offset])
[FILL ({NONE | VALUE | PREV | NULL | LINEAR})]
SELECT function_list FROM stb_name
[WHERE where_condition]
INTERVAL (interval)
INTERVAL (interval [, offset])
[FILL ({ VALUE | PREV | NULL | LINEAR})]
[GROUP BY tags]
```
- The length of the aggregation time window is specified with the keyword INTERVAL; the minimum interval is 10 milliseconds (10a). In an aggregation query, the aggregation and selection functions that may be used together are limited to single-output functions: count, avg, sum, stddev, leastsquares, percentile, min, max, first, last; functions with multi-row output (e.g. top, bottom, diff, and arithmetic expressions) cannot be used.
- The length of the aggregation time window is specified with the keyword INTERVAL; the minimum interval is 10 milliseconds (10a), and an offset is supported (the offset must be shorter than the interval). In an aggregation query, the aggregation and selection functions that may be used together are limited to single-output functions: count, avg, sum, stddev, leastsquares, percentile, min, max, first, last; functions with multi-row output (e.g. top, bottom, diff, and arithmetic expressions) cannot be used.
- The WHERE clause can specify the start and end time of the query as well as other filter conditions
- The FILL clause specifies the fill mode to use when data is missing in a time window. The fill modes include:
1. No filling: NONE (the default fill mode).
......
......@@ -69,6 +69,7 @@ typedef struct SJoinSupporter {
SSubqueryState* pState;
SSqlObj* pObj; // parent SqlObj
int32_t subqueryIndex; // index of sub query
SInterval interval;
SLimitVal limit; // limit info
uint64_t uid; // query table uid
SArray* colList; // previous query information, no need to use this attribute, and the corresponding attribution
......
......@@ -226,12 +226,8 @@ typedef struct SQueryInfo {
int16_t command; // the command may be different for each subclause, so keep it seperately.
uint32_t type; // query/insert type
// TODO refactor
char intervalTimeUnit;
char slidingTimeUnit;
STimeWindow window; // query time window
int64_t intervalTime; // aggregation time window range
int64_t slidingTime; // sliding window in mseconds
int64_t intervalOffset;// start offset of each time window
SInterval interval;
int32_t tz; // query client timezone
SSqlGroupbyExpr groupbyExpr; // group by tags info
......@@ -370,8 +366,6 @@ typedef struct SSqlStream {
uint32_t streamId;
char listed;
bool isProject;
char intervalTimeUnit;
char slidingTimeUnit;
int16_t precision;
int64_t num; // number of computing count
......@@ -385,8 +379,7 @@ typedef struct SSqlStream {
int64_t ctime; // stream created time
int64_t stime; // stream next executed time
int64_t etime; // stream end query time, when time is larger then etime, the stream will be closed
int64_t intervalTime;
int64_t slidingTime;
SInterval interval;
void * pTimer;
void (*fp)();
......
......@@ -711,13 +711,16 @@ static int32_t firstDistFuncRequired(SQLFunctionCtx *pCtx, TSKEY start, TSKEY en
if (pCtx->aOutputBuf == NULL) {
return BLK_DATA_ALL_NEEDED;
}
SFirstLastInfo *pInfo = (SFirstLastInfo*) (pCtx->aOutputBuf + pCtx->inputBytes);
if (pInfo->hasResult != DATA_SET_FLAG) {
return BLK_DATA_ALL_NEEDED;
} else { // data in current block is not earlier than current result
return (pInfo->ts <= start) ? BLK_DATA_NO_NEEDED : BLK_DATA_ALL_NEEDED;
}
return BLK_DATA_ALL_NEEDED;
// TODO pCtx->aOutputBuf is the previous windowRes output buffer, not current unloaded block. so the following filter
// is invalid
// SFirstLastInfo *pInfo = (SFirstLastInfo*) (pCtx->aOutputBuf + pCtx->inputBytes);
// if (pInfo->hasResult != DATA_SET_FLAG) {
// return BLK_DATA_ALL_NEEDED;
// } else { // data in current block is not earlier than current result
// return (pInfo->ts <= start) ? BLK_DATA_NO_NEEDED : BLK_DATA_ALL_NEEDED;
// }
}
static int32_t lastDistFuncRequired(SQLFunctionCtx *pCtx, TSKEY start, TSKEY end, int32_t colId) {
......@@ -730,12 +733,16 @@ static int32_t lastDistFuncRequired(SQLFunctionCtx *pCtx, TSKEY start, TSKEY end
return BLK_DATA_ALL_NEEDED;
}
SFirstLastInfo *pInfo = (SFirstLastInfo*) (pCtx->aOutputBuf + pCtx->inputBytes);
if (pInfo->hasResult != DATA_SET_FLAG) {
return BLK_DATA_ALL_NEEDED;
} else {
return (pInfo->ts > end) ? BLK_DATA_NO_NEEDED : BLK_DATA_ALL_NEEDED;
}
return BLK_DATA_ALL_NEEDED;
// TODO pCtx->aOutputBuf is the previous windowRes output buffer, not current unloaded block. so the following filter
// is invalid
// SFirstLastInfo *pInfo = (SFirstLastInfo*) (pCtx->aOutputBuf + pCtx->inputBytes);
// if (pInfo->hasResult != DATA_SET_FLAG) {
// return BLK_DATA_ALL_NEEDED;
// } else {
// return (pInfo->ts > end) ? BLK_DATA_NO_NEEDED : BLK_DATA_ALL_NEEDED;
// }
}
//////////////////////////////////////////////////////////////////////////////////////////////
......
......@@ -368,13 +368,12 @@ void tscCreateLocalReducer(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrd
STableComInfo tinfo = tscGetTableInfo(pTableMetaInfo->pTableMeta);
TSKEY stime = (pQueryInfo->order.order == TSDB_ORDER_ASC)? pQueryInfo->window.skey : pQueryInfo->window.ekey;
int64_t revisedSTime =
taosGetIntervalStartTimestamp(stime, pQueryInfo->slidingTime, pQueryInfo->intervalTime, pQueryInfo->slidingTimeUnit, tinfo.precision);
int64_t revisedSTime = taosTimeTruncate(stime, &pQueryInfo->interval, tinfo.precision);
if (pQueryInfo->fillType != TSDB_FILL_NONE) {
SFillColInfo* pFillCol = createFillColInfo(pQueryInfo);
pReducer->pFillInfo = taosInitFillInfo(pQueryInfo->order.order, revisedSTime, pQueryInfo->groupbyExpr.numOfGroupCols,
4096, (int32_t)numOfCols, pQueryInfo->slidingTime, pQueryInfo->slidingTimeUnit,
4096, (int32_t)numOfCols, pQueryInfo->interval.sliding, pQueryInfo->interval.slidingUnit,
tinfo.precision, pQueryInfo->fillType, pFillCol);
}
}
......@@ -551,7 +550,7 @@ static int32_t createOrderDescriptor(tOrderDescriptor **pOrderDesc, SSqlCmd *pCm
}
// primary timestamp column is involved in final result
if (pQueryInfo->intervalTime != 0 || tscOrderedProjectionQueryOnSTable(pQueryInfo, 0)) {
if (pQueryInfo->interval.interval != 0 || tscOrderedProjectionQueryOnSTable(pQueryInfo, 0)) {
numOfGroupByCols++;
}
......@@ -568,7 +567,7 @@ static int32_t createOrderDescriptor(tOrderDescriptor **pOrderDesc, SSqlCmd *pCm
orderIdx[i] = startCols++;
}
if (pQueryInfo->intervalTime != 0) {
if (pQueryInfo->interval.interval != 0) {
// the first column is the timestamp, handles queries like "interval(10m) group by tags"
orderIdx[numOfGroupByCols - 1] = PRIMARYKEY_TIMESTAMP_COL_INDEX;
}
......@@ -612,12 +611,12 @@ bool isSameGroup(SSqlCmd *pCmd, SLocalReducer *pReducer, char *pPrev, tFilePage
* super table interval query
* if the order columns is the primary timestamp, all result data belongs to one group
*/
assert(pQueryInfo->intervalTime > 0);
assert(pQueryInfo->interval.interval > 0);
if (numOfCols == 1) {
return true;
}
} else { // simple group by query
assert(pQueryInfo->intervalTime == 0);
assert(pQueryInfo->interval.interval == 0);
}
// only one row exists
......@@ -825,8 +824,7 @@ void savePrevRecordAndSetupFillInfo(SLocalReducer *pLocalReducer, SQueryInfo *pQ
if (pFillInfo != NULL) {
int64_t stime = (pQueryInfo->window.skey < pQueryInfo->window.ekey) ? pQueryInfo->window.skey : pQueryInfo->window.ekey;
int64_t revisedSTime =
taosGetIntervalStartTimestamp(stime, pQueryInfo->slidingTime, pQueryInfo->intervalTime, pQueryInfo->slidingTimeUnit, tinfo.precision);
int64_t revisedSTime = taosTimeTruncate(stime, &pQueryInfo->interval, tinfo.precision);
taosResetFillInfo(pFillInfo, revisedSTime);
}
......@@ -839,7 +837,7 @@ void savePrevRecordAndSetupFillInfo(SLocalReducer *pLocalReducer, SQueryInfo *pQ
}
static void genFinalResWithoutFill(SSqlRes* pRes, SLocalReducer *pLocalReducer, SQueryInfo* pQueryInfo) {
assert(pQueryInfo->intervalTime == 0 || pQueryInfo->fillType == TSDB_FILL_NONE);
assert(pQueryInfo->interval.interval == 0 || pQueryInfo->fillType == TSDB_FILL_NONE);
tFilePage * pBeforeFillData = pLocalReducer->pResultBuf;
......@@ -1220,7 +1218,7 @@ bool genFinalResults(SSqlObj *pSql, SLocalReducer *pLocalReducer, bool noMoreCur
#endif
// no interval query, no fill operation
if (pQueryInfo->intervalTime == 0 || pQueryInfo->fillType == TSDB_FILL_NONE) {
if (pQueryInfo->interval.interval == 0 || pQueryInfo->fillType == TSDB_FILL_NONE) {
genFinalResWithoutFill(pRes, pLocalReducer, pQueryInfo);
} else {
SFillInfo* pFillInfo = pLocalReducer->pFillInfo;
......@@ -1258,13 +1256,10 @@ static void resetEnvForNewResultset(SSqlRes *pRes, SSqlCmd *pCmd, SLocalReducer
STableMetaInfo *pTableMetaInfo = tscGetTableMetaInfoFromCmd(pCmd, pCmd->clauseIndex, 0);
STableComInfo tinfo = tscGetTableInfo(pTableMetaInfo->pTableMeta);
int8_t precision = tinfo.precision;
// for group result interpolation, do not return if not data is generated
if (pQueryInfo->fillType != TSDB_FILL_NONE) {
TSKEY skey = (pQueryInfo->order.order == TSDB_ORDER_ASC)? pQueryInfo->window.skey:pQueryInfo->window.ekey;//MIN(pQueryInfo->window.skey, pQueryInfo->window.ekey);
int64_t newTime =
taosGetIntervalStartTimestamp(skey, pQueryInfo->slidingTime, pQueryInfo->intervalTime, pQueryInfo->slidingTimeUnit, precision);
int64_t newTime = taosTimeTruncate(skey, &pQueryInfo->interval, tinfo.precision);
taosResetFillInfo(pLocalReducer->pFillInfo, newTime);
}
}
......
......@@ -142,7 +142,7 @@ int tsParseTime(SStrToken *pToken, int64_t *time, char **next, char *error, int1
return tscInvalidSQLErrMsg(error, "value expected in timestamp", sToken.z);
}
if (getTimestampInUsFromStr(valueToken.z, valueToken.n, &interval) != TSDB_CODE_SUCCESS) {
if (parseAbsoluteDuration(valueToken.z, valueToken.n, &interval) != TSDB_CODE_SUCCESS) {
return TSDB_CODE_TSC_INVALID_SQL;
}
......
......@@ -259,11 +259,11 @@ int tscBuildQueryStreamDesc(void *pMsg, STscObj *pObj) {
pSdesc->num = htobe64(pStream->num);
pSdesc->useconds = htobe64(pStream->useconds);
pSdesc->stime = htobe64(pStream->stime - pStream->intervalTime);
pSdesc->stime = htobe64(pStream->stime - pStream->interval.interval);
pSdesc->ctime = htobe64(pStream->ctime);
pSdesc->slidingTime = htobe64(pStream->slidingTime);
pSdesc->interval = htobe64(pStream->intervalTime);
pSdesc->slidingTime = htobe64(pStream->interval.sliding);
pSdesc->interval = htobe64(pStream->interval.interval);
pHeartbeat->numOfStreams++;
pSdesc++;
......
......@@ -81,6 +81,7 @@ static void setColumnOffsetValueInResultset(SQueryInfo* pQueryInfo);
static int32_t parseGroupbyClause(SQueryInfo* pQueryInfo, tVariantList* pList, SSqlCmd* pCmd);
static int32_t parseIntervalClause(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SQuerySQL* pQuerySql);
static int32_t parseOffsetClause(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SQuerySQL* pQuerySql);
static int32_t parseSlidingClause(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SQuerySQL* pQuerySql);
static int32_t addProjectionExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSQLExprItem* pItem);
......@@ -350,7 +351,7 @@ int32_t tscToSQLCmd(SSqlObj* pSql, struct SSqlInfo* pInfo) {
case TSDB_SQL_DESCRIBE_TABLE: {
SStrToken* pToken = &pInfo->pDCLInfo->a[0];
const char* msg1 = "invalid table name";
const char* msg2 = "table name is too long";
const char* msg2 = "table name too long";
if (tscValidateName(pToken) != TSDB_CODE_SUCCESS) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
......@@ -409,7 +410,6 @@ int32_t tscToSQLCmd(SSqlObj* pSql, struct SSqlInfo* pInfo) {
const char* msg3 = "name too long";
pCmd->command = pInfo->type;
// tDCLSQL* pDCL = pInfo->pDCLInfo;
SUserInfo* pUser = &pInfo->pDCLInfo->user;
SStrToken* pName = &pUser->user;
......@@ -595,24 +595,28 @@ int32_t parseIntervalClause(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SQuerySQL* pQ
// interval is not null
SStrToken* t = &pQuerySql->interval;
if (parseDuration(t->z, t->n, &pQueryInfo->intervalTime, &pQueryInfo->intervalTimeUnit) != TSDB_CODE_SUCCESS) {
if (parseNatualDuration(t->z, t->n, &pQueryInfo->interval.interval, &pQueryInfo->interval.intervalUnit) != TSDB_CODE_SUCCESS) {
return TSDB_CODE_TSC_INVALID_SQL;
}
if (pQueryInfo->intervalTimeUnit != 'n' && pQueryInfo->intervalTimeUnit != 'y') {
if (pQueryInfo->interval.intervalUnit != 'n' && pQueryInfo->interval.intervalUnit != 'y') {
// if the unit of time window value is millisecond, change the value from microsecond
if (tinfo.precision == TSDB_TIME_PRECISION_MILLI) {
pQueryInfo->intervalTime = pQueryInfo->intervalTime / 1000;
pQueryInfo->interval.interval = pQueryInfo->interval.interval / 1000;
}
// interval cannot be less than 10 milliseconds
if (pQueryInfo->intervalTime < tsMinIntervalTime) {
if (pQueryInfo->interval.interval < tsMinIntervalTime) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2);
}
}
// for top/bottom + interval query, we do not add additional timestamp column in the front
if (isTopBottomQuery(pQueryInfo)) {
if (parseOffsetClause(pCmd, pQueryInfo, pQuerySql) != TSDB_CODE_SUCCESS) {
return TSDB_CODE_TSC_INVALID_SQL;
}
if (parseSlidingClause(pCmd, pQueryInfo, pQuerySql) != TSDB_CODE_SUCCESS) {
return TSDB_CODE_TSC_INVALID_SQL;
}
......@@ -636,7 +640,7 @@ int32_t parseIntervalClause(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SQuerySQL* pQ
* check invalid SQL:
* select tbname, tags_fields from super_table_name interval(1s)
*/
if (tscQueryTags(pQueryInfo) && pQueryInfo->intervalTime > 0) {
if (tscQueryTags(pQueryInfo) && pQueryInfo->interval.interval > 0) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
......@@ -662,6 +666,10 @@ int32_t parseIntervalClause(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SQuerySQL* pQ
SColumnIndex index = {tableIndex, PRIMARYKEY_TIMESTAMP_COL_INDEX};
tscAddSpecialColumnForSelect(pQueryInfo, 0, TSDB_FUNC_TS, &index, &s, TSDB_COL_NORMAL);
if (parseOffsetClause(pCmd, pQueryInfo, pQuerySql) != TSDB_CODE_SUCCESS) {
return TSDB_CODE_TSC_INVALID_SQL;
}
if (parseSlidingClause(pCmd, pQueryInfo, pQuerySql) != TSDB_CODE_SUCCESS) {
return TSDB_CODE_TSC_INVALID_SQL;
}
......@@ -669,6 +677,57 @@ int32_t parseIntervalClause(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SQuerySQL* pQ
return TSDB_CODE_SUCCESS;
}
int32_t parseOffsetClause(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SQuerySQL* pQuerySql) {
const char* msg1 = "interval offset cannot be negative";
const char* msg2 = "interval offset should be shorter than interval";
const char* msg3 = "cannot use 'year' as offset when interval is 'month'";
STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
STableComInfo tinfo = tscGetTableInfo(pTableMetaInfo->pTableMeta);
SStrToken* t = &pQuerySql->offset;
if (t->n == 0) {
pQueryInfo->interval.offsetUnit = pQueryInfo->interval.intervalUnit;
pQueryInfo->interval.offset = 0;
return TSDB_CODE_SUCCESS;
}
if (parseNatualDuration(t->z, t->n, &pQueryInfo->interval.offset, &pQueryInfo->interval.offsetUnit) != TSDB_CODE_SUCCESS) {
return TSDB_CODE_TSC_INVALID_SQL;
}
if (pQueryInfo->interval.offset < 0) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
if (pQueryInfo->interval.offsetUnit != 'n' && pQueryInfo->interval.offsetUnit != 'y') {
// if the unit of time window value is millisecond, change the value from microsecond
if (tinfo.precision == TSDB_TIME_PRECISION_MILLI) {
pQueryInfo->interval.offset = pQueryInfo->interval.offset / 1000;
}
if (pQueryInfo->interval.intervalUnit != 'n' && pQueryInfo->interval.intervalUnit != 'y') {
if (pQueryInfo->interval.offset >= pQueryInfo->interval.interval) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2);
}
}
} else if (pQueryInfo->interval.offsetUnit == pQueryInfo->interval.intervalUnit) {
if (pQueryInfo->interval.offset >= pQueryInfo->interval.interval) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2);
}
} else if (pQueryInfo->interval.intervalUnit == 'n' && pQueryInfo->interval.offsetUnit == 'y') {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3);
} else if (pQueryInfo->interval.intervalUnit == 'y' && pQueryInfo->interval.offsetUnit == 'n') {
if (pQueryInfo->interval.interval * 12 <= pQueryInfo->interval.offset) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2);
}
} else {
// TODO: offset should be shorter than interval, but how to check
// conflicts like 30days offset and 1 month interval
}
return TSDB_CODE_SUCCESS;
}
int32_t parseSlidingClause(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SQuerySQL* pQuerySql) {
const char* msg0 = "sliding value too small";
const char* msg1 = "sliding value no larger than the interval value";
......@@ -682,29 +741,29 @@ int32_t parseSlidingClause(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SQuerySQL* pQu
SStrToken* pSliding = &pQuerySql->sliding;
if (pSliding->n == 0) {
pQueryInfo->slidingTimeUnit = pQueryInfo->intervalTimeUnit;
pQueryInfo->slidingTime = pQueryInfo->intervalTime;
pQueryInfo->interval.slidingUnit = pQueryInfo->interval.intervalUnit;
pQueryInfo->interval.sliding = pQueryInfo->interval.interval;
return TSDB_CODE_SUCCESS;
}
if (pQueryInfo->intervalTimeUnit == 'n' || pQueryInfo->intervalTimeUnit == 'y') {
if (pQueryInfo->interval.intervalUnit == 'n' || pQueryInfo->interval.intervalUnit == 'y') {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3);
}
getTimestampInUsFromStr(pSliding->z, pSliding->n, &pQueryInfo->slidingTime);
parseAbsoluteDuration(pSliding->z, pSliding->n, &pQueryInfo->interval.sliding);
if (tinfo.precision == TSDB_TIME_PRECISION_MILLI) {
pQueryInfo->slidingTime /= 1000;
pQueryInfo->interval.sliding /= 1000;
}
if (pQueryInfo->slidingTime < tsMinSlidingTime) {
if (pQueryInfo->interval.sliding < tsMinSlidingTime) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg0);
}
if (pQueryInfo->slidingTime > pQueryInfo->intervalTime) {
if (pQueryInfo->interval.sliding > pQueryInfo->interval.interval) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
if ((pQueryInfo->intervalTime != 0) && (pQueryInfo->intervalTime/pQueryInfo->slidingTime > INTERVAL_SLIDING_FACTOR)) {
if ((pQueryInfo->interval.interval != 0) && (pQueryInfo->interval.interval/pQueryInfo->interval.sliding > INTERVAL_SLIDING_FACTOR)) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2);
}
......@@ -713,7 +772,7 @@ int32_t parseSlidingClause(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SQuerySQL* pQu
int32_t tscSetTableFullName(STableMetaInfo* pTableMetaInfo, SStrToken* pzTableName, SSqlObj* pSql) {
const char* msg1 = "name too long";
const char* msg2 = "current database name is invalid";
const char* msg2 = "current database or database name invalid";
SSqlCmd* pCmd = &pSql->cmd;
int32_t code = TSDB_CODE_SUCCESS;
......@@ -4716,9 +4775,9 @@ int32_t validateSqlFunctionInStreamSql(SSqlCmd* pCmd, SQueryInfo* pQueryInfo) {
const char* msg0 = "sample interval can not be less than 10ms.";
const char* msg1 = "functions not allowed in select clause";
if (pQueryInfo->intervalTime != 0 && pQueryInfo->intervalTime < 10 &&
pQueryInfo->intervalTimeUnit != 'n' &&
pQueryInfo->intervalTimeUnit != 'y') {
if (pQueryInfo->interval.interval != 0 && pQueryInfo->interval.interval < 10 &&
pQueryInfo->interval.intervalUnit != 'n' &&
pQueryInfo->interval.intervalUnit != 'y') {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg0);
}
......@@ -5503,7 +5562,7 @@ static int32_t doAddGroupbyColumnsOnDemand(SSqlCmd* pCmd, SQueryInfo* pQueryInfo
insertResultField(pQueryInfo, (int32_t)size, &ids, bytes, (int8_t)type, name, pExpr);
} else {
// if this query is "group by" normal column, interval is not allowed
if (pQueryInfo->intervalTime > 0) {
if (pQueryInfo->interval.interval > 0) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2);
}
......@@ -5536,7 +5595,7 @@ int32_t doFunctionsCompatibleCheck(SSqlCmd* pCmd, SQueryInfo* pQueryInfo) {
// only retrieve tags, group by is not supportted
if (tscQueryTags(pQueryInfo)) {
if (pQueryInfo->groupbyExpr.numOfGroupCols > 0 || pQueryInfo->intervalTime > 0) {
if (pQueryInfo->groupbyExpr.numOfGroupCols > 0 || pQueryInfo->interval.interval > 0) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg4);
} else {
return TSDB_CODE_SUCCESS;
......@@ -5988,7 +6047,7 @@ int32_t doCheckForStream(SSqlObj* pSql, SSqlInfo* pInfo) {
if (parseIntervalClause(pCmd, pQueryInfo, pQuerySql) != TSDB_CODE_SUCCESS) {
return TSDB_CODE_TSC_INVALID_SQL;
} else {
if ((pQueryInfo->intervalTime > 0) &&
if ((pQueryInfo->interval.interval > 0) &&
(validateFunctionsInIntervalOrGroupbyQuery(pCmd, pQueryInfo) != TSDB_CODE_SUCCESS)) {
return TSDB_CODE_TSC_INVALID_SQL;
}
......@@ -6018,7 +6077,7 @@ int32_t doCheckForStream(SSqlObj* pSql, SSqlInfo* pInfo) {
* not here.
*/
if (pQuerySql->fillType != NULL) {
if (pQueryInfo->intervalTime == 0) {
if (pQueryInfo->interval.interval == 0) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3);
}
......@@ -6186,7 +6245,7 @@ int32_t doCheckForQuery(SSqlObj* pSql, SQuerySQL* pQuerySql, int32_t index) {
if (parseIntervalClause(pCmd, pQueryInfo, pQuerySql) != TSDB_CODE_SUCCESS) {
return TSDB_CODE_TSC_INVALID_SQL;
} else {
if ((pQueryInfo->intervalTime > 0) &&
if ((pQueryInfo->interval.interval > 0) &&
(validateFunctionsInIntervalOrGroupbyQuery(pCmd, pQueryInfo) != TSDB_CODE_SUCCESS)) {
return TSDB_CODE_TSC_INVALID_SQL;
}
......@@ -6237,14 +6296,19 @@ int32_t doCheckForQuery(SSqlObj* pSql, SQuerySQL* pQuerySql, int32_t index) {
* the columns may be increased due to group by operation
*/
if (pQuerySql->fillType != NULL) {
if (pQueryInfo->intervalTime == 0 && (!tscIsPointInterpQuery(pQueryInfo))) {
if (pQueryInfo->interval.interval == 0 && (!tscIsPointInterpQuery(pQueryInfo))) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg5);
}
if (pQueryInfo->intervalTime > 0 && pQueryInfo->intervalTimeUnit != 'n' && pQueryInfo->intervalTimeUnit != 'y') {
if (pQueryInfo->interval.interval > 0 && pQueryInfo->interval.intervalUnit != 'n' && pQueryInfo->interval.intervalUnit != 'y') {
bool initialWindows = TSWINDOW_IS_EQUAL(pQueryInfo->window, TSWINDOW_INITIALIZER);
if (initialWindows) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg6);
}
int64_t timeRange = ABS(pQueryInfo->window.skey - pQueryInfo->window.ekey);
// number of result is not greater than 10,000,000
if ((timeRange == 0) || (timeRange / pQueryInfo->intervalTime) > MAX_INTERVAL_TIME_WINDOW) {
if ((timeRange == 0) || (timeRange / pQueryInfo->interval.interval) > MAX_INTERVAL_TIME_WINDOW) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg6);
}
}
......@@ -6387,4 +6451,4 @@ bool hasNormalColumnFilter(SQueryInfo* pQueryInfo) {
}
return false;
}
\ No newline at end of file
}
......@@ -647,8 +647,8 @@ int tscBuildQueryMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
return TSDB_CODE_TSC_INVALID_SQL;
}
if (pQueryInfo->intervalTime < 0) {
tscError("%p illegal value of aggregation time interval in query msg: %ld", pSql, pQueryInfo->intervalTime);
if (pQueryInfo->interval.interval < 0) {
tscError("%p illegal value of aggregation time interval in query msg: %ld", pSql, pQueryInfo->interval.interval);
return TSDB_CODE_TSC_INVALID_SQL;
}
......@@ -675,10 +675,12 @@ int tscBuildQueryMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
pQueryMsg->limit = htobe64(pQueryInfo->limit.limit);
pQueryMsg->offset = htobe64(pQueryInfo->limit.offset);
pQueryMsg->numOfCols = htons((int16_t)taosArrayGetSize(pQueryInfo->colList));
pQueryMsg->intervalTime = htobe64(pQueryInfo->intervalTime);
pQueryMsg->slidingTime = htobe64(pQueryInfo->slidingTime);
pQueryMsg->intervalTimeUnit = pQueryInfo->intervalTimeUnit;
pQueryMsg->slidingTimeUnit = pQueryInfo->slidingTimeUnit;
pQueryMsg->interval.interval = htobe64(pQueryInfo->interval.interval);
pQueryMsg->interval.sliding = htobe64(pQueryInfo->interval.sliding);
pQueryMsg->interval.offset = htobe64(pQueryInfo->interval.offset);
pQueryMsg->interval.intervalUnit = pQueryInfo->interval.intervalUnit;
pQueryMsg->interval.slidingUnit = pQueryInfo->interval.slidingUnit;
pQueryMsg->interval.offsetUnit = pQueryInfo->interval.offsetUnit;
pQueryMsg->numOfGroupCols = htons(pQueryInfo->groupbyExpr.numOfGroupCols);
pQueryMsg->numOfTags = htonl(numOfTags);
pQueryMsg->tagNameRelType = htons(pQueryInfo->tagCond.relType);
......
......@@ -51,7 +51,7 @@ static int64_t tscGetRetryDelayTime(SSqlStream* pStream, int64_t slidingTime, in
int64_t retryDelta = (int64_t)(tsStreamCompRetryDelay * retryRangeFactor);
retryDelta = ((rand() % retryDelta) + tsStreamCompRetryDelay) * 1000L;
if (pStream->intervalTimeUnit != 'n' && pStream->intervalTimeUnit != 'y') {
if (pStream->interval.intervalUnit != 'n' && pStream->interval.intervalUnit != 'y') {
// change to ms
if (prec == TSDB_TIME_PRECISION_MICRO) {
slidingTime = slidingTime / 1000;
......@@ -87,7 +87,7 @@ static void tscProcessStreamLaunchQuery(SSchedMsg *pMsg) {
// failed to get meter/metric meta, retry in 10sec.
if (code != TSDB_CODE_SUCCESS) {
int64_t retryDelayTime = tscGetRetryDelayTime(pStream, pStream->slidingTime, pStream->precision);
int64_t retryDelayTime = tscGetRetryDelayTime(pStream, pStream->interval.sliding, pStream->precision);
tscDebug("%p stream:%p,get metermeta failed, retry in %" PRId64 "ms", pStream->pSql, pStream, retryDelayTime);
tscSetRetryTimer(pStream, pSql, retryDelayTime);
......@@ -132,15 +132,16 @@ static void tscProcessStreamTimer(void *handle, void *tmrId) {
}
if (etime > pStream->etime) {
etime = pStream->etime;
} else if (pStream->intervalTimeUnit != 'y' && pStream->intervalTimeUnit != 'n') {
etime = pStream->stime + (etime - pStream->stime) / pStream->intervalTime * pStream->intervalTime;
} else if (pStream->interval.intervalUnit != 'y' && pStream->interval.intervalUnit != 'n') {
etime = pStream->stime + (etime - pStream->stime) / pStream->interval.interval * pStream->interval.interval;
} else {
etime = taosGetIntervalStartTimestamp(etime, pStream->slidingTime, pStream->intervalTime, pStream->slidingTimeUnit, pStream->precision);
etime = taosTimeTruncate(etime, &pStream->interval, pStream->precision);
//etime = taosGetIntervalStartTimestamp(etime, pStream->interval.sliding, pStream->interval.sliding, pStream->interval.slidingUnit, pStream->precision);
}
pQueryInfo->window.ekey = etime;
if (pQueryInfo->window.skey >= pQueryInfo->window.ekey) {
int64_t timer = pStream->slidingTime;
if (pStream->intervalTimeUnit == 'y' || pStream->intervalTimeUnit == 'n') {
int64_t timer = pStream->interval.sliding;
if (pStream->interval.intervalUnit == 'y' || pStream->interval.intervalUnit == 'n') {
timer = 86400 * 1000l;
} else if (pStream->precision == TSDB_TIME_PRECISION_MICRO) {
timer /= 1000l;
......@@ -162,7 +163,7 @@ static void tscProcessStreamTimer(void *handle, void *tmrId) {
static void tscProcessStreamQueryCallback(void *param, TAOS_RES *tres, int numOfRows) {
SSqlStream *pStream = (SSqlStream *)param;
if (tres == NULL || numOfRows < 0) {
int64_t retryDelay = tscGetRetryDelayTime(pStream, pStream->slidingTime, pStream->precision);
int64_t retryDelay = tscGetRetryDelayTime(pStream, pStream->interval.sliding, pStream->precision);
tscError("%p stream:%p, query data failed, code:0x%08x, retry in %" PRId64 "ms", pStream->pSql, pStream, numOfRows,
retryDelay);
......@@ -223,7 +224,7 @@ static void tscProcessStreamRetrieveResult(void *param, TAOS_RES *res, int numOf
SSqlObj * pSql = (SSqlObj *)res;
if (pSql == NULL || numOfRows < 0) {
int64_t retryDelayTime = tscGetRetryDelayTime(pStream, pStream->slidingTime, pStream->precision);
int64_t retryDelayTime = tscGetRetryDelayTime(pStream, pStream->interval.sliding, pStream->precision);
tscError("%p stream:%p, retrieve data failed, code:0x%08x, retry in %" PRId64 "ms", pSql, pStream, numOfRows, retryDelayTime);
tscSetRetryTimer(pStream, pStream->pSql, retryDelayTime);
......@@ -246,11 +247,7 @@ static void tscProcessStreamRetrieveResult(void *param, TAOS_RES *res, int numOf
}
if (!pStream->isProject) {
if (pStream->intervalTimeUnit == 'y' || pStream->intervalTimeUnit == 'n') {
pStream->stime = taosAddNatualInterval(pStream->stime, pStream->slidingTime, pStream->slidingTimeUnit, pStream->precision);
} else {
pStream->stime += pStream->slidingTime;
}
pStream->stime = taosTimeAdd(pStream->stime, pStream->interval.sliding, pStream->interval.slidingUnit, pStream->precision);
}
// actually only one row is returned. this following is not necessary
taos_fetch_rows_a(res, tscProcessStreamRetrieveResult, pStream);
......@@ -310,7 +307,7 @@ static void tscSetRetryTimer(SSqlStream *pStream, SSqlObj *pSql, int64_t timer)
now + timer, timer, delay, pStream->stime, etime);
} else {
tscDebug("%p stream:%p, next start at %" PRId64 ", in %" PRId64 "ms. delay:%" PRId64 "ms qrange %" PRId64 "-%" PRId64, pStream->pSql, pStream,
pStream->stime, timer, delay, pStream->stime - pStream->intervalTime, pStream->stime - 1);
pStream->stime, timer, delay, pStream->stime - pStream->interval.interval, pStream->stime - 1);
}
pSql->cmd.command = TSDB_SQL_SELECT;
......@@ -324,12 +321,12 @@ static int64_t getLaunchTimeDelay(const SSqlStream* pStream) {
(pStream->precision == TSDB_TIME_PRECISION_MICRO) ? tsMaxStreamComputDelay * 1000L : tsMaxStreamComputDelay;
int64_t delayDelta = maxDelay;
if (pStream->intervalTimeUnit != 'n' && pStream->intervalTimeUnit != 'y') {
delayDelta = (int64_t)(pStream->slidingTime * tsStreamComputDelayRatio);
if (pStream->interval.intervalUnit != 'n' && pStream->interval.intervalUnit != 'y') {
delayDelta = (int64_t)(pStream->interval.sliding * tsStreamComputDelayRatio);
if (delayDelta > maxDelay) {
delayDelta = maxDelay;
}
int64_t remainTimeWindow = pStream->slidingTime - delayDelta;
int64_t remainTimeWindow = pStream->interval.sliding - delayDelta;
if (maxDelay > remainTimeWindow) {
maxDelay = (int64_t)(remainTimeWindow / 1.5f);
}
......@@ -337,8 +334,8 @@ static int64_t getLaunchTimeDelay(const SSqlStream* pStream) {
int64_t currentDelay = (rand() % maxDelay); // a random number
currentDelay += delayDelta;
if (pStream->intervalTimeUnit != 'n' && pStream->intervalTimeUnit != 'y') {
assert(currentDelay < pStream->slidingTime);
if (pStream->interval.intervalUnit != 'n' && pStream->interval.intervalUnit != 'y') {
assert(currentDelay < pStream->interval.sliding);
}
return currentDelay;
......@@ -353,7 +350,7 @@ static void tscSetNextLaunchTimer(SSqlStream *pStream, SSqlObj *pSql) {
* for project query, no mater fetch data successfully or not, next launch will issue
* more than the sliding time window
*/
timer = pStream->slidingTime;
timer = pStream->interval.sliding;
if (pStream->stime > pStream->etime) {
tscDebug("%p stream:%p, stime:%" PRId64 " is larger than end time: %" PRId64 ", stop the stream", pStream->pSql, pStream,
pStream->stime, pStream->etime);
......@@ -366,7 +363,8 @@ static void tscSetNextLaunchTimer(SSqlStream *pStream, SSqlObj *pSql) {
return;
}
} else {
int64_t stime = taosGetIntervalStartTimestamp(pStream->stime - 1, pStream->intervalTime, pStream->intervalTime, pStream->intervalTimeUnit, pStream->precision);
int64_t stime = taosTimeTruncate(pStream->stime - 1, &pStream->interval, pStream->precision);
//int64_t stime = taosGetIntervalStartTimestamp(pStream->stime - 1, pStream->interval.interval, pStream->interval.interval, pStream->interval.intervalUnit, pStream->precision);
if (stime >= pStream->etime) {
tscDebug("%p stream:%p, stime:%" PRId64 " is larger than end time: %" PRId64 ", stop the stream", pStream->pSql, pStream,
pStream->stime, pStream->etime);
......@@ -400,43 +398,43 @@ static void tscSetSlidingWindowInfo(SSqlObj *pSql, SSqlStream *pStream) {
SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0);
if (pQueryInfo->intervalTimeUnit != 'n' && pQueryInfo->intervalTimeUnit != 'y' && pQueryInfo->intervalTime < minIntervalTime) {
if (pQueryInfo->interval.intervalUnit != 'n' && pQueryInfo->interval.intervalUnit!= 'y' && pQueryInfo->interval.interval < minIntervalTime) {
tscWarn("%p stream:%p, original sample interval:%ld too small, reset to:%" PRId64, pSql, pStream,
pQueryInfo->intervalTime, minIntervalTime);
pQueryInfo->intervalTime = minIntervalTime;
pQueryInfo->interval.interval, minIntervalTime);
pQueryInfo->interval.interval = minIntervalTime;
}
pStream->intervalTimeUnit = pQueryInfo->intervalTimeUnit;
pStream->intervalTime = pQueryInfo->intervalTime; // it shall be derived from sql string
pStream->interval.intervalUnit = pQueryInfo->interval.intervalUnit;
pStream->interval.interval = pQueryInfo->interval.interval; // it shall be derived from sql string
if (pQueryInfo->slidingTime <= 0) {
pQueryInfo->slidingTime = pQueryInfo->intervalTime;
pQueryInfo->slidingTimeUnit = pQueryInfo->intervalTimeUnit;
if (pQueryInfo->interval.sliding <= 0) {
pQueryInfo->interval.sliding = pQueryInfo->interval.interval;
pQueryInfo->interval.slidingUnit = pQueryInfo->interval.intervalUnit;
}
int64_t minSlidingTime =
(pStream->precision == TSDB_TIME_PRECISION_MICRO) ? tsMinSlidingTime * 1000L : tsMinSlidingTime;
if (pQueryInfo->intervalTimeUnit != 'n' && pQueryInfo->intervalTimeUnit != 'y' && pQueryInfo->slidingTime < minSlidingTime) {
if (pQueryInfo->interval.intervalUnit != 'n' && pQueryInfo->interval.intervalUnit!= 'y' && pQueryInfo->interval.sliding < minSlidingTime) {
tscWarn("%p stream:%p, original sliding value:%" PRId64 " too small, reset to:%" PRId64, pSql, pStream,
pQueryInfo->slidingTime, minSlidingTime);
pQueryInfo->interval.sliding, minSlidingTime);
pQueryInfo->slidingTime = minSlidingTime;
pQueryInfo->interval.sliding = minSlidingTime;
}
if (pQueryInfo->slidingTime > pQueryInfo->intervalTime) {
if (pQueryInfo->interval.sliding > pQueryInfo->interval.interval) {
tscWarn("%p stream:%p, sliding value:%" PRId64 " can not be larger than interval range, reset to:%" PRId64, pSql, pStream,
pQueryInfo->slidingTime, pQueryInfo->intervalTime);
pQueryInfo->interval.sliding, pQueryInfo->interval.interval);
pQueryInfo->slidingTime = pQueryInfo->intervalTime;
pQueryInfo->interval.sliding = pQueryInfo->interval.interval;
}
pStream->slidingTimeUnit = pQueryInfo->slidingTimeUnit;
pStream->slidingTime = pQueryInfo->slidingTime;
pStream->interval.slidingUnit = pQueryInfo->interval.slidingUnit;
pStream->interval.sliding = pQueryInfo->interval.sliding;
if (pStream->isProject) {
pQueryInfo->intervalTime = 0; // clear the interval value to avoid the force time window split by query processor
pQueryInfo->slidingTime = 0;
pQueryInfo->interval.interval = 0; // clear the interval value to avoid the force time window split by query processor
pQueryInfo->interval.sliding = 0;
}
}
......@@ -445,8 +443,8 @@ static int64_t tscGetStreamStartTimestamp(SSqlObj *pSql, SSqlStream *pStream, in
if (pStream->isProject) {
// no data in table, flush all data till now to destination meter, 10sec delay
pStream->intervalTime = tsProjectExecInterval;
pStream->slidingTime = tsProjectExecInterval;
pStream->interval.interval = tsProjectExecInterval;
pStream->interval.sliding = tsProjectExecInterval;
if (stime != 0) { // first projection start from the latest event timestamp
assert(stime >= pQueryInfo->window.skey);
......@@ -459,12 +457,15 @@ static int64_t tscGetStreamStartTimestamp(SSqlObj *pSql, SSqlStream *pStream, in
stime = pQueryInfo->window.skey;
if (stime == INT64_MIN) {
stime = (int64_t)taosGetTimestamp(pStream->precision);
stime = taosGetIntervalStartTimestamp(stime, pStream->intervalTime, pStream->intervalTime, pStream->intervalTimeUnit, pStream->precision);
stime = taosGetIntervalStartTimestamp(stime - 1, pStream->intervalTime, pStream->intervalTime, pStream->intervalTimeUnit, pStream->precision);
stime = taosTimeTruncate(stime, &pStream->interval, pStream->precision);
stime = taosTimeTruncate(stime - 1, &pStream->interval, pStream->precision);
//stime = taosGetIntervalStartTimestamp(stime, pStream->interval.interval, pStream->interval.interval, pStream->interval.intervalUnit, pStream->precision);
//stime = taosGetIntervalStartTimestamp(stime - 1, pStream->interval.interval, pStream->interval.interval, pStream->interval.intervalUnit, pStream->precision);
tscWarn("%p stream:%p, last timestamp:0, reset to:%" PRId64, pSql, pStream, stime);
}
} else {
int64_t newStime = taosGetIntervalStartTimestamp(stime, pStream->intervalTime, pStream->intervalTime, pStream->intervalTimeUnit, pStream->precision);
//int64_t newStime = taosGetIntervalStartTimestamp(stime, pStream->interval.interval, pStream->interval.interval, pStream->interval.intervalUnit, pStream->precision);
int64_t newStime = taosTimeTruncate(stime, &pStream->interval, pStream->precision);
if (newStime != stime) {
tscWarn("%p stream:%p, last timestamp:%" PRId64 ", reset to:%" PRId64, pSql, pStream, stime, newStime);
stime = newStime;
......@@ -534,7 +535,7 @@ static void tscCreateStream(void *param, TAOS_RES *res, int code) {
taosTmrReset(tscProcessStreamTimer, (int32_t)starttime, pStream, tscTmr, &pStream->pTimer);
tscDebug("%p stream:%p is opened, query on:%s, interval:%" PRId64 ", sliding:%" PRId64 ", first launched in:%" PRId64 ", sql:%s", pSql,
pStream, pTableMetaInfo->name, pStream->intervalTime, pStream->slidingTime, starttime, pSql->sqlstr);
pStream, pTableMetaInfo->name, pStream->interval.interval, pStream->interval.sliding, starttime, pSql->sqlstr);
}
TAOS_STREAM *taos_open_stream(TAOS *taos, const char *sqlstr, void (*fp)(void *param, TAOS_RES *, TAOS_ROW row),
......
......@@ -113,7 +113,7 @@ static int64_t doTSBlockIntersect(SSqlObj* pSql, SJoinSupporter* pSupporter1, SJ
* in case of stable query, limit/offset is not applied here. the limit/offset is applied to the
* final results which is acquired after the secondry merge of in the client.
*/
if (pLimit->offset == 0 || pQueryInfo->intervalTime > 0 || QUERY_IS_STABLE_QUERY(pQueryInfo->type)) {
if (pLimit->offset == 0 || pQueryInfo->interval.interval > 0 || QUERY_IS_STABLE_QUERY(pQueryInfo->type)) {
if (win->skey > elem1.ts) {
win->skey = elem1.ts;
}
......@@ -178,6 +178,7 @@ SJoinSupporter* tscCreateJoinSupporter(SSqlObj* pSql, SSubqueryState* pState, in
pSupporter->subqueryIndex = index;
SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, pSql->cmd.clauseIndex);
memcpy(&pSupporter->interval, &pQueryInfo->interval, sizeof(pSupporter->interval));
pSupporter->limit = pQueryInfo->limit;
STableMetaInfo* pTableMetaInfo = tscGetTableMetaInfoFromCmd(&pSql->cmd, pSql->cmd.clauseIndex, index);
......@@ -297,18 +298,20 @@ static int32_t tscLaunchRealSubqueries(SSqlObj* pSql) {
success = false;
break;
}
tscClearSubqueryInfo(&pNew->cmd);
pSql->pSubs[i] = pNew;
SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(&pNew->cmd, 0);
pQueryInfo->tsBuf = pTSBuf; // transfer the ownership of timestamp comp-z data to the new created object
// set the second stage sub query for join process
TSDB_QUERY_SET_TYPE(pQueryInfo->type, TSDB_QUERY_TYPE_JOIN_SEC_STAGE);
memcpy(&pQueryInfo->interval, &pSupporter->interval, sizeof(pQueryInfo->interval));
tscTagCondCopy(&pQueryInfo->tagCond, &pSupporter->tagCond);
pQueryInfo->colList = pSupporter->colList;
pQueryInfo->exprList = pSupporter->exprList;
pQueryInfo->fieldsInfo = pSupporter->fieldsInfo;
......@@ -1204,7 +1207,7 @@ int32_t tscCreateJoinSubquery(SSqlObj *pSql, int16_t tableIndex, SJoinSupporter
}
pNew->cmd.numOfCols = 0;
pNewQueryInfo->intervalTime = 0;
pNewQueryInfo->interval.interval = 0;
pSupporter->limit = pNewQueryInfo->limit;
pNewQueryInfo->limit.limit = -1;
......@@ -2185,7 +2188,7 @@ void **doSetResultRowData(SSqlObj *pSql, bool finalResult) {
}
// primary key column cannot be null in interval query, no need to check
if (i == 0 && pQueryInfo->intervalTime > 0) {
if (i == 0 && pQueryInfo->interval.interval > 0) {
continue;
}
......
......@@ -1849,6 +1849,7 @@ static void doSetSqlExprAndResultFieldInfo(SQueryInfo* pQueryInfo, SQueryInfo* p
}
assert(matched);
(void)matched;
}
tscFieldInfoUpdateOffset(pNewQueryInfo);
......@@ -1899,10 +1900,7 @@ SSqlObj* createSubqueryObj(SSqlObj* pSql, int16_t tableIndex, void (*fp)(), void
SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex);
pNewQueryInfo->command = pQueryInfo->command;
pNewQueryInfo->intervalTimeUnit = pQueryInfo->intervalTimeUnit;
pNewQueryInfo->slidingTimeUnit = pQueryInfo->slidingTimeUnit;
pNewQueryInfo->intervalTime = pQueryInfo->intervalTime;
pNewQueryInfo->slidingTime = pQueryInfo->slidingTime;
memcpy(&pNewQueryInfo->interval, &pQueryInfo->interval, sizeof(pNewQueryInfo->interval));
pNewQueryInfo->type = pQueryInfo->type;
pNewQueryInfo->window = pQueryInfo->window;
pNewQueryInfo->limit = pQueryInfo->limit;
......
......@@ -35,8 +35,6 @@ bool tscValidateTableNameLength(size_t len);
SColumnFilterInfo* tscFilterInfoClone(const SColumnFilterInfo* src, int32_t numOfFilters);
int64_t taosAddNatualInterval(int64_t key, int64_t intervalTime, char timeUnit, int16_t precision);
int32_t taosCountNatualInterval(int64_t skey, int64_t ekey, int64_t intervalTime, char timeUnit, int16_t precision);
int64_t taosGetIntervalStartTimestamp(int64_t startTime, int64_t slidingTime, int64_t intervalTime, char timeUnit, int16_t precision);
// int64_t taosGetIntervalStartTimestamp(int64_t startTime, int64_t slidingTime, int64_t intervalTime, char timeUnit, int16_t precision);
#endif // TDENGINE_NAME_H
......@@ -99,62 +99,7 @@ SColumnFilterInfo* tscFilterInfoClone(const SColumnFilterInfo* src, int32_t numO
return pFilter;
}
int64_t taosAddNatualInterval(int64_t key, int64_t intervalTime, char timeUnit, int16_t precision) {
key /= 1000;
if (precision == TSDB_TIME_PRECISION_MICRO) {
key /= 1000;
}
struct tm tm;
time_t t = (time_t)key;
localtime_r(&t, &tm);
if (timeUnit == 'y') {
intervalTime *= 12;
}
int mon = (int)(tm.tm_year * 12 + tm.tm_mon + intervalTime);
tm.tm_year = mon / 12;
tm.tm_mon = mon % 12;
key = mktime(&tm) * 1000L;
if (precision == TSDB_TIME_PRECISION_MICRO) {
key *= 1000L;
}
return key;
}
int32_t taosCountNatualInterval(int64_t skey, int64_t ekey, int64_t intervalTime, char timeUnit, int16_t precision) {
skey /= 1000;
ekey /= 1000;
if (precision == TSDB_TIME_PRECISION_MICRO) {
skey /= 1000;
ekey /= 1000;
}
if (ekey < skey) {
int64_t tmp = ekey;
ekey = skey;
skey = tmp;
}
struct tm tm;
time_t t = (time_t)skey;
localtime_r(&t, &tm);
int smon = tm.tm_year * 12 + tm.tm_mon;
t = (time_t)ekey;
localtime_r(&t, &tm);
int emon = tm.tm_year * 12 + tm.tm_mon;
if (timeUnit == 'y') {
intervalTime *= 12;
}
return (emon - smon) / (int32_t)intervalTime;
}
#if 0
int64_t taosGetIntervalStartTimestamp(int64_t startTime, int64_t slidingTime, int64_t intervalTime, char timeUnit, int16_t precision) {
if (slidingTime == 0) {
return startTime;
......@@ -219,6 +164,8 @@ int64_t taosGetIntervalStartTimestamp(int64_t startTime, int64_t slidingTime, in
return start;
}
#endif
/*
* tablePrefix.columnName
* extract table name and save it in pTable, with only column name in pToken
......
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>
<groupId>com.taosdata.jdbc</groupId>
<artifactId>taos-jdbcdriver</artifactId>
<version>2.0.0</version>
<packaging>jar</packaging>
<name>JDBCDriver</name>
<url>https://github.com/taosdata/TDengine/tree/master/src/connector/jdbc</url>
<description>TDengine JDBC Driver</description>
<licenses>
<license>
<name>GNU AFFERO GENERAL PUBLIC LICENSE Version 3</name>
<url>https://github.com/taosdata/TDengine/blob/master/LICENSE</url>
<distribution>repo</distribution>
</license>
</licenses>
<scm>
<connection>scm:git:git://github.com/taosdata/TDengine.git</connection>
<developerConnection>scm:git:git@github.com:taosdata/TDengine.git</developerConnection>
<modelVersion>4.0.0</modelVersion>
<groupId>com.taosdata.jdbc</groupId>
<artifactId>taos-jdbcdriver</artifactId>
<version>2.0.0</version>
<packaging>jar</packaging>
<name>JDBCDriver</name>
<url>https://github.com/taosdata/TDengine/tree/master/src/connector/jdbc</url>
<tag>HEAD</tag>
</scm>
<developers>
<developer>
<name>taosdata</name>
<email>support@taosdata.com</email>
<organization>https://www.taosdata.com/</organization>
<organizationUrl>https://www.taosdata.com/</organizationUrl>
</developer>
</developers>
<properties>
<project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
<java.version>1.8</java.version>
<maven-compiler-plugin.version>3.6.0</maven-compiler-plugin.version>
<commons-logging.version>1.1.2</commons-logging.version>
<commons-lang3.version>3.5</commons-lang3.version>
</properties>
<dependencies>
<dependency>
<groupId>commons-logging</groupId>
<artifactId>commons-logging</artifactId>
<version>${commons-logging.version}</version>
<exclusions>
<exclusion>
<groupId>*</groupId>
<artifactId>*</artifactId>
</exclusion>
</exclusions>
</dependency>
<dependency>
<groupId>junit</groupId>
<artifactId>junit</artifactId>
<version>4.13</version>
<scope>test</scope>
</dependency>
</dependencies>
<build>
<plugins>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-assembly-plugin</artifactId>
<version>3.0.0</version>
<configuration>
<descriptors>
<descriptor>src/main/assembly/assembly-jar.xml</descriptor>
</descriptors>
</configuration>
<executions>
<execution>
<id>make-assembly</id>
<phase>package</phase>
<goals>
<goal>single</goal>
</goals>
</execution>
</executions>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-compiler-plugin</artifactId>
<version>${maven-compiler-plugin.version}</version>
<configuration>
<encoding>UTF-8</encoding>
<source>${java.version}</source>
<target>${java.version}</target>
<debug>true</debug>
<showDeprecation>true</showDeprecation>
</configuration>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-surefire-plugin</artifactId>
<version>2.12.4</version>
<configuration>
<testFailureIgnore>true</testFailureIgnore>
</configuration>
</plugin>
</plugins>
</build>
<description>TDengine JDBC Driver</description>
<licenses>
<license>
<name>GNU AFFERO GENERAL PUBLIC LICENSE Version 3</name>
<url>https://github.com/taosdata/TDengine/blob/master/LICENSE</url>
<distribution>repo</distribution>
</license>
</licenses>
<scm>
<connection>scm:git:git://github.com/taosdata/TDengine.git</connection>
<developerConnection>scm:git:git@github.com:taosdata/TDengine.git</developerConnection>
<url>https://github.com/taosdata/TDengine/tree/master/src/connector/jdbc</url>
<tag>HEAD</tag>
</scm>
<developers>
<developer>
<name>taosdata</name>
<email>support@taosdata.com</email>
<organization>https://www.taosdata.com/</organization>
<organizationUrl>https://www.taosdata.com/</organizationUrl>
</developer>
</developers>
<properties>
<project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
<java.version>1.8</java.version>
<maven-compiler-plugin.version>3.6.0</maven-compiler-plugin.version>
<commons-logging.version>1.1.2</commons-logging.version>
<commons-lang3.version>3.5</commons-lang3.version>
</properties>
<dependencies>
<dependency>
<groupId>commons-logging</groupId>
<artifactId>commons-logging</artifactId>
<version>${commons-logging.version}</version>
<exclusions>
<exclusion>
<groupId>*</groupId>
<artifactId>*</artifactId>
</exclusion>
</exclusions>
</dependency>
<dependency>
<groupId>junit</groupId>
<artifactId>junit</artifactId>
<version>4.13</version>
<scope>test</scope>
</dependency>
</dependencies>
<build>
<plugins>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-assembly-plugin</artifactId>
<version>3.0.0</version>
<configuration>
<descriptors>
<descriptor>src/main/assembly/assembly-jar.xml</descriptor>
</descriptors>
</configuration>
<executions>
<execution>
<id>make-assembly</id>
<phase>package</phase>
<goals>
<goal>single</goal>
</goals>
</execution>
</executions>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-compiler-plugin</artifactId>
<version>${maven-compiler-plugin.version}</version>
<configuration>
<encoding>UTF-8</encoding>
<source>${java.version}</source>
<target>${java.version}</target>
<debug>true</debug>
<showDeprecation>true</showDeprecation>
</configuration>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-surefire-plugin</artifactId>
<version>2.12.4</version>
<configuration>
<testFailureIgnore>true</testFailureIgnore>
</configuration>
</plugin>
</plugins>
</build>
</project>
......@@ -53,66 +53,12 @@ public class TSDBConnection implements Connection {
public TSDBConnection(Properties info, TSDBDatabaseMetaData meta) throws SQLException {
this.dbMetaData = meta;
//load taos.cfg start
File cfgDir = loadConfigDir(info.getProperty(TSDBDriver.PROPERTY_KEY_CONFIG_DIR));
File cfgFile = cfgDir.listFiles((dir, name) -> "taos.cfg".equalsIgnoreCase(name))[0];
List<String> endpoints = loadConfigEndpoints(cfgFile);
if (!endpoints.isEmpty()) {
info.setProperty(TSDBDriver.PROPERTY_KEY_HOST, endpoints.get(0).split(":")[0]);
info.setProperty(TSDBDriver.PROPERTY_KEY_PORT, endpoints.get(0).split(":")[1]);
}
//load taos.cfg end
connect(info.getProperty(TSDBDriver.PROPERTY_KEY_HOST),
Integer.parseInt(info.getProperty(TSDBDriver.PROPERTY_KEY_PORT, "0")),
info.getProperty(TSDBDriver.PROPERTY_KEY_DBNAME), info.getProperty(TSDBDriver.PROPERTY_KEY_USER),
info.getProperty(TSDBDriver.PROPERTY_KEY_PASSWORD));
}
private List<String> loadConfigEndpoints(File cfgFile) {
List<String> endpoints = new ArrayList<>();
try (BufferedReader reader = new BufferedReader(new FileReader(cfgFile))) {
String line = null;
while ((line = reader.readLine()) != null) {
if (line.trim().startsWith("firstEp") || line.trim().startsWith("secondEp")) {
endpoints.add(line.substring(line.indexOf('p') + 1).trim());
}
if (endpoints.size() > 1)
break;
}
} catch (FileNotFoundException e) {
e.printStackTrace();
} catch (IOException e) {
e.printStackTrace();
}
return endpoints;
}
/**
* @param cfgDirPath
* @return return the config dir
**/
private File loadConfigDir(String cfgDirPath) {
if (cfgDirPath == null)
return loadDefaultConfigDir();
File cfgDir = new File(cfgDirPath);
if (!cfgDir.exists())
return loadDefaultConfigDir();
return cfgDir;
}
/**
* @return search the default config dir, if the config dir is not exist will return null
*/
private File loadDefaultConfigDir() {
File cfgDir;
File cfgDir_linux = new File("/etc/taos");
cfgDir = cfgDir_linux.exists() ? cfgDir_linux : null;
File cfgDir_windows = new File("C:\\TDengine\\cfg");
cfgDir = (cfgDir == null && cfgDir_windows.exists()) ? cfgDir_windows : cfgDir;
return cfgDir;
}
private void connect(String host, int port, String dbName, String user, String password) throws SQLException {
this.connector = new TSDBJNIConnector();
this.connector.connect(host, port, dbName, user, password);
......
......@@ -68,15 +68,15 @@ public class TSDBDatabaseMetaData implements java.sql.DatabaseMetaData {
}
public boolean nullsAreSortedLow() throws SQLException {
return false;
return !nullsAreSortedHigh();
}
public boolean nullsAreSortedAtStart() throws SQLException {
return false;
return true;
}
public boolean nullsAreSortedAtEnd() throws SQLException {
return false;
return !nullsAreSortedAtStart();
}
public String getDatabaseProductName() throws SQLException {
......
......@@ -242,7 +242,7 @@ public class TSDBStatement implements Statement {
public void addBatch(String sql) throws SQLException {
if (batchedArgs == null) {
batchedArgs = new ArrayList<String>();
batchedArgs = new ArrayList<>();
}
batchedArgs.add(sql);
}
......
......@@ -101,6 +101,7 @@ extern const int32_t TYPE_BYTES[11];
#define TSDB_TIME_PRECISION_MILLI 0
#define TSDB_TIME_PRECISION_MICRO 1
#define TSDB_TIME_PRECISION_NANO 2
#define TSDB_TICK_PER_SECOND(precision) ((precision)==TSDB_TIME_PRECISION_MILLI ? 1e3L : ((precision)==TSDB_TIME_PRECISION_MICRO ? 1e6L : 1e9L))
#define TSDB_TIME_PRECISION_MILLI_STR "ms"
#define TSDB_TIME_PRECISION_MICRO_STR "us"
......
......@@ -460,11 +460,7 @@ typedef struct {
int16_t order;
int16_t orderColId;
int16_t numOfCols; // the number of columns will be load from vnode
int64_t intervalTime; // time interval for aggregation, in million second
int64_t intervalOffset; // start offset for interval query
int64_t slidingTime; // value for sliding window
char intervalTimeUnit;
char slidingTimeUnit; // time interval type, for revisement of interval(1d)
SInterval interval;
uint16_t tagCondLen; // tag length in current query
int16_t numOfGroupCols; // num of group by columns
int16_t orderByIdx;
......
......@@ -30,8 +30,6 @@ extern "C" {
#define MILLISECOND_PER_HOUR (MILLISECOND_PER_MINUTE * 60)
#define MILLISECOND_PER_DAY (MILLISECOND_PER_HOUR * 24)
#define MILLISECOND_PER_WEEK (MILLISECOND_PER_DAY * 7)
#define MILLISECOND_PER_MONTH (MILLISECOND_PER_DAY * 30)
#define MILLISECOND_PER_YEAR (MILLISECOND_PER_DAY * 365)
//@return timestamp in second
int32_t taosGetTimestampSec();
......@@ -63,8 +61,22 @@ static FORCE_INLINE int64_t taosGetTimestamp(int32_t precision) {
}
}
int32_t getTimestampInUsFromStr(char* token, int32_t tokenlen, int64_t* ts);
int32_t parseDuration(const char* token, int32_t tokenLen, int64_t* duration, char* unit);
typedef struct SInterval {
char intervalUnit;
char slidingUnit;
char offsetUnit;
int64_t interval;
int64_t sliding;
int64_t offset;
} SInterval;
int64_t taosTimeAdd(int64_t t, int64_t duration, char unit, int32_t precision);
int64_t taosTimeTruncate(int64_t t, const SInterval* pInterval, int32_t precision);
int32_t taosTimeCountInterval(int64_t skey, int64_t ekey, int64_t interval, char unit, int32_t precision);
int32_t parseAbsoluteDuration(char* token, int32_t tokenlen, int64_t* ts);
int32_t parseNatualDuration(const char* token, int32_t tokenLen, int64_t* duration, char* unit);
int32_t taosParseTime(char* timestr, int64_t* time, int32_t len, int32_t timePrec, int8_t dayligth);
void deltaToUtcInitOnce();
......
......@@ -321,7 +321,7 @@ int32_t parseLocaltimeWithDst(char* timestr, int64_t* time, int32_t timePrec) {
}
static int32_t getTimestampInUsFromStrImpl(int64_t val, char unit, int64_t* result) {
static int32_t getDurationInUs(int64_t val, char unit, int64_t* result) {
*result = val;
int64_t factor = 1000L;
......@@ -342,19 +342,12 @@ static int32_t getTimestampInUsFromStrImpl(int64_t val, char unit, int64_t* resu
case 'w':
(*result) *= MILLISECOND_PER_WEEK*factor;
break;
case 'n':
(*result) *= MILLISECOND_PER_MONTH*factor;
break;
case 'y':
(*result) *= MILLISECOND_PER_YEAR*factor;
break;
case 'a':
(*result) *= factor;
break;
case 'u':
break;
default: {
;
return -1;
}
}
......@@ -373,7 +366,7 @@ static int32_t getTimestampInUsFromStrImpl(int64_t val, char unit, int64_t* resu
* n - Months (30 days)
* y - Years (365 days)
*/
int32_t getTimestampInUsFromStr(char* token, int32_t tokenlen, int64_t* ts) {
int32_t parseAbsoluteDuration(char* token, int32_t tokenlen, int64_t* duration) {
errno = 0;
char* endPtr = NULL;
......@@ -383,10 +376,16 @@ int32_t getTimestampInUsFromStr(char* token, int32_t tokenlen, int64_t* ts) {
return -1;
}
return getTimestampInUsFromStrImpl(timestamp, token[tokenlen - 1], ts);
/* natual month/year are not allowed in absolute duration */
char unit = token[tokenlen - 1];
if (unit == 'n' || unit == 'y') {
return -1;
}
return getDurationInUs(timestamp, unit, duration);
}
int32_t parseDuration(const char* token, int32_t tokenLen, int64_t* duration, char* unit) {
int32_t parseNatualDuration(const char* token, int32_t tokenLen, int64_t* duration, char* unit) {
errno = 0;
/* get the basic numeric value */
......@@ -400,7 +399,121 @@ int32_t parseDuration(const char* token, int32_t tokenLen, int64_t* duration, ch
return 0;
}
return getTimestampInUsFromStrImpl(*duration, *unit, duration);
return getDurationInUs(*duration, *unit, duration);
}
int64_t taosTimeAdd(int64_t t, int64_t duration, char unit, int32_t precision) {
if (duration == 0) {
return t;
}
if (unit == 'y') {
duration *= 12;
} else if (unit != 'n') {
return t + duration;
}
struct tm tm;
time_t tt = (time_t)(t / TSDB_TICK_PER_SECOND(precision));
localtime_r(&tt, &tm);
int mon = tm.tm_year * 12 + tm.tm_mon + (int)duration;
tm.tm_year = mon / 12;
tm.tm_mon = mon % 12;
return (int64_t)(mktime(&tm) * TSDB_TICK_PER_SECOND(precision));
}
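`taosTimeAdd` does plain tick arithmetic for every unit except 'n' and 'y'; for those it converts years to months and re-derives the timestamp through `localtime_r`/`mktime`, so the result depends on the local time zone and overflowing day numbers are normalized by `mktime`. A usage sketch (include path and sample timestamp are illustrative):

```c
#include <stdio.h>
#include <inttypes.h>
#include "ttime.h"   // assumed header

int main(void) {
  int64_t jan31 = 1580428800000LL;  // 2020-01-31 00:00:00 UTC, in ms (illustrative)

  // Fixed units: the caller passes the duration already in ticks of the precision.
  int64_t plus30d = taosTimeAdd(jan31, 30LL * 86400 * 1000, 'd', TSDB_TIME_PRECISION_MILLI);

  // Natural month: calendar math in the local time zone; the month length varies.
  int64_t plus1n = taosTimeAdd(jan31, 1, 'n', TSDB_TIME_PRECISION_MILLI);

  printf("+30d -> %" PRId64 "\n+1n  -> %" PRId64 "\n", plus30d, plus1n);
  return 0;
}
```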
int32_t taosTimeCountInterval(int64_t skey, int64_t ekey, int64_t interval, char unit, int32_t precision) {
if (ekey < skey) {
int64_t tmp = ekey;
ekey = skey;
skey = tmp;
}
if (unit != 'n' && unit != 'y') {
return (int32_t)((ekey - skey) / interval);
}
skey /= (int64_t)(TSDB_TICK_PER_SECOND(precision));
ekey /= (int64_t)(TSDB_TICK_PER_SECOND(precision));
struct tm tm;
time_t t = (time_t)skey;
localtime_r(&t, &tm);
int smon = tm.tm_year * 12 + tm.tm_mon;
t = (time_t)ekey;
localtime_r(&t, &tm);
int emon = tm.tm_year * 12 + tm.tm_mon;
if (unit == 'y') {
interval *= 12;
}
return (emon - smon) / (int32_t)interval;
}
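`taosTimeCountInterval` swaps the keys if they arrive out of order, divides directly for fixed units, and for 'n'/'y' counts whole calendar months between the keys in the local time zone. A small sketch (assumed include path):

```c
#include <stdio.h>
#include "ttime.h"   // assumed header

int main(void) {
  // Fixed unit: plain division -> 9 whole 1-second intervals within 9.5 seconds.
  printf("%d\n", taosTimeCountInterval(0, 9500, 1000, 'a', TSDB_TIME_PRECISION_MILLI));

  // Natural months: 2020-01-01 to 2020-04-01 (UTC) -> 3 months apart.
  printf("%d\n", taosTimeCountInterval(1577836800000LL, 1585699200000LL,
                                       1, 'n', TSDB_TIME_PRECISION_MILLI));
  return 0;
}
```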
int64_t taosTimeTruncate(int64_t t, const SInterval* pInterval, int32_t precision) {
if (pInterval->sliding == 0) {
assert(pInterval->interval == 0);
return t;
}
int64_t start = t;
if (pInterval->slidingUnit == 'n' || pInterval->slidingUnit == 'y') {
start /= (int64_t)(TSDB_TICK_PER_SECOND(precision));
struct tm tm;
time_t tt = (time_t)start;
localtime_r(&tt, &tm);
tm.tm_sec = 0;
tm.tm_min = 0;
tm.tm_hour = 0;
tm.tm_mday = 1;
if (pInterval->slidingUnit == 'y') {
tm.tm_mon = 0;
tm.tm_year = (int)(tm.tm_year / pInterval->sliding * pInterval->sliding);
} else {
int mon = tm.tm_year * 12 + tm.tm_mon;
mon = (int)(mon / pInterval->sliding * pInterval->sliding);
tm.tm_year = mon / 12;
tm.tm_mon = mon % 12;
}
start = (int64_t)(mktime(&tm) * TSDB_TICK_PER_SECOND(precision));
} else {
int64_t delta = t - pInterval->interval;
int32_t factor = delta > 0 ? 1 : -1;
start = (delta / pInterval->sliding + factor) * pInterval->sliding;
if (pInterval->intervalUnit == 'd' || pInterval->intervalUnit == 'w') {
/*
* here we revise the start time of the day according to the local time zone,
* but in case of DST, the start time of one day needs to be dynamically decided.
*/
// todo refactor to extract function that is available for Linux/Windows/Mac platform
#if defined(WINDOWS) && _MSC_VER >= 1900
// see https://docs.microsoft.com/en-us/cpp/c-runtime-library/daylight-dstbias-timezone-and-tzname?view=vs-2019
int64_t timezone = _timezone;
int32_t daylight = _daylight;
char** tzname = _tzname;
#endif
start += (int64_t)(timezone * TSDB_TICK_PER_SECOND(precision));
}
int64_t end = start + pInterval->interval - 1;
if (end < t) {
start += pInterval->sliding;
}
}
if (pInterval->offset > 0) {
start = taosTimeAdd(start, pInterval->offset, pInterval->offsetUnit, precision);
if (start > t) {
start = taosTimeAdd(start, -pInterval->interval, pInterval->intervalUnit, precision);
}
}
return start;
}
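For offsets, the window start is first aligned to the sliding step, then shifted forward by the offset, and pulled back one full interval if that overshoots the queried timestamp. A sketch with millisecond units, so no time-zone adjustment is involved (include path assumed):

```c
#include <stdio.h>
#include <inttypes.h>
#include "ttime.h"   // assumed header

int main(void) {
  SInterval itv = {
    .intervalUnit = 'a', .interval = 1000,  // 1-second windows, in ms
    .slidingUnit  = 'a', .sliding  = 1000,
    .offsetUnit   = 'a', .offset   = 500,   // windows start on the half second
  };
  int64_t t = 1000005200LL;                 // an arbitrary ms timestamp, ...5.200
  printf("%" PRId64 "\n", taosTimeTruncate(t, &itv, TSDB_TIME_PRECISION_MILLI));
  // prints 1000004500: the window [...4.500, ...5.499] that contains t
  return 0;
}
```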
// internal function, when program is paused in debugger,
......@@ -411,24 +524,38 @@ int32_t parseDuration(const char* token, int32_t tokenLen, int64_t* duration, ch
// 2020-07-03 17:48:42
// and the parameter can also be a variable.
const char* fmtts(int64_t ts) {
static char buf[32];
static char buf[96];
size_t pos = 0;
struct tm tm;
time_t tt;
if (ts > -62135625943 && ts < 32503651200) {
tt = ts;
} else if (ts > -62135625943000 && ts < 32503651200000) {
tt = ts / 1000;
} else {
tt = ts / 1000000;
time_t t = (time_t)ts;
localtime_r(&t, &tm);
pos += strftime(buf + pos, sizeof(buf), "s=%Y-%m-%d %H:%M:%S", &tm);
}
struct tm* ptm = localtime(&tt);
size_t pos = strftime(buf, sizeof(buf), "%Y-%m-%d %H:%M:%S", ptm);
if (ts > -62135625943000 && ts < 32503651200000) {
time_t t = (time_t)(ts / 1000);
localtime_r(&t, &tm);
if (pos > 0) {
buf[pos++] = ' ';
buf[pos++] = '|';
buf[pos++] = ' ';
}
pos += strftime(buf + pos, sizeof(buf), "ms=%Y-%m-%d %H:%M:%S", &tm);
pos += sprintf(buf + pos, ".%03d", (int)(ts % 1000));
}
if (ts <= -62135625943000 || ts >= 32503651200000) {
sprintf(buf + pos, ".%06d", (int)(ts % 1000000));
} else if (ts <= -62135625943 || ts >= 32503651200) {
sprintf(buf + pos, ".%03d", (int)(ts % 1000));
{
time_t t = (time_t)(ts / 1000000);
localtime_r(&t, &tm);
if (pos > 0) {
buf[pos++] = ' ';
buf[pos++] = '|';
buf[pos++] = ' ';
}
pos += strftime(buf + pos, sizeof(buf), "us=%Y-%m-%d %H:%M:%S", &tm);
pos += sprintf(buf + pos, ".%06d", (int)(ts % 1000000));
}
return buf;
......
......@@ -132,12 +132,9 @@ typedef struct SQueryCostInfo {
typedef struct SQuery {
int16_t numOfCols;
int16_t numOfTags;
char intervalTimeUnit;
char slidingTimeUnit; // interval data type, used for daytime revise
SOrderVal order;
STimeWindow window;
int64_t intervalTime;
int64_t slidingTime; // sliding time for sliding window query
SInterval interval;
int16_t precision;
int16_t numOfOutput;
int16_t fillType;
......
......@@ -51,12 +51,11 @@ typedef struct SFillInfo {
int32_t rowSize; // size of each row
// char ** pTags; // tags value for current interpolation
SFillTagColInfo* pTags; // tags value for filling gap
int64_t slidingTime; // sliding value to determine the number of result for a given time window
SInterval interval;
char * prevValues; // previous row of data, to generate the interpolation results
char * nextValues; // next row of data
char** pData; // original result data block involved in filling data
int32_t capacityInRows; // data buffer size in rows
int8_t slidingUnit; // sliding time unit
int8_t precision; // time resolution
SFillColInfo* pFillCol; // column info for fill operations
} SFillInfo;
......
......@@ -73,12 +73,11 @@ typedef struct SDiskbasedResultBuf {
bool comp; // compressed before flushed to disk
int32_t nextPos; // next page flush position
const void* handle; // for debug purpose
const void* handle; // for debug purpose
SResultBufStatis statis;
} SDiskbasedResultBuf;
#define DEFAULT_INTERN_BUF_PAGE_SIZE (4096L)
#define DEFAULT_INMEM_BUF_PAGES 10
#define DEFAULT_INTERN_BUF_PAGE_SIZE (256L) // in bytes
#define PAGE_INFO_INITIALIZER (SPageDiskInfo){-1, -1}
/**
......
......@@ -65,6 +65,11 @@ typedef struct tVariantList {
tVariantListItem *a; /* One entry for each expression */
} tVariantList;
typedef struct SIntervalVal {
SStrToken interval;
SStrToken offset;
} SIntervalVal;
typedef struct SQuerySQL {
struct tSQLExprList *pSelection; // select clause
tVariantList * from; // from clause
......@@ -72,6 +77,7 @@ typedef struct SQuerySQL {
tVariantList * pGroupby; // groupby clause, only for tags[optional]
tVariantList * pSortOrder; // orderby [optional]
SStrToken interval; // interval [optional]
SStrToken offset; // offset window [optional]
SStrToken sliding; // sliding window [optional]
SLimitVal limit; // limit offset [optional]
SLimitVal slimit; // group limit offset [optional]
......@@ -259,7 +265,7 @@ tSQLExprList *tSQLExprListAppend(tSQLExprList *pList, tSQLExpr *pNode, SStrToken
void tSQLExprListDestroy(tSQLExprList *pList);
SQuerySQL *tSetQuerySQLElems(SStrToken *pSelectToken, tSQLExprList *pSelection, tVariantList *pFrom, tSQLExpr *pWhere,
tVariantList *pGroupby, tVariantList *pSortOrder, SStrToken *pInterval,
tVariantList *pGroupby, tVariantList *pSortOrder, SIntervalVal *pInterval,
SStrToken *pSliding, tVariantList *pFill, SLimitVal *pLimit, SLimitVal *pGLimit);
SCreateTableSQL *tSetCreateSQLElems(tFieldList *pCols, tFieldList *pTags, SStrToken *pMetricName,
......
......@@ -39,7 +39,6 @@ static FORCE_INLINE SWindowResult *getWindowResult(SWindowResInfo *pWindowResInf
}
#define curTimeWindowIndex(_winres) ((_winres)->curIndex)
#define GET_TIMEWINDOW(_winresInfo, _win) (STimeWindow) {(_win)->skey, ((_win)->skey + (_winresInfo)->interval - 1)}
#define GET_ROW_PARAM_FOR_MULTIOUTPUT(_q, tbq, sq) (((tbq) && (!sq))? (_q)->pSelectExpr[1].base.arg->argValue.i64:1)
bool isWindowResClosed(SWindowResInfo *pWindowResInfo, int32_t slot);
......
......@@ -458,9 +458,10 @@ tablelist(A) ::= tablelist(Y) COMMA ids(X) cpxName(Z) ids(F). {
%type tmvar {SStrToken}
tmvar(A) ::= VARIABLE(X). {A = X;}
%type interval_opt {SStrToken}
interval_opt(N) ::= INTERVAL LP tmvar(E) RP. {N = E; }
interval_opt(N) ::= . {N.n = 0; N.z = NULL; N.type = 0; }
%type interval_opt {SIntervalVal}
interval_opt(N) ::= INTERVAL LP tmvar(E) RP. {N.interval = E; N.offset.n = 0; N.offset.z = NULL; N.offset.type = 0;}
interval_opt(N) ::= INTERVAL LP tmvar(E) COMMA tmvar(O) RP. {N.interval = E; N.offset = O;}
interval_opt(N) ::= . {memset(&N, 0, sizeof(N));}
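With the new productions, the offset is simply an optional second duration token inside the parentheses. From the C client the two accepted forms look like this (a sketch assuming the usual taos.h client API, placeholder connection parameters, and the `meters` table used by the tests added in this change):

```c
#include <stdio.h>
#include "taos.h"

int main(void) {
  TAOS *conn = taos_connect("127.0.0.1", "root", "taosdata", "db", 0);
  if (conn == NULL) return 1;

  // 1-minute windows, and 1-minute windows shifted forward by 2 seconds.
  TAOS_RES *r1 = taos_query(conn, "select count(*) from meters interval(1m)");
  TAOS_RES *r2 = taos_query(conn, "select count(*) from meters interval(1m, 2s)");

  taos_free_result(r1);
  taos_free_result(r2);
  taos_close(conn);
  return 0;
}
```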
%type fill_opt {tVariantList*}
%destructor fill_opt {tVariantListDestroy($$);}
......
......@@ -108,7 +108,7 @@ extern "C" {
#define GET_FORWARD_DIRECTION_FACTOR(ord) (((ord) == TSDB_ORDER_ASC) ? QUERY_ASC_FORWARD_STEP : QUERY_DESC_FORWARD_STEP)
#define MAX_INTERVAL_TIME_WINDOW 10000000
#define MAX_INTERVAL_TIME_WINDOW 1000000 // maximum allowed time windows in final results
#define TOP_BOTTOM_QUERY_LIMIT 100
enum {
......
This diff is collapsed.
......@@ -38,8 +38,11 @@ SFillInfo* taosInitFillInfo(int32_t order, TSKEY skey, int32_t numOfTags, int32_
pFillInfo->numOfTags = numOfTags;
pFillInfo->numOfCols = numOfCols;
pFillInfo->precision = precision;
pFillInfo->slidingTime = slidingTime;
pFillInfo->slidingUnit = slidingUnit;
pFillInfo->interval.interval = slidingTime;
pFillInfo->interval.intervalUnit = slidingUnit;
pFillInfo->interval.sliding = slidingTime;
pFillInfo->interval.slidingUnit = slidingUnit;
pFillInfo->pData = malloc(POINTER_BYTES * numOfCols);
if (numOfTags > 0) {
......@@ -108,21 +111,15 @@ void* taosDestoryFillInfo(SFillInfo* pFillInfo) {
return NULL;
}
static TSKEY taosGetRevisedEndKey(TSKEY ekey, int32_t order, int64_t timeInterval, int8_t slidingTimeUnit, int8_t precision) {
if (order == TSDB_ORDER_ASC) {
return ekey;
} else {
return taosGetIntervalStartTimestamp(ekey, timeInterval, timeInterval, slidingTimeUnit, precision);
}
}
void taosFillSetStartInfo(SFillInfo* pFillInfo, int32_t numOfRows, TSKEY endKey) {
if (pFillInfo->fillType == TSDB_FILL_NONE) {
return;
}
pFillInfo->endKey = taosGetRevisedEndKey(endKey, pFillInfo->order, pFillInfo->slidingTime, pFillInfo->slidingUnit,
pFillInfo->precision);
pFillInfo->endKey = endKey;
if (pFillInfo->order != TSDB_ORDER_ASC) {
pFillInfo->endKey = taosTimeTruncate(endKey, &pFillInfo->interval, pFillInfo->precision);
}
pFillInfo->rowIdx = 0;
pFillInfo->numOfRows = numOfRows;
......@@ -172,30 +169,34 @@ int64_t getFilledNumOfRes(SFillInfo* pFillInfo, TSKEY ekey, int32_t maxNumOfRows
int32_t numOfRows = taosNumOfRemainRows(pFillInfo);
TSKEY ekey1 = taosGetRevisedEndKey(ekey, pFillInfo->order, pFillInfo->slidingTime, pFillInfo->slidingUnit,
pFillInfo->precision);
TSKEY ekey1 = ekey;
if (pFillInfo->order != TSDB_ORDER_ASC) {
ekey1 = taosTimeTruncate(ekey, &pFillInfo->interval, pFillInfo->precision);
}
int64_t numOfRes = -1;
if (numOfRows > 0) { // still fill gap within current data block, not generating data after the result set.
TSKEY lastKey = tsList[pFillInfo->numOfRows - 1];
if (pFillInfo->slidingUnit != 'y' && pFillInfo->slidingUnit != 'n') {
numOfRes = (int64_t)(ABS(lastKey - pFillInfo->start) / pFillInfo->slidingTime) + 1;
} else {
numOfRes = taosCountNatualInterval(lastKey, pFillInfo->start, pFillInfo->slidingTime, pFillInfo->slidingUnit, pFillInfo->precision) + 1;
}
numOfRes = taosTimeCountInterval(
lastKey,
pFillInfo->start,
pFillInfo->interval.sliding,
pFillInfo->interval.slidingUnit,
pFillInfo->precision);
numOfRes += 1;
assert(numOfRes >= numOfRows);
} else { // reach the end of data
if ((ekey1 < pFillInfo->start && FILL_IS_ASC_FILL(pFillInfo)) ||
(ekey1 > pFillInfo->start && !FILL_IS_ASC_FILL(pFillInfo))) {
return 0;
}
// the numOfRes rows are all filled with specified policy
if (pFillInfo->slidingUnit != 'y' && pFillInfo->slidingUnit != 'n') {
numOfRes = (ABS(ekey1 - pFillInfo->start) / pFillInfo->slidingTime) + 1;
} else {
numOfRes = taosCountNatualInterval(ekey1, pFillInfo->start, pFillInfo->slidingTime, pFillInfo->slidingUnit, pFillInfo->precision) + 1;
}
numOfRes = taosTimeCountInterval(
ekey1,
pFillInfo->start,
pFillInfo->interval.sliding,
pFillInfo->interval.slidingUnit,
pFillInfo->precision);
numOfRes += 1;
}
return (numOfRes > maxNumOfRows) ? maxNumOfRows : numOfRes;
......@@ -374,12 +375,7 @@ static void doFillResultImpl(SFillInfo* pFillInfo, tFilePage** data, int32_t* nu
setTagsValue(pFillInfo, data, *num);
}
// TODO natual sliding time
if (pFillInfo->slidingUnit != 'n' && pFillInfo->slidingUnit != 'y') {
pFillInfo->start += (pFillInfo->slidingTime * step);
} else {
pFillInfo->start = taosAddNatualInterval(pFillInfo->start, pFillInfo->slidingTime*step, pFillInfo->slidingUnit, pFillInfo->precision);
}
pFillInfo->start = taosTimeAdd(pFillInfo->start, pFillInfo->interval.sliding * step, pFillInfo->interval.slidingUnit, pFillInfo->precision);
pFillInfo->numOfCurrent++;
(*num) += 1;
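Because the fill start now advances through `taosTimeAdd`, a natural-month fill steps by whole calendar months instead of a fixed millisecond count. A standalone sketch of that stepping, assuming an already-aligned start (include path assumed; results follow the local time zone):

```c
#include <stdio.h>
#include <inttypes.h>
#include "ttime.h"   // assumed header

int main(void) {
  // e.g. a window start previously produced by taosTimeTruncate for a 1n interval
  int64_t start = 1577836800000LL;  // 2020-01-01 00:00:00 UTC, in ms (illustrative)
  for (int i = 0; i < 3; ++i) {
    start = taosTimeAdd(start, 1, 'n', TSDB_TIME_PRECISION_MILLI);
    printf("next window start: %" PRId64 "\n", start);
  }
  return 0;
}
```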
......@@ -486,12 +482,7 @@ int32_t generateDataBlockImpl(SFillInfo* pFillInfo, tFilePage** data, int32_t nu
// set the tag value for final result
setTagsValue(pFillInfo, data, num);
// TODO natual sliding time
if (pFillInfo->slidingUnit != 'n' && pFillInfo->slidingUnit != 'y') {
pFillInfo->start += (pFillInfo->slidingTime * step);
} else {
pFillInfo->start = taosAddNatualInterval(pFillInfo->start, pFillInfo->slidingTime*step, pFillInfo->slidingUnit, pFillInfo->precision);
}
pFillInfo->start = taosTimeAdd(pFillInfo->start, pFillInfo->interval.sliding*step, pFillInfo->interval.slidingUnit, pFillInfo->precision);
pFillInfo->rowIdx += 1;
pFillInfo->numOfCurrent +=1;
......
......@@ -135,7 +135,7 @@ tSQLExpr *tSQLExprIdValueCreate(SStrToken *pToken, int32_t optrType) {
pSQLExpr->val.nType = TSDB_DATA_TYPE_BIGINT;
pSQLExpr->nSQLOptr = TK_TIMESTAMP; // TK_TIMESTAMP used to denote the time value is in microsecond
} else if (optrType == TK_VARIABLE) {
int32_t ret = getTimestampInUsFromStr(pToken->z, pToken->n, &pSQLExpr->val.i64Key);
int32_t ret = parseAbsoluteDuration(pToken->z, pToken->n, &pSQLExpr->val.i64Key);
UNUSED(ret);
pSQLExpr->val.nType = TSDB_DATA_TYPE_BIGINT;
......@@ -443,44 +443,6 @@ void setDBName(SStrToken *pCpxName, SStrToken *pDB) {
pCpxName->n = pDB->n;
}
int32_t getTimestampInUsFromStrImpl(int64_t val, char unit, int64_t *result) {
*result = val;
switch (unit) {
case 's':
(*result) *= MILLISECOND_PER_SECOND;
break;
case 'm':
(*result) *= MILLISECOND_PER_MINUTE;
break;
case 'h':
(*result) *= MILLISECOND_PER_HOUR;
break;
case 'd':
(*result) *= MILLISECOND_PER_DAY;
break;
case 'w':
(*result) *= MILLISECOND_PER_WEEK;
break;
case 'n':
(*result) *= MILLISECOND_PER_MONTH;
break;
case 'y':
(*result) *= MILLISECOND_PER_YEAR;
break;
case 'a':
break;
default: {
;
return -1;
}
}
/* get the value in microsecond */
(*result) *= 1000L;
return 0;
}
void tSQLSetColumnInfo(TAOS_FIELD *pField, SStrToken *pName, TAOS_FIELD *pType) {
int32_t maxLen = sizeof(pField->name) / sizeof(pField->name[0]);
......@@ -535,7 +497,7 @@ void tSQLSetColumnType(TAOS_FIELD *pField, SStrToken *type) {
* extract the select info out of sql string
*/
SQuerySQL *tSetQuerySQLElems(SStrToken *pSelectToken, tSQLExprList *pSelection, tVariantList *pFrom, tSQLExpr *pWhere,
tVariantList *pGroupby, tVariantList *pSortOrder, SStrToken *pInterval,
tVariantList *pGroupby, tVariantList *pSortOrder, SIntervalVal *pInterval,
SStrToken *pSliding, tVariantList *pFill, SLimitVal *pLimit, SLimitVal *pGLimit) {
assert(pSelection != NULL);
......@@ -558,7 +520,8 @@ SQuerySQL *tSetQuerySQLElems(SStrToken *pSelectToken, tSQLExprList *pSelection,
}
if (pInterval != NULL) {
pQuery->interval = *pInterval;
pQuery->interval = pInterval->interval;
pQuery->offset = pInterval->offset;
}
if (pSliding != NULL) {
......
......@@ -54,7 +54,7 @@ int32_t initWindowResInfo(SWindowResInfo *pWindowResInfo, SQueryRuntimeEnv *pRun
return TSDB_CODE_QRY_OUT_OF_MEMORY;
}
pWindowResInfo->interval = pRuntimeEnv->pQuery->intervalTime;
pWindowResInfo->interval = pRuntimeEnv->pQuery->interval.interval;
pSummary->internalSupSize += sizeof(SWindowResult) * threshold;
pSummary->internalSupSize += (pRuntimeEnv->pQuery->numOfOutput * sizeof(SResultInfo) + pRuntimeEnv->interBufSize) * pWindowResInfo->capacity;
......
This diff is collapsed.
......@@ -511,9 +511,9 @@ static SSyncPeer *syncAddPeer(SSyncNode *pNode, const SNodeInfo *pInfo) {
sInfo("%s, it is configured", pPeer->id);
int ret = strcmp(pPeer->fqdn, tsNodeFqdn);
if (pPeer->nodeId == 0 || (ret > 0) || (ret == 0 && pPeer->port > tsSyncPort)) {
sDebug("%s, start to check peer connection", pPeer->id);
int32_t checkMs = 100 + (pNode->vgId * 10) % 100;
if (pNode->vgId > 1) checkMs = tsStatusInterval * 2000 + checkMs;
sDebug("%s, start to check peer connection after %d ms", pPeer->id, checkMs);
taosTmrReset(syncCheckPeerConnection, checkMs, pPeer, syncTmrCtrl, &pPeer->timer);
}
......
......@@ -2707,4 +2707,5 @@ void tsdbDestroyTableGroup(STableGroupInfo *pGroupList) {
}
taosArrayDestroy(pGroupList->pGroupList);
pGroupList->numOfTables = 0;
}
File mode changed from 100755 to 100644
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# -*- coding: utf-8 -*-
import os
import sys
sys.path.insert(0, os.getcwd())
from util.log import *
from util.sql import *
from util.dnodes import *
import taos
class TwoClients:
def initConnection(self):
self.host = "127.0.0.1"
self.user = "root"
self.password = "taosdata"
self.config = "/home/xp/git/TDengine/sim/dnode1/cfg"
def run(self):
tdDnodes.init("")
tdDnodes.setTestCluster(False)
tdDnodes.setValgrind(False)
tdDnodes.stopAll()
tdDnodes.deploy(1)
tdDnodes.start(1)
# first client creates a stable and inserts data
conn1 = taos.connect(self.host, self.user, self.password, self.config)
cursor1 = conn1.cursor()
cursor1.execute("drop database if exists db")
cursor1.execute("create database db")
cursor1.execute("use db")
cursor1.execute("create table tb (ts timestamp, id int) tags(loc nchar(30))")
cursor1.execute("insert into t0 using tb tags('beijing') values(now, 1)")
# second client alters the table created by the first client
conn2 = taos.connect(self.host, self.user, self.password, self.config)
cursor2 = conn2.cursor()
cursor2.execute("use db")
cursor2.execute("alter table tb add column name nchar(30)")
# first client should not be able to use the original metadata
tdSql.init(cursor1, True)
tdSql.error("insert into t0 values(now, 2)")
# first client should be able to insert data with updated metadata
tdSql.execute("insert into t0 values(now, 2, 'test')")
tdSql.query("select * from tb")
tdSql.checkRows(2)
# second client drop the table
cursor2.execute("drop table t0")
cursor2.execute("create table t0 using tb tags('beijing')")
tdSql.execute("insert into t0 values(now, 2, 'test')")
tdSql.query("select * from tb")
tdSql.checkRows(1)
# error expected when two clients drop the same column
cursor2.execute("alter table tb drop column name")
tdSql.error("alter table tb drop column name")
cursor2.execute("alter table tb add column speed int")
tdSql.error("alter table tb add column speed int")
tdSql.execute("alter table tb add column size int")
tdSql.query("describe tb")
tdSql.checkRows(5)
tdSql.checkData(0, 0, "ts")
tdSql.checkData(1, 0, "id")
tdSql.checkData(2, 0, "speed")
tdSql.checkData(3, 0, "size")
tdSql.checkData(4, 0, "loc")
cursor1.close()
cursor2.close()
conn1.close()
conn2.close()
clients = TwoClients()
clients.initConnection()
clients.run()
\ No newline at end of file
......@@ -186,6 +186,8 @@ python3 ./test.py -f functions/function_sum.py
python3 ./test.py -f functions/function_top.py
#python3 ./test.py -f functions/function_twa.py
python3 queryCount.py
python3 ./test.py -f query/queryGroupbyWithInterval.py
python3 client/twoClients.py
# tools
python3 test.py -f tools/taosdemo.py
###################################################################
# Copyright (c) 2020 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# -*- coding: utf-8 -*-
import sys
import taos
from util.log import *
from util.cases import *
from util.sql import *
class TDTestCase:
def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor())
def general(self):
tdSql.execute("create table meters(ts timestamp, s int) tags(id int)")
tdSql.execute("create table t0 using meters tags(0)")
tdSql.execute("create table t1 using meters tags(1)")
tdSql.execute("create table t2 using meters tags(2)")
tdSql.execute("create table t3 using meters tags(3)")
tdSql.execute("create table t4 using meters tags(4)")
tdSql.execute("insert into t0 values('2019-01-01 00:00:00', 1)")
tdSql.execute("insert into t1 values('2019-01-01 00:00:01', 1)")
tdSql.execute("insert into t2 values('2019-01-01 00:01:00', 1)")
tdSql.execute("insert into t1 values('2019-01-01 00:01:01', 1)")
tdSql.execute("insert into t1 values('2019-01-01 00:01:02', 1)")
tdSql.execute("insert into t1 values('2019-01-01 00:01:03', 1)")
tdSql.execute("insert into t1 values('2019-01-01 00:01:30', 1)")
tdSql.execute("insert into t1 values('2019-01-01 00:01:50', 1)")
tdSql.execute("insert into t2 values('2019-01-01 00:02:00', 1)")
tdSql.execute("insert into t3 values('2019-01-01 00:02:02', 1)")
tdSql.execute("insert into t3 values('2019-01-01 00:02:59', 1)")
tdSql.execute("insert into t4 values('2019-01-01 00:02:59', 1)")
tdSql.execute("insert into t1 values('2019-01-01 00:03:10', 1)")
tdSql.execute("insert into t2 values('2019-01-01 00:08:00', 1)")
tdSql.execute("insert into t1 values('2019-01-01 00:08:00', 1)")
tdSql.query("select count(*) from meters interval(1m, 1s)")
tdSql.checkData(0, 1, 1)
tdSql.checkData(1, 1, 2)
tdSql.checkData(2, 1, 6)
tdSql.checkData(3, 1, 3)
tdSql.checkData(4, 1, 1)
tdSql.checkData(5, 1, 2)
tdSql.query("select count(*) from meters interval(1m, 2s)")
tdSql.checkData(0, 1, 2)
tdSql.checkData(1, 1, 2)
tdSql.checkData(2, 1, 5)
tdSql.checkData(3, 1, 3)
tdSql.checkData(4, 1, 1)
tdSql.checkData(5, 1, 2)
tdSql.query("select count(*) from meters interval(90s, 1500a)")
tdSql.checkData(0, 1, 2)
tdSql.checkData(1, 1, 5)
tdSql.checkData(2, 1, 5)
tdSql.checkData(3, 1, 1)
tdSql.checkData(4, 1, 2)
def singleTable(self):
tdSql.execute("create table car(ts timestamp, s int)")
tdSql.execute("insert into car values('2019-01-01 00:00:00', 1)")
tdSql.execute("insert into car values('2019-05-13 12:00:00', 1)")
tdSql.execute("insert into car values('2019-12-31 23:59:59', 1)")
tdSql.execute("insert into car values('2020-01-01 12:00:00', 1)")
tdSql.execute("insert into car values('2020-01-02 12:00:00', 1)")
tdSql.execute("insert into car values('2020-01-03 12:00:00', 1)")
tdSql.execute("insert into car values('2020-01-04 12:00:00', 1)")
tdSql.execute("insert into car values('2020-01-05 12:00:00', 1)")
tdSql.execute("insert into car values('2020-01-31 12:00:00', 1)")
tdSql.execute("insert into car values('2020-02-01 12:00:00', 1)")
tdSql.execute("insert into car values('2020-02-02 12:00:00', 1)")
tdSql.execute("insert into car values('2020-02-29 12:00:00', 1)")
tdSql.execute("insert into car values('2020-03-01 12:00:00', 1)")
tdSql.execute("insert into car values('2020-03-02 12:00:00', 1)")
tdSql.execute("insert into car values('2020-03-15 12:00:00', 1)")
tdSql.execute("insert into car values('2020-03-31 12:00:00', 1)")
tdSql.execute("insert into car values('2020-05-01 12:00:00', 1)")
tdSql.query("select count(*) from car interval(1n, 10d)")
tdSql.checkData(0, 1, 1)
tdSql.checkData(1, 1, 1)
tdSql.checkData(2, 1, 6)
tdSql.checkData(3, 1, 3)
tdSql.checkData(4, 1, 3)
tdSql.checkData(5, 1, 2)
tdSql.checkData(6, 1, 1)
tdSql.query("select count(*) from car interval(1n, 10d) order by ts desc")
tdSql.checkData(0, 1, 1)
tdSql.checkData(1, 1, 2)
tdSql.checkData(2, 1, 3)
tdSql.checkData(3, 1, 3)
tdSql.checkData(4, 1, 6)
tdSql.checkData(5, 1, 1)
tdSql.checkData(6, 1, 1)
tdSql.query("select count(*) from car interval(2n, 5d)")
tdSql.checkData(0, 1, 1)
tdSql.checkData(1, 1, 1)
tdSql.checkData(2, 1, 6)
tdSql.checkData(3, 1, 6)
tdSql.checkData(4, 1, 3)
tdSql.query("select count(*) from car interval(2n) order by ts desc")
tdSql.checkData(0, 1, 3)
tdSql.checkData(1, 1, 6)
tdSql.checkData(2, 1, 6)
tdSql.checkData(3, 1, 1)
tdSql.checkData(4, 1, 1)
tdSql.query("select count(*) from car interval(1y, 1n)")
tdSql.checkData(0, 1, 1)
tdSql.checkData(1, 1, 8)
tdSql.checkData(2, 1, 8)
tdSql.query("select count(*) from car interval(1y, 2n)")
tdSql.checkData(0, 1, 1)
tdSql.checkData(1, 1, 11)
tdSql.checkData(2, 1, 5)
tdSql.query("select count(*) from car where ts > '2019-05-14 00:00:00' interval(1y, 5d)")
tdSql.checkData(0, 1, 6)
tdSql.checkData(1, 1, 9)
def superTable(self):
tdSql.execute("create table cars(ts timestamp, s int) tags(id int)")
tdSql.execute("create table car0 using cars tags(0)")
tdSql.execute("create table car1 using cars tags(1)")
tdSql.execute("create table car2 using cars tags(2)")
tdSql.execute("create table car3 using cars tags(3)")
tdSql.execute("create table car4 using cars tags(4)")
tdSql.execute("insert into car0 values('2019-01-01 00:00:00', 1)")
tdSql.execute("insert into car1 values('2019-05-13 12:00:00', 1)")
tdSql.execute("insert into car2 values('2019-12-31 23:59:59', 1)")
tdSql.execute("insert into car1 values('2020-01-01 12:00:00', 1)")
tdSql.execute("insert into car1 values('2020-01-02 12:00:00', 1)")
tdSql.execute("insert into car1 values('2020-01-03 12:00:00', 1)")
tdSql.execute("insert into car1 values('2020-01-04 12:00:00', 1)")
tdSql.execute("insert into car1 values('2020-01-05 12:00:00', 1)")
tdSql.execute("insert into car1 values('2020-01-31 12:00:00', 1)")
tdSql.execute("insert into car1 values('2020-02-01 12:00:00', 1)")
tdSql.execute("insert into car2 values('2020-02-02 12:00:00', 1)")
tdSql.execute("insert into car2 values('2020-02-29 12:00:00', 1)")
tdSql.execute("insert into car3 values('2020-03-01 12:00:00', 1)")
tdSql.execute("insert into car3 values('2020-03-02 12:00:00', 1)")
tdSql.execute("insert into car3 values('2020-03-15 12:00:00', 1)")
tdSql.execute("insert into car4 values('2020-03-31 12:00:00', 1)")
tdSql.execute("insert into car3 values('2020-05-01 12:00:00', 1)")
tdSql.query("select count(*) from cars interval(1n, 10d)")
tdSql.checkData(0, 1, 1)
tdSql.checkData(1, 1, 1)
tdSql.checkData(2, 1, 6)
tdSql.checkData(3, 1, 3)
tdSql.checkData(4, 1, 3)
tdSql.checkData(5, 1, 2)
tdSql.checkData(6, 1, 1)
tdSql.query("select count(*) from cars interval(1n, 10d) order by ts desc")
tdSql.checkData(0, 1, 1)
tdSql.checkData(1, 1, 2)
tdSql.checkData(2, 1, 3)
tdSql.checkData(3, 1, 3)
tdSql.checkData(4, 1, 6)
tdSql.checkData(5, 1, 1)
tdSql.checkData(6, 1, 1)
tdSql.query("select count(*) from cars interval(2n, 5d)")
tdSql.checkData(0, 1, 1)
tdSql.checkData(1, 1, 1)
tdSql.checkData(2, 1, 6)
tdSql.checkData(3, 1, 6)
tdSql.checkData(4, 1, 3)
tdSql.query("select count(*) from cars interval(2n) order by ts desc")
tdSql.checkData(0, 1, 3)
tdSql.checkData(1, 1, 6)
tdSql.checkData(2, 1, 6)
tdSql.checkData(3, 1, 1)
tdSql.checkData(4, 1, 1)
tdSql.query("select count(*) from cars interval(1y, 1n)")
tdSql.checkData(0, 1, 1)
tdSql.checkData(1, 1, 8)
tdSql.checkData(2, 1, 8)
tdSql.query("select count(*) from cars interval(1y, 2n)")
tdSql.checkData(0, 1, 1)
tdSql.checkData(1, 1, 11)
tdSql.checkData(2, 1, 5)
tdSql.query("select count(*) from cars where ts > '2019-05-14 00:00:00' interval(1y, 5d)")
tdSql.checkData(0, 1, 6)
tdSql.checkData(1, 1, 9)
def run(self):
tdSql.prepare()
self.general()
self.singleTable()
self.superTable()
def stop(self):
tdSql.close()
tdLog.success("%s successfully executed" % __file__)
tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())
......@@ -89,10 +89,10 @@ class TDTestCase:
def superTable(self):
tdSql.execute("create table cars(ts timestamp, s int) tags(id int)")
tdSql.execute("create table car0 using cars tags(0)")
tdSql.execute("create table car1 using cars tags(0)")
tdSql.execute("create table car2 using cars tags(0)")
tdSql.execute("create table car3 using cars tags(0)")
tdSql.execute("create table car4 using cars tags(0)")
tdSql.execute("create table car1 using cars tags(1)")
tdSql.execute("create table car2 using cars tags(2)")
tdSql.execute("create table car3 using cars tags(3)")
tdSql.execute("create table car4 using cars tags(4)")
tdSql.execute("insert into car0 values('2019-01-01 00:00:00', 1)")
tdSql.execute("insert into car1 values('2019-05-13 12:00:00', 1)")
......
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# -*- coding: utf-8 -*-
import sys
import taos
from util.log import tdLog
from util.cases import tdCases
from util.sql import tdSql
class TDTestCase:
def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor(), logSql)
def run(self):
tdSql.prepare()
tdSql.execute(
"create table stest(ts timestamp,size INT,filenum INT) tags (appname binary(500),tenant binary(500))")
tdSql.execute(
"insert into test1 using stest tags('test1','aaa') values ('2020-09-04 16:53:54.003',210,3)")
tdSql.execute(
"insert into test2 using stest tags('test1','aaa') values ('2020-09-04 16:53:56.003',210,3)")
tdSql.execute(
"insert into test11 using stest tags('test11','bbb') values ('2020-09-04 16:53:57.003',210,3)")
tdSql.execute(
"insert into test12 using stest tags('test11','bbb') values ('2020-09-04 16:53:58.003',210,3)")
tdSql.execute(
"insert into test21 using stest tags('test21','ccc') values ('2020-09-04 16:53:59.003',210,3)")
tdSql.execute(
"insert into test22 using stest tags('test21','ccc') values ('2020-09-04 16:54:54.003',210,3)")
tdSql.query("select sum(size) from stest interval(1d) group by appname")
tdSql.checkRows(3)
def stop(self):
tdSql.close()
tdLog.success("%s successfully executed" % __file__)
tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())
......@@ -3,7 +3,7 @@ sleep 3000
system sh/deploy.sh -n dnode1 -i 1
system sh/cfg.sh -n dnode1 -c wallevel -v 0
system sh/cfg.sh -n dnode1 -c http -v 1
system sh/cfg.sh -n dnode1 -c maxSQLLength -v 7340032
system sh/cfg.sh -n dnode1 -c maxSQLLength -v 340032
system sh/exec.sh -n dnode1 -s start
sleep 3000
......
......@@ -86,8 +86,6 @@ print ========== insert data by multi-format
sql create table abc.tk_mt (ts timestamp, a int, b binary(16), c bool, d float, e double, f nchar(16)) tags (t1 int, t2 binary(16))
sql create table abc.tk_subt001 using tk_mt tags(1, 'subt001')
sql insert into abc.tk_subt001 values (now-1y, 1, 'binary_1', true, 1.001, 2.001, 'nchar_1')
sql insert into abc.tk_subt001 values (now-1n, 2, 'binary_2', true, 1.002, 2.002, 'nchar_2')
sql insert into abc.tk_subt001 values (now-1w, 3, 'binary_3', true, 1.003, 2.003, 'nchar_3')
sql insert into abc.tk_subt001 (ts, a, c, e, f) values (now-1d, 4, false, 2.004, 'nchar_4')
sql insert into abc.tk_subt001 (ts, a, c, e, f) values (now-1h, 5, false, 2.005, 'nchar_5')
......@@ -95,35 +93,29 @@ sql insert into abc.tk_subt001 (ts, b, d) values (now-1m, 'binary_6',
sql insert into abc.tk_subt001 (ts, b, d) values (now-1s, 'binary_7', 1.007)
sql insert into abc.tk_subt001 (ts, b, d) values (now-1a, 'binary_8', 1.008)
sql select * from tk_subt001
if $rows != 8 then
print ==== expect rows is 8, but actually is $rows
if $rows != 6 then
print ==== expect rows is 6, but actually is $rows
return -1
endi
sql insert into abc.tk_subt002 using tk_mt tags (22,'subt002x') values (now-2y, 2008, 'binary_2008', false, 2008.001, 2008.001, 'nchar_2008')
sql insert into abc.tk_subt002 using tk_mt tags (2, 'subt002') values (now-1y, 2007, 'binary_2007', false, 2007.001, 2007.001, 'nchar_2007')
sql insert into abc.tk_subt002 using tk_mt tags (2, 'subt002') values (now-1n, 2006, 'binary_2006', true, 2006.001, 2006.001, 'nchar_2006')
sql insert into abc.tk_subt002 using tk_mt tags (2, 'subt002') values (now+1s, 2001, 'binary_2001', true, 2001.001, 2001.001, 'nchar_2001')
sql insert into abc.tk_subt002 using tk_mt tags (22, 'subt002x') values (now+1s, 2001, 'binary_2001', true, 2001.001, 2001.001, 'nchar_2001')
sql insert into abc.tk_subt002 using tk_mt tags (2, 'subt002') values (now+1m, 2002, 'binary_2002', false, 2002.001, 2002.001, 'nchar_2002')
sql insert into abc.tk_subt002 using tk_mt tags (2, 'subt002') values (now+1h, 2003, 'binary_2003', false, 2003.001, 2003.001, 'nchar_2003')
sql insert into abc.tk_subt002 using tk_mt tags (2, 'subt002') values (now+1d, 2004, 'binary_2004', true, 2004.001, 2004.001, 'nchar_2004')
sql insert into abc.tk_subt002 using tk_mt tags (2, 'subt002') values (now+1w, 2005, 'binary_2005', false, 2005.001, 2005.001, 'nchar_2005')
sql select * from tk_subt002
if $rows != 8 then
print ==== expect rows is 8, but actually is $rows
if $rows != 5 then
print ==== expect rows is 5, but actually is $rows
return -1
endi
sql insert into abc.tk_subt003 (ts, a, c, e, f) using tk_mt tags (3, 'subt003') values (now-5y, 3001, false, 3001.001, 'nchar_3001')
sql insert into abc.tk_subt003 (ts, a, c, e, f) using tk_mt tags (3, 'subt003') values (now-4y, 3002, false, 3002.001, 'nchar_3002')
sql insert into abc.tk_subt003 (ts, a, c, e, f) using tk_mt tags (3, 'subt003') values (now-3y, 3003, true , 3003.001, 'nchar_3003')
sql insert into abc.tk_subt003 (ts, a, c, e, f) using tk_mt tags (3, 'subt003') values (now-2y, 3004, false, 3004.001, 'nchar_3004')
sql insert into abc.tk_subt003 values (now-37d, 3005, 'binary_3005', false, 3005.001, 3005.001, 'nchar_3005')
sql insert into abc.tk_subt003 (ts, a, c, e, f) using tk_mt tags (3, 'subt003') values (now-38d, 3004, false, 3004.001, 'nchar_3004')
sql insert into abc.tk_subt003 (ts, a, c, e, f) using tk_mt tags (3, 'subt003') values (now-37d, 3005, false, 3005.001, 'nchar_3005')
sql insert into abc.tk_subt003 values (now-36d, 3006, 'binary_3006', true, 3006.001, 3006.001, 'nchar_3006')
sql insert into abc.tk_subt003 (ts, a, c, e, f) using tk_mt tags (33, 'subt003x') values (now-35d, 3007, false, 3007.001, 'nchar_3007')
sql select * from tk_subt003
if $rows != 7 then
print ==== expect rows is 7, but actually is $rows
if $rows != 4 then
print ==== expect rows is 4, but actually is $rows
return -1
endi
......
......@@ -850,6 +850,8 @@ if $rows != 12 then
return -1
endi
print =====================>td-1442
sql_error select count(*) from m_fl_tb0 interval(1s) fill(prev);
print =============== clear
sql drop database $db
......
......@@ -65,22 +65,23 @@ endi
if $data00 != @18-09-18 01:40:00.000@ then
return -1
endi
#if $data01 != NULL then
if $data01 != 999 then
return -1
endi
#if $data02 != NULL then
endi
if $data02 != 999 then
return -1
endi
#if $data03 != NULL then
endi
if $data03 != 999.00000 then
return -1
endi
#if $data04 != NULL then
if $data04 != 999.000000000 then
return -1
endi
#if $data05 != NULL then
if $data05 != 999 then
return -1
......@@ -127,7 +128,7 @@ if $data01 != 0 then
return -1
endi
#add check for out of range first/last query
print =============> add check for out of range first/last query
sql select first(ts),last(ts) from first_tb4 where ts>'2018-9-18 1:40:01';
if $row != 0 then
return -1
......@@ -136,4 +137,130 @@ endi
sql select first(ts),last(ts) from first_tb4 where ts<'2018-9-17 8:50:0';
if $row != 0 then
return -1
endi
#first/last mix up query
#select first(size),last(size) from stest interval(1d) group by tbname;
print =====================>td-1477
sql create table stest(ts timestamp,size INT,filenum INT) tags (appname binary(500),tenant binary(500));
sql insert into test1 using stest tags('test1','aaa') values ('2020-09-04 16:53:54.003',210,3);
sql insert into test2 using stest tags('test1','aaa') values ('2020-09-04 16:53:56.003',210,3);
sql insert into test11 using stest tags('test11','bbb') values ('2020-09-04 16:53:57.003',210,3);
sql insert into test12 using stest tags('test11','bbb') values ('2020-09-04 16:53:58.003',210,3);
sql insert into test21 using stest tags('test21','ccc') values ('2020-09-04 16:53:59.003',210,3);
sql insert into test22 using stest tags('test21','ccc') values ('2020-09-04 16:54:54.003',210,3);
sql select sum(size) from stest group by appname;
if $rows != 3 then
return -1
endi
if $data00 != 420 then
return -1
endi
if $data10 != 420 then
return -1
endi
if $data20 != 420 then
return -1
endi
if $data01 != @test1@ then
return -1
endi
if $data11 != @test11@ then
return -1
endi
if $data21 != @test21@ then
return -1
endi
sql select sum(size) from stest interval(1d) group by appname;
if $rows != 3 then
return -1
endi
#2020-09-04 00:00:00.000 | 420 | test1 |
#2020-09-04 00:00:00.000 | 420 | test11 |
#2020-09-04 00:00:00.000 | 420 | test21 |
if $data00 != @20-09-04 00:00:00.000@ then
return -1
endi
if $data10 != @20-09-04 00:00:00.000@ then
return -1
endi
if $data20 != @20-09-04 00:00:00.000@ then
return -1
endi
if $data01 != 420 then
print expect 420 , actual $data01
return -1
endi
if $data11 != 420 then
return -1
endi
if $data21 != 420 then
return -1
endi
if $data02 != @test1@ then
return -1
endi
if $data12 != @test11@ then
return -1
endi
if $data22 != @test21@ then
return -1
endi
print ===================>td-1477, this bug occurs when a table has only one block.
sql select first(size),count(*),LAST(SIZE) from stest where tbname in ('test1', 'test2') interval(1d) group by tbname;
if $rows != 2 then
return -1
endi
if $data00 != @20-09-04 00:00:00.000@ then
return -1
endi
if $data01 != 210 then
return -1
endi
if $data02 != 1 then
return -1
endi
if $data03 != 210 then
return -1
endi
if $data04 != @test1@ then
return -1
endi
if $data10 != @20-09-04 00:00:00.000@ then
return -1
endi
if $data11 != 210 then
return -1
endi
if $data12 != 1 then
return -1
endi
if $data13 != 210 then
return -1
endi
if $data14 != @test2@ then
print expect test2 , actual: $data14
return -1
endi
\ No newline at end of file
......@@ -218,4 +218,10 @@ endi
if $data04 != 123.981000000 then
print expect 123.981000000, actual: $data04
return -1
endi
sql create table tu(ts timestamp, k int)
sql select last_row(*) from tu
if $row != 0 then
return -1
endi
\ No newline at end of file