Commit e4f83886 authored by: Ping Xiao

Merge branch 'develop' into xiaoping/add_test_case

......@@ -122,10 +122,14 @@ IF (TD_LINUX)
ADD_DEFINITIONS(-D_TD_NINGSI_60)
MESSAGE(STATUS "set ningsi macro to true")
ENDIF ()
SET(DEBUG_FLAGS "-O0 -g3 -DDEBUG")
IF (TD_MEMORY_SANITIZER)
SET(DEBUG_FLAGS "-fsanitize=address -fsanitize=undefined -fno-sanitize-recover=all -fsanitize=float-divide-by-zero -fsanitize=float-cast-overflow -fno-sanitize=null -fno-sanitize=alignment -static-libasan -O0 -g3 -DDEBUG")
ELSE ()
SET(DEBUG_FLAGS "-O0 -g3 -DDEBUG")
ENDIF ()
SET(RELEASE_FLAGS "-O3 -Wno-error")
IF (${COVER} MATCHES "true")
MESSAGE(STATUS "Test coverage mode, add extra flags")
SET(GCC_COVERAGE_COMPILE_FLAGS "-fprofile-arcs -ftest-coverage")
......@@ -144,7 +148,11 @@ IF (TD_DARWIN_64)
ADD_DEFINITIONS(-DUSE_LIBICONV)
MESSAGE(STATUS "darwin64 is defined")
SET(COMMON_FLAGS "-std=gnu99 -Wall -Werror -Wno-missing-braces -fPIC -msse4.2 -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE")
SET(DEBUG_FLAGS "-O0 -g3 -DDEBUG")
IF (TD_MEMORY_SANITIZER)
SET(DEBUG_FLAGS "-fsanitize=address -fsanitize=undefined -fno-sanitize-recover=all -fsanitize=float-divide-by-zero -fsanitize=float-cast-overflow -fno-sanitize=null -fno-sanitize=alignment -O0 -g3 -DDEBUG")
ELSE ()
SET(DEBUG_FLAGS "-O0 -g3 -DDEBUG")
ENDIF ()
SET(RELEASE_FLAGS "-Og")
INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/deps/cJson/inc)
INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/deps/lz4/inc)
......@@ -162,7 +170,14 @@ IF (TD_WINDOWS)
IF (MSVC AND (MSVC_VERSION GREATER_EQUAL 1900))
SET(COMMON_FLAGS "${COMMON_FLAGS} /Wv:18")
ENDIF ()
SET(DEBUG_FLAGS "/fsanitize=thread /fsanitize=leak /fsanitize=memory /fsanitize=undefined /fsanitize=hwaddress /Zi /W3 /GL")
IF (TD_MEMORY_SANITIZER)
MESSAGE("memory sanitizer detected as true")
SET(DEBUG_FLAGS "/fsanitize=address /Zi /W3 /GL")
ELSE ()
MESSAGE("memory sanitizer detected as false")
SET(DEBUG_FLAGS "/Zi /W3 /GL")
ENDIF ()
SET(RELEASE_FLAGS "/W0 /O3 /GL")
ENDIF ()
......@@ -171,7 +186,7 @@ IF (TD_WINDOWS)
INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/deps/regex)
INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/deps/wepoll/inc)
INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/deps/MsvcLibX/include)
ENDIF ()
ENDIF ()
IF (TD_WINDOWS_64)
ADD_DEFINITIONS(-D_M_X64)
......
......@@ -83,3 +83,8 @@ SET(TD_BUILD_JDBC TRUE)
IF (${BUILD_JDBC} MATCHES "false")
SET(TD_BUILD_JDBC FALSE)
ENDIF ()
SET(TD_MEMORY_SANITIZER FALSE)
IF (${MEMORY_SANITIZER} MATCHES "true")
SET(TD_MEMORY_SANITIZER TRUE)
ENDIF ()
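The new TD_MEMORY_SANITIZER switch above is driven by the MEMORY_SANITIZER CMake variable, so a sanitizer-enabled debug build would presumably be configured along these lines (hypothetical invocation; adjust to your build directory layout):

```
cmake .. -DMEMORY_SANITIZER=true
```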
......@@ -15,6 +15,7 @@ TDengine是一个高效的存储、查询、分析时序大数据的平台,专
* [Command-line program TAOS](/getting-started#console): a handy way to access TDengine
* [Quick experience](/getting-started#demo): run the sample program to quickly experience efficient data insertion and queries
* [Supported platforms](/getting-started#platforms): the list of platforms supported by the TDengine server and client
* [Kubernetes deployment](https://taosdata.github.io/TDengine-Operator/zh/index.html): detailed instructions for deploying TDengine in a Kubernetes environment
## [Overall Architecture](/architecture)
......
......@@ -176,7 +176,7 @@ TDengine 分布式架构的逻辑结构图如下:
**Communication:** Communication among the data nodes of a TDengine system, and between the application driver and the data nodes, is carried out over TCP/UDP. Since write packets are usually small in IoT scenarios, TDengine uses UDP in addition to TCP, because UDP is more efficient and is not limited by the number of connections. TDengine implements its own timeout, retransmission and acknowledgement mechanisms to make UDP transfers reliable. Packets smaller than 15K are sent over UDP; packets larger than 15K, as well as query-type operations, automatically use TCP. Depending on the configuration and the packet, TDengine also automatically compresses/decompresses the data and performs digital signing/authentication. Data replication between data nodes uses TCP only.
**FQDN configuration:** A data node has one or more FQDNs, which can be specified with the parameter "fqdn" in the system configuration file taos.cfg; if it is not specified, the system automatically uses the machine's hostname as the FQDN. If a node has no FQDN configured, its fqdn parameter can be set directly to its IP address, but this is not recommended, because IP addresses may change, and once they do the cluster will stop working. The EP (End Point) of a data node consists of FQDN + Port. When using FQDNs, make sure the DNS service works properly, or configure the hosts file on the nodes and on the machines where applications run.
**FQDN configuration:** A data node has one or more FQDNs, which can be specified with the parameter "fqdn" in the system configuration file taos.cfg; if it is not specified, the system automatically uses the machine's hostname as the FQDN. If a node has no FQDN configured, its fqdn parameter can be set directly to its IP address, but this is not recommended, because IP addresses may change, and once they do the cluster will stop working. The EP (End Point) of a data node consists of FQDN + Port. When using FQDNs, make sure the DNS service works properly, or configure the hosts file on the nodes and on the machines where applications run. In addition, the length of this parameter value must be kept within 96 characters.
**Port configuration:** The external port of a data node is determined by the TDengine system configuration parameter serverPort; the port for intra-cluster communication is serverPort+5. Data replication between data nodes in the cluster occupies one more TCP port, serverPort+10. To let multiple threads handle UDP data efficiently, each internal and external UDP connection needs 5 consecutive ports. A data node therefore uses the port range serverPort to serverPort+10, 11 TCP/UDP ports in total. (There may also be ports used by RESTful and the Arbitrator, in which case the total is 13.) Make sure the firewall keeps these ports open. Each data node can be configured with a different serverPort. (For details see the [TDengine 2.0 port description](https://www.taosdata.com/cn/documentation/faq#port).)
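To make the arithmetic concrete, assuming the default serverPort of 6030 (see the parameter list later in this commit), the port layout of a single data node works out to roughly:

```
serverPort        6030        external service port (TCP/UDP)
serverPort + 5    6035        intra-cluster communication
serverPort + 10   6040        data replication between data nodes (TCP)
range             6030-6040   11 TCP/UDP ports in total
RESTful           6041        serverPort + 11 (see the serverPort note below)
```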
......
......@@ -325,10 +325,12 @@ for (int i = 0; i < numOfRows; i++){
}
s.setString(2, s2, 10);
// After AddBatch, you can set a new table name, TAGS and VALUES again, so that a single execution can write into multiple tables
// After AddBatch, the cache is not cleared. To avoid confusion, binding a new batch of data before ExecuteBatch is not recommended
s.columnDataAddBatch();
// Execute the statement:
// Execute the statement with the bound data:
s.columnDataExecuteBatch();
// Clear the cache after executing the statement. Once cleared, the current object can be reused to bind a new batch of data (with a new table name, new TAGS values and new VALUES values):
s.columnDataClearBatch();
// When everything is done, release the resources:
s.columnDataCloseBatch();
```
......
......@@ -99,7 +99,7 @@ taosd -C
Only some important configuration parameters are listed below; see the notes in the configuration file for the rest. For a detailed description of each parameter and its effect, see the preceding chapters; the default values all work and usually need no changes. **Note: after a configuration change, the *taosd* service must be restarted for it to take effect.**
- firstEp: the end point of the first dnode in the cluster that taosd actively connects to at startup; default localhost:6030.
- fqdn: the FQDN of the data node; defaults to the first hostname configured in the operating system. If you prefer IP-address access, it can be set to the node's IP address.
- fqdn: the FQDN of the data node; defaults to the first hostname configured in the operating system. If you prefer IP-address access, it can be set to the node's IP address. The length of this parameter value must be kept within 96 characters.
- serverPort: the port taosd serves external requests on after startup; default 6030. (The RESTful service uses this value +11, i.e. 6041 by default.)
- dataDir: the data file directory; all data files are written here. Default: /var/lib/taos.
- logDir: the log file directory; client and server run-time logs are written here. Default: /var/log/taos.
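A minimal taos.cfg sketch built only from the defaults listed above (illustrative values; anything omitted keeps its default):

```
# taos.cfg (sketch)
firstEp     localhost:6030
fqdn        h1.example.com     # hypothetical hostname, must stay within 96 characters
serverPort  6030
dataDir     /var/lib/taos
logDir      /var/log/taos
```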
......
......@@ -138,6 +138,7 @@ bool isSimpleAggregateRv(SQueryInfo* pQueryInfo);
bool tscNonOrderedProjectionQueryOnSTable(SQueryInfo *pQueryInfo, int32_t tableIndex);
bool tscOrderedProjectionQueryOnSTable(SQueryInfo* pQueryInfo, int32_t tableIndex);
bool tscIsDiffDerivQuery(SQueryInfo* pQueryInfo);
bool tscIsProjectionQueryOnSTable(SQueryInfo* pQueryInfo, int32_t tableIndex);
bool tscIsProjectionQuery(SQueryInfo* pQueryInfo);
......
......@@ -266,6 +266,7 @@ typedef struct SSqlObj {
typedef struct SSqlStream {
SSqlObj *pSql;
void * cqhandle; // stream belong to SCQContext handle
const char* dstTable;
uint32_t streamId;
char listed;
......
......@@ -107,14 +107,10 @@ int tsParseTime(SStrToken *pToken, int64_t *time, char **next, char *error, int1
return tscInvalidOperationMsg(error, "value expected in timestamp", sToken.z);
}
if (parseAbsoluteDuration(valueToken.z, valueToken.n, &interval) != TSDB_CODE_SUCCESS) {
if (parseAbsoluteDuration(valueToken.z, valueToken.n, &interval, timePrec) != TSDB_CODE_SUCCESS) {
return TSDB_CODE_TSC_INVALID_OPERATION;
}
if (timePrec == TSDB_TIME_PRECISION_MILLI) {
interval /= 1000;
}
if (sToken.type == TK_PLUS) {
useconds += interval;
} else {
......@@ -468,6 +464,10 @@ int tsParseOneRow(char **str, STableDataBlocks *pDataBlocks, int16_t timePrec, i
int32_t cnt = 0;
int32_t j = 0;
if (sToken.n >= TSDB_MAX_BYTES_PER_ROW) {
return tscSQLSyntaxErrMsg(pInsertParam->msg, "too long string", sToken.z);
}
for (uint32_t k = 1; k < sToken.n - 1; ++k) {
if (sToken.z[k] == '\\' || (sToken.z[k] == delim && sToken.z[k + 1] == delim)) {
tmpTokenBuf[j] = sToken.z[k + 1];
......@@ -711,7 +711,7 @@ static int32_t doParseInsertStatement(SInsertStatementParam *pInsertParam, char
}
code = TSDB_CODE_TSC_INVALID_OPERATION;
char tmpTokenBuf[16*1024] = {0}; // used for deleting Escape character: \\, \', \"
char tmpTokenBuf[TSDB_MAX_BYTES_PER_ROW] = {0}; // used for deleting Escape character: \\, \', \"
int32_t numOfRows = 0;
code = tsParseValues(str, dataBuf, maxNumOfRows, pInsertParam, &numOfRows, tmpTokenBuf);
......
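The new length check above rejects a quoted token before it is unescaped into the fixed-size tmpTokenBuf, which is now dimensioned with the same TSDB_MAX_BYTES_PER_ROW constant. A standalone sketch of that guard-then-unescape pattern, using hypothetical names and a stand-in constant rather than the actual TDengine functions:

```c
#include <stdio.h>
#include <string.h>

#define MAX_BYTES_PER_ROW 1024          /* stand-in for TSDB_MAX_BYTES_PER_ROW */

/* Copy a quoted token into buf (at least MAX_BYTES_PER_ROW bytes), dropping
 * escape characters, and reject the token before copying if it cannot fit.
 * Returns the unescaped length, or -1 for "too long string". */
static int unescapeQuotedToken(const char *z, size_t n, char delim, char *buf) {
  if (n >= MAX_BYTES_PER_ROW) {
    return -1;                          /* guard added by the change above */
  }
  size_t j = 0;
  for (size_t k = 1; k + 1 < n; ++k) {  /* skip the surrounding quotes */
    if (z[k] == '\\' || (z[k] == delim && z[k + 1] == delim)) {
      buf[j++] = z[k + 1];
      ++k;                              /* the escaped character was consumed */
    } else {
      buf[j++] = z[k];
    }
  }
  buf[j] = '\0';
  return (int)j;
}

int main(void) {
  char out[MAX_BYTES_PER_ROW];
  const char *tok = "'it''s a \\n test'";
  int len = unescapeQuotedToken(tok, strlen(tok), '\'', out);
  printf("%d: %s\n", len, out);
  return 0;
}
```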
......@@ -19,6 +19,7 @@
#include "ttimer.h"
#include "tutil.h"
#include "taosmsg.h"
#include "tcq.h"
#include "taos.h"
......@@ -294,24 +295,34 @@ int tscBuildQueryStreamDesc(void *pMsg, STscObj *pObj) {
return msgLen;
}
void tscKillConnection(STscObj *pObj) {
pthread_mutex_lock(&pObj->mutex);
// this callback is invoked after cqContext->dbConn has been killed
void cqConnKilledNotify(void* handle, void* conn) {
if (handle == NULL || conn == NULL){
return ;
}
SSqlObj *pSql = pObj->sqlList;
while (pSql) {
pSql = pSql->next;
}
SCqContext* pContext = (SCqContext*) handle;
if (pContext->dbConn == conn){
atomic_store_ptr(&(pContext->dbConn), NULL);
}
}
void tscKillConnection(STscObj *pObj) {
// read the stream list head under the lock
pthread_mutex_lock(&pObj->mutex);
SSqlStream *pStream = pObj->streamList;
pthread_mutex_unlock(&pObj->mutex);
while (pStream) {
SSqlStream *tmp = pStream->next;
// clear the associated dbConn pointer
cqConnKilledNotify(pStream->cqhandle, pObj);
// taos_close_stream() takes the pObj->mutex lock itself; be careful about deadlock
taos_close_stream(pStream);
pStream = tmp;
}
pthread_mutex_unlock(&pObj->mutex);
tscDebug("connection:%p is killed", pObj);
taos_close(pObj);
}
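The reworked tscKillConnection above takes the connection mutex only long enough to read the stream list head, then walks the list unlocked, because taos_close_stream acquires the same mutex internally. A simplified standalone sketch of that deadlock-avoidance pattern, with hypothetical types and names:

```c
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

typedef struct Stream { struct Stream *next; } Stream;

typedef struct {
  pthread_mutex_t mutex;
  Stream         *streamList;
} Conn;

/* Stand-in for taos_close_stream(): it re-acquires the connection mutex. */
static void closeStream(Conn *conn, Stream *stream) {
  pthread_mutex_lock(&conn->mutex);
  /* ... tear the stream down ... */
  pthread_mutex_unlock(&conn->mutex);
  free(stream);
}

/* Take the mutex only to snapshot the list head, then walk the list unlocked
 * so that closeStream() can lock the same mutex without deadlocking. */
static void killConnection(Conn *conn) {
  pthread_mutex_lock(&conn->mutex);
  Stream *stream = conn->streamList;
  conn->streamList = NULL;
  pthread_mutex_unlock(&conn->mutex);

  while (stream != NULL) {
    Stream *next = stream->next;        /* save next before the node is freed */
    closeStream(conn, stream);
    stream = next;
  }
  printf("connection %p killed\n", (void *)conn);
}

int main(void) {
  Conn conn = {0};
  pthread_mutex_init(&conn.mutex, NULL);
  for (int i = 0; i < 2; i++) {          /* build a small stream list */
    Stream *s = calloc(1, sizeof(Stream));
    s->next = conn.streamList;
    conn.streamList = s;
  }
  killConnection(&conn);
  pthread_mutex_destroy(&conn.mutex);
  return 0;
}
```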
This diff has been collapsed.
......@@ -912,7 +912,7 @@ int tscBuildQueryMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
}
SGroupbyExpr *pGroupbyExpr = query.pGroupbyExpr;
if (pGroupbyExpr->numOfGroupCols > 0) {
if (pGroupbyExpr != NULL && pGroupbyExpr->numOfGroupCols > 0) {
pQueryMsg->orderByIdx = htons(pGroupbyExpr->orderIndex);
pQueryMsg->orderType = htons(pGroupbyExpr->orderType);
......
......@@ -53,9 +53,7 @@ static int64_t tscGetRetryDelayTime(SSqlStream* pStream, int64_t slidingTime, in
if (pStream->interval.intervalUnit != 'n' && pStream->interval.intervalUnit != 'y') {
// change to ms
if (prec == TSDB_TIME_PRECISION_MICRO) {
slidingTime = slidingTime / 1000;
}
slidingTime = convertTimePrecision(slidingTime, pStream->precision, TSDB_TIME_PRECISION_MILLI);
if (slidingTime < retryDelta) {
return slidingTime;
......@@ -139,8 +137,13 @@ static void tscProcessStreamTimer(void *handle, void *tmrId) {
pStream->numOfRes = 0; // reset the numOfRes.
SSqlObj *pSql = pStream->pSql;
// pSql may be NULL if killStream has already been called
if(pSql == NULL) {
return ;
}
SQueryInfo* pQueryInfo = tscGetQueryInfo(&pSql->cmd);
tscDebug("0x%"PRIx64" timer launch query", pSql->self);
tscDebug("0x%"PRIx64" add into timer", pSql->self);
if (pStream->isProject) {
/*
......@@ -157,11 +160,7 @@ static void tscProcessStreamTimer(void *handle, void *tmrId) {
pQueryInfo->window.skey = pStream->stime;
int64_t etime = taosGetTimestamp(pStream->precision);
// delay to wait for all data in the last time window
if (pStream->precision == TSDB_TIME_PRECISION_MICRO) {
etime -= tsMaxStreamComputDelay * 1000l;
} else {
etime -= tsMaxStreamComputDelay;
}
etime -= convertTimePrecision(tsMaxStreamComputDelay, TSDB_TIME_PRECISION_MILLI, pStream->precision);
if (etime > pStream->etime) {
etime = pStream->etime;
} else if (pStream->interval.intervalUnit != 'y' && pStream->interval.intervalUnit != 'n') {
......@@ -178,8 +177,8 @@ static void tscProcessStreamTimer(void *handle, void *tmrId) {
int64_t timer = pStream->interval.sliding;
if (pStream->interval.intervalUnit == 'y' || pStream->interval.intervalUnit == 'n') {
timer = 86400 * 1000l;
} else if (pStream->precision == TSDB_TIME_PRECISION_MICRO) {
timer /= 1000l;
} else {
timer = convertTimePrecision(timer, pStream->precision, TSDB_TIME_PRECISION_MILLI);
}
tscSetRetryTimer(pStream, pSql, timer);
return;
......@@ -339,8 +338,12 @@ static void tscSetRetryTimer(SSqlStream *pStream, SSqlObj *pSql, int64_t timer)
if (pStream->isProject) {
int64_t now = taosGetTimestamp(pStream->precision);
int64_t etime = now > pStream->etime ? pStream->etime : now;
if (pStream->etime < now && now - pStream->etime > tsMaxRetentWindow) {
int64_t maxRetent = tsMaxRetentWindow * 1000;
if(pStream->precision == TSDB_TIME_PRECISION_MICRO) {
maxRetent *= 1000;
}
if (pStream->etime < now && now - pStream->etime > maxRetent) {
/*
* current time window will be closed, since it too early to exceed the maxRetentWindow value
*/
......@@ -369,9 +372,8 @@ static void tscSetRetryTimer(SSqlStream *pStream, SSqlObj *pSql, int64_t timer)
}
static int64_t getLaunchTimeDelay(const SSqlStream* pStream) {
int64_t maxDelay =
(pStream->precision == TSDB_TIME_PRECISION_MICRO) ? tsMaxStreamComputDelay * 1000L : tsMaxStreamComputDelay;
int64_t maxDelay = convertTimePrecision(tsMaxStreamComputDelay, TSDB_TIME_PRECISION_MILLI, pStream->precision);
int64_t delayDelta = maxDelay;
if (pStream->interval.intervalUnit != 'n' && pStream->interval.intervalUnit != 'y') {
delayDelta = (int64_t)(pStream->interval.sliding * tsStreamComputDelayRatio);
......@@ -438,16 +440,14 @@ static void tscSetNextLaunchTimer(SSqlStream *pStream, SSqlObj *pSql) {
timer += getLaunchTimeDelay(pStream);
if (pStream->precision == TSDB_TIME_PRECISION_MICRO) {
timer = timer / 1000L;
}
timer = convertTimePrecision(timer, pStream->precision, TSDB_TIME_PRECISION_MILLI);
tscSetRetryTimer(pStream, pSql, timer);
}
static int32_t tscSetSlidingWindowInfo(SSqlObj *pSql, SSqlStream *pStream) {
int64_t minIntervalTime =
(pStream->precision == TSDB_TIME_PRECISION_MICRO) ? tsMinIntervalTime * 1000L : tsMinIntervalTime;
convertTimePrecision(tsMinIntervalTime, TSDB_TIME_PRECISION_MILLI, pStream->precision);
SQueryInfo* pQueryInfo = tscGetQueryInfo(&pSql->cmd);
......@@ -471,7 +471,7 @@ static int32_t tscSetSlidingWindowInfo(SSqlObj *pSql, SSqlStream *pStream) {
}
int64_t minSlidingTime =
(pStream->precision == TSDB_TIME_PRECISION_MICRO) ? tsMinSlidingTime * 1000L : tsMinSlidingTime;
convertTimePrecision(tsMinSlidingTime, TSDB_TIME_PRECISION_MILLI, pStream->precision);
if (pQueryInfo->interval.intervalUnit != 'n' && pQueryInfo->interval.intervalUnit!= 'y' && pQueryInfo->interval.sliding < minSlidingTime) {
tscWarn("0x%"PRIx64" stream:%p, original sliding value:%" PRId64 " too small, reset to:%" PRId64, pSql->self, pStream,
......@@ -539,13 +539,12 @@ static int64_t tscGetLaunchTimestamp(const SSqlStream *pStream) {
timer = pStream->stime - now;
}
int64_t startDelay =
(pStream->precision == TSDB_TIME_PRECISION_MICRO) ? tsStreamCompStartDelay * 1000L : tsStreamCompStartDelay;
int64_t startDelay = convertTimePrecision(tsStreamCompStartDelay, TSDB_TIME_PRECISION_MILLI, pStream->precision);
timer += getLaunchTimeDelay(pStream);
timer += startDelay;
return (pStream->precision == TSDB_TIME_PRECISION_MICRO) ? timer / 1000L : timer;
return convertTimePrecision(timer, pStream->precision, TSDB_TIME_PRECISION_MILLI);
}
static void tscCreateStream(void *param, TAOS_RES *res, int code) {
......@@ -664,7 +663,7 @@ void cbParseSql(void* param, TAOS_RES* res, int code) {
}
TAOS_STREAM *taos_open_stream_withname(TAOS *taos, const char* dstTable, const char *sqlstr, void (*fp)(void *param, TAOS_RES *, TAOS_ROW row),
int64_t stime, void *param, void (*callback)(void *)) {
int64_t stime, void *param, void (*callback)(void *), void* cqhandle) {
STscObj *pObj = (STscObj *)taos;
if (pObj == NULL || pObj->signature != pObj) return NULL;
......@@ -697,6 +696,7 @@ TAOS_STREAM *taos_open_stream_withname(TAOS *taos, const char* dstTable, const c
pStream->callback = callback;
pStream->param = param;
pStream->pSql = pSql;
pStream->cqhandle = cqhandle;
pSql->pStream = pStream;
pSql->param = pStream;
pSql->maxRetry = TSDB_MAX_REPLICA;
......@@ -745,7 +745,7 @@ TAOS_STREAM *taos_open_stream_withname(TAOS *taos, const char* dstTable, const c
TAOS_STREAM *taos_open_stream(TAOS *taos, const char *sqlstr, void (*fp)(void *param, TAOS_RES *, TAOS_ROW row),
int64_t stime, void *param, void (*callback)(void *)) {
return taos_open_stream_withname(taos, "", sqlstr, fp, stime, param, callback);
return taos_open_stream_withname(taos, "", sqlstr, fp, stime, param, callback, NULL);
}
void taos_close_stream(TAOS_STREAM *handle) {
......
......@@ -255,10 +255,14 @@ bool tscIsProjectionQuery(SQueryInfo* pQueryInfo) {
size_t size = tscNumOfExprs(pQueryInfo);
for (int32_t i = 0; i < size; ++i) {
int32_t functionId = tscExprGet(pQueryInfo, i)->base.functionId;
int32_t f = tscExprGet(pQueryInfo, i)->base.functionId;
if (f == TSDB_FUNC_TS_DUMMY) {
continue;
}
if (functionId != TSDB_FUNC_PRJ && functionId != TSDB_FUNC_TAGPRJ && functionId != TSDB_FUNC_TAG &&
functionId != TSDB_FUNC_TS && functionId != TSDB_FUNC_ARITHM) {
if (f != TSDB_FUNC_PRJ && f != TSDB_FUNC_TAGPRJ && f != TSDB_FUNC_TAG &&
f != TSDB_FUNC_TS && f != TSDB_FUNC_ARITHM && f != TSDB_FUNC_DIFF &&
f != TSDB_FUNC_DERIVATIVE) {
return false;
}
}
......@@ -266,6 +270,24 @@ bool tscIsProjectionQuery(SQueryInfo* pQueryInfo) {
return true;
}
bool tscIsDiffDerivQuery(SQueryInfo* pQueryInfo) {
size_t size = tscNumOfExprs(pQueryInfo);
for (int32_t i = 0; i < size; ++i) {
int32_t f = tscExprGet(pQueryInfo, i)->base.functionId;
if (f == TSDB_FUNC_TS_DUMMY) {
continue;
}
if (f == TSDB_FUNC_DIFF || f == TSDB_FUNC_DERIVATIVE) {
return true;
}
}
return false;
}
bool tscHasColumnFilter(SQueryInfo* pQueryInfo) {
// filter on primary timestamp column
if (pQueryInfo->window.skey != INT64_MIN || pQueryInfo->window.ekey != INT64_MAX) {
......@@ -962,6 +984,9 @@ static void destroyDummyInputOperator(void* param, int32_t numOfOutput) {
pInfo->block = destroyOutputBuf(pInfo->block);
pInfo->pSql = NULL;
cleanupResultRowInfo(&pInfo->pTableQueryInfo->resInfo);
tfree(pInfo->pTableQueryInfo);
}
// todo: this operator serves as the adapter between the operator tree and the SqlRes result, remove it later
......@@ -4263,10 +4288,9 @@ int32_t tscCreateQueryFromQueryInfo(SQueryInfo* pQueryInfo, SQueryAttr* pQueryAt
STableMetaInfo* pTableMetaInfo = pQueryInfo->pTableMetaInfo[0];
pQueryAttr->pGroupbyExpr = calloc(1, sizeof(SGroupbyExpr));
*(pQueryAttr->pGroupbyExpr) = pQueryInfo->groupbyExpr;
if (pQueryInfo->groupbyExpr.numOfGroupCols > 0) {
pQueryAttr->pGroupbyExpr = calloc(1, sizeof(SGroupbyExpr));
*(pQueryAttr->pGroupbyExpr) = pQueryInfo->groupbyExpr;
pQueryAttr->pGroupbyExpr->columnInfo = taosArrayDup(pQueryInfo->groupbyExpr.columnInfo);
} else {
assert(pQueryInfo->groupbyExpr.columnInfo == NULL);
......@@ -4345,7 +4369,7 @@ int32_t tscCreateQueryFromQueryInfo(SQueryInfo* pQueryInfo, SQueryAttr* pQueryAt
return TSDB_CODE_TSC_INVALID_OPERATION;
}
if (pQueryAttr->pGroupbyExpr->numOfGroupCols < 0) {
if (pQueryAttr->pGroupbyExpr != NULL && pQueryAttr->pGroupbyExpr->numOfGroupCols < 0) {
tscError("%p illegal value of numOfGroupCols in query msg: %d", addr, pQueryInfo->groupbyExpr.numOfGroupCols);
return TSDB_CODE_TSC_INVALID_OPERATION;
}
......
......@@ -831,6 +831,16 @@ static void doInitGlobalConfig(void) {
cfg.unitType = TAOS_CFG_UTYPE_NONE;
taosInitConfigOption(cfg);
cfg.option = "precision";
cfg.ptr = &tsTimePrecision;
cfg.valType = TAOS_CFG_VTYPE_INT8;
cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_SHOW;
cfg.minValue = TSDB_MIN_PRECISION;
cfg.maxValue = TSDB_MAX_PRECISION;
cfg.ptrLength = 0;
cfg.unitType = TAOS_CFG_UTYPE_NONE;
taosInitConfigOption(cfg);
cfg.option = "comp";
cfg.ptr = &tsCompression;
cfg.valType = TAOS_CFG_VTYPE_INT8;
......@@ -901,6 +911,16 @@ static void doInitGlobalConfig(void) {
cfg.unitType = TAOS_CFG_UTYPE_NONE;
taosInitConfigOption(cfg);
cfg.option = "cachelast";
cfg.ptr = &tsCacheLastRow;
cfg.valType = TAOS_CFG_VTYPE_INT8;
cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_SHOW;
cfg.minValue = TSDB_MIN_DB_CACHE_LAST_ROW;
cfg.maxValue = TSDB_MAX_DB_CACHE_LAST_ROW;
cfg.ptrLength = 0;
cfg.unitType = TAOS_CFG_UTYPE_NONE;
taosInitConfigOption(cfg);
cfg.option = "mqttHostName";
cfg.ptr = tsMqttHostName;
cfg.valType = TAOS_CFG_VTYPE_STRING;
......
......@@ -74,7 +74,7 @@ void tVariantCreate(tVariant *pVar, SStrToken *token) {
case TSDB_DATA_TYPE_BINARY: {
pVar->pz = strndup(token->z, token->n);
pVar->nLen = strdequote(pVar->pz);
pVar->nLen = strRmquote(pVar->pz, token->n);
break;
}
......
......@@ -595,7 +595,7 @@ public class TSDBPreparedStatementTest {
stmt.execute("create database dbtest");
Assert.assertThrows(SQLException.class, () -> stmt.execute("create database dbtest"));
}
@Test
public void setBoolean() throws SQLException {
// given
......
......@@ -177,7 +177,8 @@ public class TSDBResultSetTest {
rs.getAsciiStream("f1");
}
@Test(expected = SQLFeatureNotSupportedException.class)
@SuppressWarnings("deprecation")
@Test(expected = SQLFeatureNotSupportedException.class)
public void getUnicodeStream() throws SQLException {
rs.getUnicodeStream("f1");
}
......
......@@ -242,7 +242,7 @@ def _load_taos_linux():
def _load_taos_darwin():
return ctypes.cDLL('libtaos.dylib')
return ctypes.CDLL('libtaos.dylib')
def _load_taos_windows():
......
......@@ -38,21 +38,6 @@
#define cDebug(...) { if (cqDebugFlag & DEBUG_DEBUG) { taosPrintLog("CQ ", cqDebugFlag, __VA_ARGS__); }}
#define cTrace(...) { if (cqDebugFlag & DEBUG_TRACE) { taosPrintLog("CQ ", cqDebugFlag, __VA_ARGS__); }}
typedef struct {
int32_t vgId;
int32_t master;
int32_t num; // number of continuous streams
char user[TSDB_USER_LEN];
char pass[TSDB_KEY_LEN];
char db[TSDB_DB_NAME_LEN];
FCqWrite cqWrite;
struct SCqObj *pHead;
void *dbConn;
void *tmrCtrl;
pthread_mutex_t mutex;
int32_t delete;
int32_t cqObjNum;
} SCqContext;
typedef struct SCqObj {
tmr_h tmrId;
......@@ -439,7 +424,7 @@ static void cqProcessCreateTimer(void *param, void *tmrId) {
// internal implementation in tscStream.c
TAOS_STREAM *taos_open_stream_withname(TAOS *taos, const char* desName, const char *sqlstr, void (*fp)(void *param, TAOS_RES *, TAOS_ROW row),
int64_t stime, void *param, void (*callback)(void *));
int64_t stime, void *param, void (*callback)(void *), void* cqhandle);
static void cqCreateStream(SCqContext *pContext, SCqObj *pObj) {
pObj->pContext = pContext;
......@@ -453,7 +438,8 @@ static void cqCreateStream(SCqContext *pContext, SCqObj *pObj) {
pObj->tmrId = 0;
if (pObj->pStream == NULL) {
pObj->pStream = taos_open_stream_withname(pContext->dbConn, pObj->dstTable, pObj->sqlStr, cqProcessStreamRes, INT64_MIN, (void *)pObj->rid, NULL);
pObj->pStream = taos_open_stream_withname(pContext->dbConn, pObj->dstTable, pObj->sqlStr, cqProcessStreamRes, \
INT64_MIN, (void *)pObj->rid, NULL, pContext);
// TODO the pObj->pStream may be released if error happens
if (pObj->pStream) {
......
......@@ -31,6 +31,23 @@ typedef struct {
FCqWrite cqWrite;
} SCqCfg;
// SCqContext
typedef struct {
int32_t vgId;
int32_t master;
int32_t num; // number of continuous streams
char user[TSDB_USER_LEN];
char pass[TSDB_KEY_LEN];
char db[TSDB_DB_NAME_LEN];
FCqWrite cqWrite;
struct SCqObj *pHead;
void *dbConn;
void *tmrCtrl;
pthread_mutex_t mutex;
int32_t delete;
int32_t cqObjNum;
} SCqContext;
// the following API shall be called by vnode
void *cqOpen(void *ahandle, const SCqCfg *pCfg);
void cqClose(void *handle);
......
......@@ -94,7 +94,7 @@ STsdbRepo *tsdbOpenRepo(STsdbCfg *pCfg, STsdbAppH *pAppH);
int tsdbCloseRepo(STsdbRepo *repo, int toCommit);
int32_t tsdbConfigRepo(STsdbRepo *repo, STsdbCfg *pCfg);
int tsdbGetState(STsdbRepo *repo);
bool tsdbInCompact(STsdbRepo *repo);
// --------- TSDB TABLE DEFINITION
typedef struct {
uint64_t uid; // the unique table ID
......
......@@ -398,7 +398,10 @@ static char* formatTimestamp(char* buf, int64_t val, int precision) {
time_t tt;
int32_t ms = 0;
if (precision == TSDB_TIME_PRECISION_MICRO) {
if (precision == TSDB_TIME_PRECISION_NANO) {
tt = (time_t)(val / 1000000000);
ms = val % 1000000000;
} else if (precision == TSDB_TIME_PRECISION_MICRO) {
tt = (time_t)(val / 1000000);
ms = val % 1000000;
} else {
......@@ -419,7 +422,9 @@ static char* formatTimestamp(char* buf, int64_t val, int precision) {
#endif
if (tt <= 0 && ms < 0) {
tt--;
if (precision == TSDB_TIME_PRECISION_MICRO) {
if (precision == TSDB_TIME_PRECISION_NANO) {
ms += 1000000000;
} else if (precision == TSDB_TIME_PRECISION_MICRO) {
ms += 1000000;
} else {
ms += 1000;
......@@ -427,9 +432,11 @@ static char* formatTimestamp(char* buf, int64_t val, int precision) {
}
struct tm* ptm = localtime(&tt);
size_t pos = strftime(buf, 32, "%Y-%m-%d %H:%M:%S", ptm);
size_t pos = strftime(buf, 35, "%Y-%m-%d %H:%M:%S", ptm);
if (precision == TSDB_TIME_PRECISION_MICRO) {
if (precision == TSDB_TIME_PRECISION_NANO) {
sprintf(buf + pos, ".%09d", ms);
} else if (precision == TSDB_TIME_PRECISION_MICRO) {
sprintf(buf + pos, ".%06d", ms);
} else {
sprintf(buf + pos, ".%03d", ms);
......@@ -778,6 +785,8 @@ static int calcColWidth(TAOS_FIELD* field, int precision) {
case TSDB_DATA_TYPE_TIMESTAMP:
if (args.is_raw_time) {
return MAX(14, width);
} if (precision == TSDB_TIME_PRECISION_NANO) {
return MAX(29, width);
} else if (precision == TSDB_TIME_PRECISION_MICRO) {
return MAX(26, width); // '2020-01-01 00:00:00.000000'
} else {
......
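The shell changes above extend formatTimestamp to nanosecond precision: the seconds part still goes through strftime, and the remainder is printed as a 9-digit fraction, kept non-negative for pre-epoch values. A self-contained sketch of just the nanosecond branch (using POSIX localtime_r; buffer sizes are illustrative):

```c
#include <inttypes.h>
#include <stdio.h>
#include <time.h>

/* Format a nanosecond-precision timestamp: seconds go through strftime(),
 * the remainder is printed as a 9-digit fraction, kept non-negative for
 * values before the epoch. */
static void formatNsTimestamp(char *buf, size_t cap, int64_t ns) {
  time_t  sec  = (time_t)(ns / 1000000000);
  int64_t frac = ns % 1000000000;
  if (frac < 0) {
    sec  -= 1;
    frac += 1000000000;
  }
  struct tm tmv;
  localtime_r(&sec, &tmv);
  size_t pos = strftime(buf, cap, "%Y-%m-%d %H:%M:%S", &tmv);
  snprintf(buf + pos, cap - pos, ".%09" PRId64, frac);
}

int main(void) {
  char buf[40];
  formatNsTimestamp(buf, sizeof(buf), 1617181920123456789LL);
  puts(buf);
  return 0;
}
```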
......@@ -1213,7 +1213,6 @@ static void fetchResult(TAOS_RES *res, threadInfo* pThreadInfo) {
}
int totalLen = 0;
char temp[16000];
// fetch the records row by row
while((row = taos_fetch_row(res))) {
......@@ -1224,6 +1223,7 @@ static void fetchResult(TAOS_RES *res, threadInfo* pThreadInfo) {
memset(databuf, 0, 100*1024*1024);
}
num_rows++;
char temp[16000] = {0};
int len = taos_print_row(temp, row, fields, num_fields);
len += sprintf(temp + len, "\n");
//printf("query result:%s\n", temp);
......@@ -1852,7 +1852,9 @@ static void printfQueryMeta() {
static char* formatTimestamp(char* buf, int64_t val, int precision) {
time_t tt;
if (precision == TSDB_TIME_PRECISION_MICRO) {
if (precision == TSDB_TIME_PRECISION_NANO) {
tt = (time_t)(val / 1000000000);
} else if (precision == TSDB_TIME_PRECISION_MICRO) {
tt = (time_t)(val / 1000000);
} else {
tt = (time_t)(val / 1000);
......@@ -1873,7 +1875,9 @@ static char* formatTimestamp(char* buf, int64_t val, int precision) {
struct tm* ptm = localtime(&tt);
size_t pos = strftime(buf, 32, "%Y-%m-%d %H:%M:%S", ptm);
if (precision == TSDB_TIME_PRECISION_MICRO) {
if (precision == TSDB_TIME_PRECISION_NANO) {
sprintf(buf + pos, ".%09d", (int)(val % 1000000000));
} else if (precision == TSDB_TIME_PRECISION_MICRO) {
sprintf(buf + pos, ".%06d", (int)(val % 1000000));
} else {
sprintf(buf + pos, ".%03d", (int)(val % 1000));
......@@ -6253,9 +6257,11 @@ static void startMultiThreadInsertData(int threads, char* db_name,
if (0 != precision[0]) {
if (0 == strncasecmp(precision, "ms", 2)) {
timePrec = TSDB_TIME_PRECISION_MILLI;
} else if (0 == strncasecmp(precision, "us", 2)) {
} else if (0 == strncasecmp(precision, "us", 2)) {
timePrec = TSDB_TIME_PRECISION_MICRO;
} else {
} else if (0 == strncasecmp(precision, "ns", 2)) {
timePrec = TSDB_TIME_PRECISION_NANO;
} else {
errorPrint("Not support precision: %s\n", precision);
exit(-1);
}
......
......@@ -820,8 +820,13 @@ static int32_t mnodeRetrieveDbs(SShowObj *pShow, char *data, int32_t rows, void
#endif
pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows;
char *prec = (pDb->cfg.precision == TSDB_TIME_PRECISION_MILLI) ? TSDB_TIME_PRECISION_MILLI_STR
: TSDB_TIME_PRECISION_MICRO_STR;
char *prec = NULL;
switch (pDb->cfg.precision) {
case TSDB_TIME_PRECISION_MILLI: prec = TSDB_TIME_PRECISION_MILLI_STR; break;
case TSDB_TIME_PRECISION_MICRO: prec = TSDB_TIME_PRECISION_MICRO_STR; break;
case TSDB_TIME_PRECISION_NANO: prec = TSDB_TIME_PRECISION_NANO_STR; break;
default: assert(false); break;
}
STR_WITH_SIZE_TO_VARSTR(pWrite, prec, 2);
cols++;
......
......@@ -55,6 +55,13 @@ static FORCE_INLINE int64_t taosGetTimestampUs() {
return (int64_t)systemTime.tv_sec * 1000000L + (int64_t)systemTime.tv_usec;
}
//@return timestamp in nanosecond
static FORCE_INLINE int64_t taosGetTimestampNs() {
struct timespec systemTime = {0};
clock_gettime(CLOCK_REALTIME, &systemTime);
return (int64_t)systemTime.tv_sec * 1000000000L + (int64_t)systemTime.tv_nsec;
}
/*
* @return timestamp decided by global conf variable, tsTimePrecision
* if precision == TSDB_TIME_PRECISION_MICRO, it returns timestamp in microsecond.
......@@ -63,7 +70,9 @@ static FORCE_INLINE int64_t taosGetTimestampUs() {
static FORCE_INLINE int64_t taosGetTimestamp(int32_t precision) {
if (precision == TSDB_TIME_PRECISION_MICRO) {
return taosGetTimestampUs();
} else {
} else if (precision == TSDB_TIME_PRECISION_NANO) {
return taosGetTimestampNs();
}else {
return taosGetTimestampMs();
}
}
......@@ -88,12 +97,13 @@ int64_t taosTimeAdd(int64_t t, int64_t duration, char unit, int32_t precision);
int64_t taosTimeTruncate(int64_t t, const SInterval* pInterval, int32_t precision);
int32_t taosTimeCountInterval(int64_t skey, int64_t ekey, int64_t interval, char unit, int32_t precision);
int32_t parseAbsoluteDuration(char* token, int32_t tokenlen, int64_t* ts);
int32_t parseNatualDuration(const char* token, int32_t tokenLen, int64_t* duration, char* unit);
int32_t parseAbsoluteDuration(char* token, int32_t tokenlen, int64_t* ts, int32_t timePrecision);
int32_t parseNatualDuration(const char* token, int32_t tokenLen, int64_t* duration, char* unit, int32_t timePrecision);
int32_t taosParseTime(char* timestr, int64_t* time, int32_t len, int32_t timePrec, int8_t dayligth);
void deltaToUtcInitOnce();
int64_t convertTimePrecision(int64_t time, int32_t fromPrecision, int32_t toPrecision);
#ifdef __cplusplus
}
#endif
......
......@@ -59,6 +59,9 @@ bool taosMbsToUcs4(char *mbs, size_t mbsLength, char *ucs4, int32_t ucs4_max_len
iconv_close(cd);
if (len != NULL) {
*len = (int32_t)(ucs4_max_len - outLeft);
if (*len < 0) {
return false;
}
}
return true;
......
......@@ -14,7 +14,13 @@
*/
#define _BSD_SOURCE
#ifdef DARWIN
#define _XOPEN_SOURCE
#else
#define _XOPEN_SOURCE 500
#endif
#define _DEFAULT_SOURCE
#include "os.h"
......@@ -119,8 +125,9 @@ int64_t parseFraction(char* str, char** end, int32_t timePrec) {
const int32_t MILLI_SEC_FRACTION_LEN = 3;
const int32_t MICRO_SEC_FRACTION_LEN = 6;
const int32_t NANO_SEC_FRACTION_LEN = 9;
int32_t factor[6] = {1, 10, 100, 1000, 10000, 100000};
int32_t factor[9] = {1, 10, 100, 1000, 10000, 100000, 1000000, 10000000, 100000000};
int32_t times = 1;
while (str[i] >= '0' && str[i] <= '9') {
......@@ -140,12 +147,17 @@ int64_t parseFraction(char* str, char** end, int32_t timePrec) {
}
times = MILLI_SEC_FRACTION_LEN - i;
} else {
assert(timePrec == TSDB_TIME_PRECISION_MICRO);
} else if (timePrec == TSDB_TIME_PRECISION_MICRO) {
if (i >= MICRO_SEC_FRACTION_LEN) {
i = MICRO_SEC_FRACTION_LEN;
}
times = MICRO_SEC_FRACTION_LEN - i;
} else {
assert(timePrec == TSDB_TIME_PRECISION_NANO);
if (i >= NANO_SEC_FRACTION_LEN) {
i = NANO_SEC_FRACTION_LEN;
}
times = NANO_SEC_FRACTION_LEN - i;
}
fraction = strnatoi(str, i) * factor[times];
......@@ -202,7 +214,9 @@ int32_t parseTimezone(char* str, int64_t* tzOffset) {
* 2013-04-12T15:52:01.123+0800
*/
int32_t parseTimeWithTz(char* timestr, int64_t* time, int32_t timePrec) {
int64_t factor = (timePrec == TSDB_TIME_PRECISION_MILLI) ? 1000 : 1000000;
int64_t factor = (timePrec == TSDB_TIME_PRECISION_MILLI) ? 1000 :
(timePrec == TSDB_TIME_PRECISION_MICRO ? 1000000 : 1000000000);
int64_t tzOffset = 0;
struct tm tm = {0};
......@@ -287,7 +301,8 @@ int32_t parseLocaltime(char* timestr, int64_t* time, int32_t timePrec) {
}
}
int64_t factor = (timePrec == TSDB_TIME_PRECISION_MILLI) ? 1000 : 1000000;
int64_t factor = (timePrec == TSDB_TIME_PRECISION_MILLI) ? 1000 :
(timePrec == TSDB_TIME_PRECISION_MICRO ? 1000000 : 1000000000);
*time = factor * seconds + fraction;
return 0;
......@@ -315,37 +330,50 @@ int32_t parseLocaltimeWithDst(char* timestr, int64_t* time, int32_t timePrec) {
}
}
int64_t factor = (timePrec == TSDB_TIME_PRECISION_MILLI) ? 1000 : 1000000;
int64_t factor = (timePrec == TSDB_TIME_PRECISION_MILLI) ? 1000 :
(timePrec == TSDB_TIME_PRECISION_MICRO ? 1000000 : 1000000000);
*time = factor * seconds + fraction;
return 0;
}
static int32_t getDurationInUs(int64_t val, char unit, int64_t* result) {
*result = val;
int64_t factor = 1000L;
int64_t convertTimePrecision(int64_t time, int32_t fromPrecision, int32_t toPrecision) {
assert(fromPrecision == TSDB_TIME_PRECISION_MILLI ||
fromPrecision == TSDB_TIME_PRECISION_MICRO ||
fromPrecision == TSDB_TIME_PRECISION_NANO);
assert(toPrecision == TSDB_TIME_PRECISION_MILLI ||
toPrecision == TSDB_TIME_PRECISION_MICRO ||
toPrecision == TSDB_TIME_PRECISION_NANO);
static double factors[3][3] = { {1., 1000., 1000000.},
{1.0 / 1000, 1., 1000.},
{1.0 / 1000000, 1.0 / 1000, 1.} };
return (int64_t)((double)time * factors[fromPrecision][toPrecision]);
}
static int32_t getDuration(int64_t val, char unit, int64_t* result, int32_t timePrecision) {
switch (unit) {
case 's':
(*result) *= MILLISECOND_PER_SECOND*factor;
(*result) = convertTimePrecision(val * MILLISECOND_PER_SECOND, TSDB_TIME_PRECISION_MILLI, timePrecision);
break;
case 'm':
(*result) *= MILLISECOND_PER_MINUTE*factor;
(*result) = convertTimePrecision(val * MILLISECOND_PER_MINUTE, TSDB_TIME_PRECISION_MILLI, timePrecision);
break;
case 'h':
(*result) *= MILLISECOND_PER_HOUR*factor;
(*result) = convertTimePrecision(val * MILLISECOND_PER_HOUR, TSDB_TIME_PRECISION_MILLI, timePrecision);
break;
case 'd':
(*result) *= MILLISECOND_PER_DAY*factor;
(*result) = convertTimePrecision(val * MILLISECOND_PER_DAY, TSDB_TIME_PRECISION_MILLI, timePrecision);
break;
case 'w':
(*result) *= MILLISECOND_PER_WEEK*factor;
(*result) = convertTimePrecision(val * MILLISECOND_PER_WEEK, TSDB_TIME_PRECISION_MILLI, timePrecision);
break;
case 'a':
(*result) *= factor;
(*result) = convertTimePrecision(val, TSDB_TIME_PRECISION_MILLI, timePrecision);
break;
case 'u':
(*result) = convertTimePrecision(val, TSDB_TIME_PRECISION_MICRO, timePrecision);
break;
case 'b':
(*result) = convertTimePrecision(val, TSDB_TIME_PRECISION_NANO, timePrecision);
break;
default: {
return -1;
......@@ -357,6 +385,8 @@ static int32_t getDurationInUs(int64_t val, char unit, int64_t* result) {
}
/*
* b - nanoseconds;
* u - microseconds;
* a - Milliseconds
* s - Seconds
* m - Minutes
......@@ -366,7 +396,7 @@ static int32_t getDurationInUs(int64_t val, char unit, int64_t* result) {
* n - Months (30 days)
* y - Years (365 days)
*/
int32_t parseAbsoluteDuration(char* token, int32_t tokenlen, int64_t* duration) {
int32_t parseAbsoluteDuration(char* token, int32_t tokenlen, int64_t* duration, int32_t timePrecision) {
errno = 0;
char* endPtr = NULL;
......@@ -382,10 +412,10 @@ int32_t parseAbsoluteDuration(char* token, int32_t tokenlen, int64_t* duration)
return -1;
}
return getDurationInUs(timestamp, unit, duration);
return getDuration(timestamp, unit, duration, timePrecision);
}
int32_t parseNatualDuration(const char* token, int32_t tokenLen, int64_t* duration, char* unit) {
int32_t parseNatualDuration(const char* token, int32_t tokenLen, int64_t* duration, char* unit, int32_t timePrecision) {
errno = 0;
/* get the basic numeric value */
......@@ -399,7 +429,7 @@ int32_t parseNatualDuration(const char* token, int32_t tokenLen, int64_t* durati
return 0;
}
return getDurationInUs(*duration, *unit, duration);
return getDuration(*duration, *unit, duration, timePrecision);
}
int64_t taosTimeAdd(int64_t t, int64_t duration, char unit, int32_t precision) {
......
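The new convertTimePrecision above replaces the scattered ad-hoc "/ 1000" and "* 1000" conversions with a single table-driven scale between ms, us and ns, and getDuration now resolves the unit suffixes (including the new 'u' and 'b') through it. A standalone sketch of the same table-driven conversion, using its own enum in place of the TSDB precision constants (assumed here to map to 0/1/2, as the factor table implies):

```c
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

enum { PRECISION_MILLI = 0, PRECISION_MICRO = 1, PRECISION_NANO = 2 };

/* Same idea as convertTimePrecision() above:
 * factors[from][to] scales a timestamp between ms/us/ns. */
static int64_t convertPrecision(int64_t t, int32_t from, int32_t to) {
  static const double factors[3][3] = {
      {1.0, 1000.0, 1000000.0},
      {1.0 / 1000, 1.0, 1000.0},
      {1.0 / 1000000, 1.0 / 1000, 1.0},
  };
  assert(from >= PRECISION_MILLI && from <= PRECISION_NANO);
  assert(to >= PRECISION_MILLI && to <= PRECISION_NANO);
  return (int64_t)((double)t * factors[from][to]);
}

int main(void) {
  int64_t ms = 1617181920123LL;                 /* a millisecond timestamp */
  int64_t us = convertPrecision(ms, PRECISION_MILLI, PRECISION_MICRO);
  printf("%lld us, %lld ms\n", (long long)us,
         (long long)convertPrecision(us, PRECISION_MICRO, PRECISION_MILLI));
  return 0;
}
```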
......@@ -64,8 +64,8 @@ void httpJsonOriginString(JsonBuf* buf, char* sVal, int32_t len);
void httpJsonStringForTransMean(JsonBuf* buf, char* SVal, int32_t maxLen);
void httpJsonInt64(JsonBuf* buf, int64_t num);
void httpJsonUInt64(JsonBuf* buf, uint64_t num);
void httpJsonTimestamp(JsonBuf* buf, int64_t t, bool us);
void httpJsonUtcTimestamp(JsonBuf* buf, int64_t t, bool us);
void httpJsonTimestamp(JsonBuf* buf, int64_t t, int32_t timePrecision);
void httpJsonUtcTimestamp(JsonBuf* buf, int64_t t, int32_t timePrecision);
void httpJsonInt(JsonBuf* buf, int32_t num);
void httpJsonUInt(JsonBuf* buf, uint32_t num);
void httpJsonFloat(JsonBuf* buf, float num);
......
......@@ -262,42 +262,92 @@ void httpJsonUInt64(JsonBuf* buf, uint64_t num) {
buf->lst += snprintf(buf->lst, MAX_NUM_STR_SZ, "%" PRIu64, num);
}
void httpJsonTimestamp(JsonBuf* buf, int64_t t, bool us) {
void httpJsonTimestamp(JsonBuf* buf, int64_t t, int32_t timePrecision) {
char ts[35] = {0};
struct tm* ptm;
int32_t precision = 1000;
if (us) {
precision = 1000000;
int32_t fractionLen;
char* format = NULL;
time_t quot = 0;
long mod = 0;
switch (timePrecision) {
case TSDB_TIME_PRECISION_MILLI: {
quot = t / 1000;
fractionLen = 5;
format = ".%03" PRId64;
mod = t % 1000;
break;
}
case TSDB_TIME_PRECISION_MICRO: {
quot = t / 1000000;
fractionLen = 8;
format = ".%06" PRId64;
mod = t % 1000000;
break;
}
case TSDB_TIME_PRECISION_NANO: {
quot = t / 1000000000;
fractionLen = 11;
format = ".%09" PRId64;
mod = t % 1000000000;
break;
}
default:
assert(false);
}
time_t tt = t / precision;
ptm = localtime(&tt);
ptm = localtime(&quot);
int32_t length = (int32_t)strftime(ts, 35, "%Y-%m-%d %H:%M:%S", ptm);
if (us) {
length += snprintf(ts + length, 8, ".%06" PRId64, t % precision);
} else {
length += snprintf(ts + length, 5, ".%03" PRId64, t % precision);
}
length += snprintf(ts + length, fractionLen, format, mod);
httpJsonString(buf, ts, length);
}
void httpJsonUtcTimestamp(JsonBuf* buf, int64_t t, bool us) {
void httpJsonUtcTimestamp(JsonBuf* buf, int64_t t, int32_t timePrecision) {
char ts[40] = {0};
struct tm* ptm;
int32_t precision = 1000;
if (us) {
precision = 1000000;
int32_t fractionLen;
char* format = NULL;
time_t quot = 0;
long mod = 0;
switch (timePrecision) {
case TSDB_TIME_PRECISION_MILLI: {
quot = t / 1000;
fractionLen = 5;
format = ".%03" PRId64;
mod = t % 1000;
break;
}
case TSDB_TIME_PRECISION_MICRO: {
quot = t / 1000000;
fractionLen = 8;
format = ".%06" PRId64;
mod = t % 1000000;
break;
}
case TSDB_TIME_PRECISION_NANO: {
quot = t / 1000000000;
fractionLen = 11;
format = ".%09" PRId64;
mod = t % 1000000000;
break;
}
default:
assert(false);
}
time_t tt = t / precision;
ptm = localtime(&tt);
ptm = localtime(&quot);
int32_t length = (int32_t)strftime(ts, 40, "%Y-%m-%dT%H:%M:%S", ptm);
if (us) {
length += snprintf(ts + length, 8, ".%06" PRId64, t % precision);
} else {
length += snprintf(ts + length, 5, ".%03" PRId64, t % precision);
}
length += snprintf(ts + length, fractionLen, format, mod);
length += (int32_t)strftime(ts + length, 40 - length, "%z", ptm);
httpJsonString(buf, ts, length);
......
......@@ -186,13 +186,11 @@ bool restBuildSqlJson(HttpContext *pContext, HttpSqlCmd *cmd, TAOS_RES *result,
break;
case TSDB_DATA_TYPE_TIMESTAMP:
if (timestampFormat == REST_TIMESTAMP_FMT_LOCAL_STRING) {
httpJsonTimestamp(jsonBuf, *((int64_t *)row[i]),
taos_result_precision(result) == TSDB_TIME_PRECISION_MICRO);
httpJsonTimestamp(jsonBuf, *((int64_t *)row[i]), taos_result_precision(result));
} else if (timestampFormat == REST_TIMESTAMP_FMT_TIMESTAMP) {
httpJsonInt64(jsonBuf, *((int64_t *)row[i]));
} else {
httpJsonUtcTimestamp(jsonBuf, *((int64_t *)row[i]),
taos_result_precision(result) == TSDB_TIME_PRECISION_MICRO);
httpJsonUtcTimestamp(jsonBuf, *((int64_t *)row[i]), taos_result_precision(result));
}
break;
default:
......
......@@ -464,6 +464,7 @@ typedef struct SSWindowOperatorInfo {
TSKEY prevTs; // previous timestamp
int32_t numOfRows; // number of rows
int32_t start; // start row index
bool reptScan; // next round scan
} SSWindowOperatorInfo;
typedef struct SStateWindowOperatorInfo {
......@@ -473,7 +474,7 @@ typedef struct SStateWindowOperatorInfo {
int32_t colIndex; // start row index
int32_t start;
char* prevData; // previous data
bool reptScan;
} SStateWindowOperatorInfo ;
typedef struct SDistinctOperatorInfo {
......
......@@ -46,7 +46,7 @@ enum SQL_NODE_FROM_TYPE {
enum SQL_EXPR_FLAG {
EXPR_FLAG_TS_ERROR = 1,
EXPR_FLAG_US_TIMESTAMP = 2,
EXPR_FLAG_NS_TIMESTAMP = 2,
EXPR_FLAG_TIMESTAMP_VAR = 3,
};
......
......@@ -559,10 +559,8 @@ session_option(X) ::= SESSION LP ids(V) cpxName(Z) COMMA tmvar(Y) RP. {
X.gap = Y;
}
%type windowstate_option {SWindowStateVal}
windowstate_option(X) ::= . {X.col.n = 0;}
windowstate_option(X) ::= STATE_WINDOW LP ids(V) RP. {
X.col = V;
}
windowstate_option(X) ::= . { X.col.n = 0; X.col.z = NULL;}
windowstate_option(X) ::= STATE_WINDOW LP ids(V) RP. { X.col = V; }
%type fill_opt {SArray*}
%destructor fill_opt {taosArrayDestroy($$);}
......
......@@ -3428,7 +3428,7 @@ static bool deriv_function_setup(SQLFunctionCtx *pCtx) {
SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx);
SDerivInfo* pDerivInfo = GET_ROWCELL_INTERBUF(pResInfo);
pDerivInfo->ignoreNegative = pCtx->param[2].i64;
pDerivInfo->ignoreNegative = pCtx->param[1].i64;
pDerivInfo->prevTs = -1;
pDerivInfo->tsWindow = pCtx->param[0].i64;
pDerivInfo->valueSet = false;
......@@ -3440,10 +3440,8 @@ static void deriv_function(SQLFunctionCtx *pCtx) {
SDerivInfo* pDerivInfo = GET_ROWCELL_INTERBUF(pResInfo);
void *data = GET_INPUT_DATA_LIST(pCtx);
bool isFirstBlock = (pDerivInfo->valueSet == false);
int32_t notNullElems = 0;
int32_t step = GET_FORWARD_DIRECTION_FACTOR(pCtx->order);
int32_t i = (pCtx->order == TSDB_ORDER_ASC) ? 0 : pCtx->size - 1;
......@@ -3469,12 +3467,12 @@ static void deriv_function(SQLFunctionCtx *pCtx) {
*pTimestamp = tsList[i];
pOutput += 1;
pTimestamp += 1;
notNullElems++;
}
}
pDerivInfo->prevValue = pData[i];
pDerivInfo->prevTs = tsList[i];
notNullElems++;
}
break;
......@@ -3496,12 +3494,12 @@ static void deriv_function(SQLFunctionCtx *pCtx) {
*pTimestamp = tsList[i];
pOutput += 1;
pTimestamp += 1;
notNullElems++;
}
}
pDerivInfo->prevValue = (double) pData[i];
pDerivInfo->prevTs = tsList[i];
notNullElems++;
}
break;
}
......@@ -3522,12 +3520,12 @@ static void deriv_function(SQLFunctionCtx *pCtx) {
*pTimestamp = tsList[i];
pOutput += 1;
pTimestamp += 1;
notNullElems++;
}
}
pDerivInfo->prevValue = pData[i];
pDerivInfo->prevTs = tsList[i];
notNullElems++;
}
break;
}
......@@ -3549,12 +3547,12 @@ static void deriv_function(SQLFunctionCtx *pCtx) {
*pTimestamp = tsList[i];
pOutput += 1;
pTimestamp += 1;
notNullElems++;
}
}
pDerivInfo->prevValue = pData[i];
pDerivInfo->prevTs = tsList[i];
notNullElems++;
}
break;
}
......@@ -3575,12 +3573,12 @@ static void deriv_function(SQLFunctionCtx *pCtx) {
*pTimestamp = tsList[i];
pOutput += 1;
pTimestamp += 1;
notNullElems++;
}
}
pDerivInfo->prevValue = pData[i];
pDerivInfo->prevTs = tsList[i];
notNullElems++;
}
break;
}
......@@ -3602,12 +3600,12 @@ static void deriv_function(SQLFunctionCtx *pCtx) {
pOutput += 1;
pTimestamp += 1;
notNullElems++;
}
}
pDerivInfo->prevValue = pData[i];
pDerivInfo->prevTs = tsList[i];
notNullElems++;
}
break;
}
......@@ -3623,8 +3621,7 @@ static void deriv_function(SQLFunctionCtx *pCtx) {
*/
assert(pCtx->hasNull);
} else {
int32_t forwardStep = (isFirstBlock) ? notNullElems - 1 : notNullElems;
GET_RES_INFO(pCtx)->numOfRes += forwardStep;
GET_RES_INFO(pCtx)->numOfRes += notNullElems;
}
}
......@@ -4687,8 +4684,8 @@ static bool rate_function_setup(SQLFunctionCtx *pCtx) {
pInfo->correctionValue = 0;
pInfo->firstKey = INT64_MIN;
pInfo->lastKey = INT64_MIN;
pInfo->firstValue = INT64_MIN;
pInfo->lastValue = INT64_MIN;
pInfo->firstValue = (double) INT64_MIN;
pInfo->lastValue = (double) INT64_MIN;
pInfo->hasResult = 0;
pInfo->isIRate = (pCtx->functionId == TSDB_FUNC_IRATE);
......@@ -5003,6 +5000,19 @@ void generateBlockDistResult(STableBlockDist *pTableBlockDist, char* result) {
min = totalBlocks > 0 ? pTableBlockDist->minRows : 0;
max = totalBlocks > 0 ? pTableBlockDist->maxRows : 0;
double stdDev = 0;
if (totalBlocks > 0) {
double variance = 0;
for (int32_t i = 0; i < numSteps; i++) {
SFileBlockInfo *blockInfo = taosArrayGet(blockInfos, i);
int64_t blocks = blockInfo->numBlocksOfStep;
int32_t rows = (i * TSDB_BLOCK_DIST_STEP_ROWS + TSDB_BLOCK_DIST_STEP_ROWS / 2);
variance += blocks * (rows - avg) * (rows - avg);
}
variance = variance / totalBlocks;
stdDev = sqrt(variance);
}
double percents[] = {0.05, 0.10, 0.20, 0.30, 0.40, 0.50, 0.60, 0.70, 0.80, 0.90, 0.95, 0.99};
int32_t percentiles[] = {-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1};
assert(sizeof(percents)/sizeof(double) == sizeof(percentiles)/sizeof(int32_t));
......@@ -5017,12 +5027,12 @@ void generateBlockDistResult(STableBlockDist *pTableBlockDist, char* result) {
"60th=[%d], 70th=[%d], 80th=[%d], 90th=[%d], 95th=[%d], 99th=[%d]\n\t "
"Min=[%"PRId64"(Rows)] Max=[%"PRId64"(Rows)] Avg=[%"PRId64"(Rows)] Stddev=[%.2f] \n\t "
"Rows=[%"PRIu64"], Blocks=[%"PRId64"], Size=[%.3f(Kb)] Comp=[%.2f]\n\t "
"RowsInMem=[%d] \n\t SeekHeaderTime=[%d(us)]",
"RowsInMem=[%d] \n\t",
percentiles[0], percentiles[1], percentiles[2], percentiles[3], percentiles[4], percentiles[5],
percentiles[6], percentiles[7], percentiles[8], percentiles[9], percentiles[10], percentiles[11],
min, max, avg, 0.0,
min, max, avg, stdDev,
totalRows, totalBlocks, totalLen/1024.0, compRatio,
pTableBlockDist->numOfRowsInMemTable, pTableBlockDist->firstSeekTimeUs);
pTableBlockDist->numOfRowsInMemTable);
varDataSetLen(result, sz);
UNUSED(sz);
}
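The generateBlockDistResult change above replaces the hard-coded 0.0 with a standard deviation computed from the block histogram, treating each step's midpoint as the representative row count. A standalone sketch of that weighted calculation with a hypothetical step size and counts:

```c
#include <math.h>
#include <stdio.h>

#define STEP_ROWS 1024                   /* stand-in for TSDB_BLOCK_DIST_STEP_ROWS */

/* Weighted mean/stddev over a histogram: bucket i holds counts[i] blocks whose
 * row count is approximated by the bucket midpoint. */
int main(void) {
  long long counts[] = {3, 10, 7, 1};
  int numSteps = (int)(sizeof(counts) / sizeof(counts[0]));

  long long totalBlocks = 0, totalRows = 0;
  for (int i = 0; i < numSteps; i++) {
    int rows = i * STEP_ROWS + STEP_ROWS / 2;
    totalBlocks += counts[i];
    totalRows += counts[i] * rows;
  }
  double avg = (double)totalRows / (double)totalBlocks;

  double variance = 0;
  for (int i = 0; i < numSteps; i++) {
    int rows = i * STEP_ROWS + STEP_ROWS / 2;
    variance += (double)counts[i] * (rows - avg) * (rows - avg);
  }
  variance /= (double)totalBlocks;
  printf("avg=%.1f rows, stddev=%.1f rows\n", avg, sqrt(variance));
  return 0;
}
```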
......@@ -5290,7 +5300,7 @@ SAggFunctionInfo aAggs[] = {{
},
{
// 17
"ts_dummy",
"ts",
TSDB_FUNC_TS_DUMMY,
TSDB_FUNC_TS_DUMMY,
TSDB_BASE_FUNC_SO | TSDB_FUNCSTATE_NEED_TS,
......@@ -5384,7 +5394,7 @@ SAggFunctionInfo aAggs[] = {{
"diff",
TSDB_FUNC_DIFF,
TSDB_FUNC_INVALID_ID,
TSDB_FUNCSTATE_MO | TSDB_FUNCSTATE_STABLE | TSDB_FUNCSTATE_NEED_TS,
TSDB_FUNCSTATE_MO | TSDB_FUNCSTATE_STABLE | TSDB_FUNCSTATE_NEED_TS | TSDB_FUNCSTATE_SELECTIVITY,
diff_function_setup,
diff_function,
diff_function_f,
......@@ -5488,7 +5498,7 @@ SAggFunctionInfo aAggs[] = {{
"derivative", // return table id and the corresponding tags for join match and subscribe
TSDB_FUNC_DERIVATIVE,
TSDB_FUNC_INVALID_ID,
TSDB_FUNCSTATE_MO | TSDB_FUNCSTATE_STABLE | TSDB_FUNCSTATE_NEED_TS,
TSDB_FUNCSTATE_MO | TSDB_FUNCSTATE_STABLE | TSDB_FUNCSTATE_NEED_TS | TSDB_FUNCSTATE_SELECTIVITY,
deriv_function_setup,
deriv_function,
noop2,
......
......@@ -30,6 +30,7 @@
#define IS_MASTER_SCAN(runtime) ((runtime)->scanFlag == MASTER_SCAN)
#define IS_REVERSE_SCAN(runtime) ((runtime)->scanFlag == REVERSE_SCAN)
#define IS_REPEAT_SCAN(runtime) ((runtime)->scanFlag == REPEAT_SCAN)
#define SET_MASTER_SCAN_FLAG(runtime) ((runtime)->scanFlag = MASTER_SCAN)
#define SET_REVERSE_SCAN_FLAG(runtime) ((runtime)->scanFlag = REVERSE_SCAN)
......@@ -130,10 +131,10 @@ static void getNextTimeWindow(SQueryAttr* pQueryAttr, STimeWindow* tw) {
return;
}
int64_t key = tw->skey / 1000, interval = pQueryAttr->interval.interval;
if (pQueryAttr->precision == TSDB_TIME_PRECISION_MICRO) {
key /= 1000;
}
int64_t key = tw->skey, interval = pQueryAttr->interval.interval;
//convert key to second
key = convertTimePrecision(key, pQueryAttr->precision, TSDB_TIME_PRECISION_MILLI) / 1000;
if (pQueryAttr->interval.intervalUnit == 'y') {
interval *= 12;
}
......@@ -145,17 +146,13 @@ static void getNextTimeWindow(SQueryAttr* pQueryAttr, STimeWindow* tw) {
int mon = (int)(tm.tm_year * 12 + tm.tm_mon + interval * factor);
tm.tm_year = mon / 12;
tm.tm_mon = mon % 12;
tw->skey = mktime(&tm) * 1000L;
tw->skey = convertTimePrecision(mktime(&tm) * 1000L, TSDB_TIME_PRECISION_MILLI, pQueryAttr->precision);
mon = (int)(mon + interval);
tm.tm_year = mon / 12;
tm.tm_mon = mon % 12;
tw->ekey = mktime(&tm) * 1000L;
tw->ekey = convertTimePrecision(mktime(&tm) * 1000L, TSDB_TIME_PRECISION_MILLI, pQueryAttr->precision);
if (pQueryAttr->precision == TSDB_TIME_PRECISION_MICRO) {
tw->skey *= 1000L;
tw->ekey *= 1000L;
}
tw->ekey -= 1;
}
......@@ -735,6 +732,7 @@ static void doApplyFunctions(SQueryRuntimeEnv* pRuntimeEnv, SQLFunctionCtx* pCtx
if (pCtx[k].preAggVals.isSet && forwardStep < numOfTotal) {
pCtx[k].preAggVals.isSet = false;
}
if (functionNeedToExecute(pRuntimeEnv, &pCtx[k], functionId)) {
aAggs[functionId].xFunction(&pCtx[k]);
}
......@@ -918,7 +916,7 @@ void setInputDataBlock(SOperatorInfo* pOperator, SQLFunctionCtx* pCtx, SSDataBlo
doSetInputDataBlockInfo(pOperator, pCtx, pBlock, order);
}
} else {
if (pCtx[0].pInput == NULL && pBlock->pDataBlock != NULL) {
if (/*pCtx[0].pInput == NULL && */pBlock->pDataBlock != NULL) {
doSetInputDataBlock(pOperator, pCtx, pBlock, order);
} else {
doSetInputDataBlockInfo(pOperator, pCtx, pBlock, order);
......@@ -1169,7 +1167,7 @@ static void hashIntervalAgg(SOperatorInfo* pOperatorInfo, SResultRowInfo* pResul
SQueryRuntimeEnv* pRuntimeEnv = pOperatorInfo->pRuntimeEnv;
int32_t numOfOutput = pOperatorInfo->numOfOutput;
SQueryAttr* pQueryAttr = pRuntimeEnv->pQueryAttr;
SQueryAttr* pQueryAttr = pRuntimeEnv->pQueryAttr;
int32_t step = GET_FORWARD_DIRECTION_FACTOR(pQueryAttr->order.order);
bool ascQuery = QUERY_IS_ASC_QUERY(pQueryAttr);
......@@ -1336,6 +1334,10 @@ static void doSessionWindowAggImpl(SOperatorInfo* pOperator, SSWindowOperatorInf
int64_t gap = pOperator->pRuntimeEnv->pQueryAttr->sw.gap;
pInfo->numOfRows = 0;
if (IS_REPEAT_SCAN(pRuntimeEnv) && !pInfo->reptScan) {
pInfo->reptScan = true;
pInfo->prevTs = INT64_MIN;
}
TSKEY* tsList = (TSKEY*)pColInfoData->pData;
for (int32_t j = 0; j < pSDataBlock->info.rows; ++j) {
......@@ -1345,7 +1347,7 @@ static void doSessionWindowAggImpl(SOperatorInfo* pOperator, SSWindowOperatorInf
pInfo->prevTs = tsList[j];
pInfo->numOfRows = 1;
pInfo->start = j;
} else if (tsList[j] - pInfo->prevTs <= gap) {
} else if (tsList[j] - pInfo->prevTs <= gap && (tsList[j] - pInfo->prevTs) >= 0) {
pInfo->curWindow.ekey = tsList[j];
pInfo->prevTs = tsList[j];
pInfo->numOfRows += 1;
......@@ -1681,8 +1683,6 @@ static int32_t setupQueryRuntimeEnv(SQueryRuntimeEnv *pRuntimeEnv, int32_t numOf
SQueryAttr *pQueryAttr = pRuntimeEnv->pQueryAttr;
pRuntimeEnv->prevGroupId = INT32_MIN;
pRuntimeEnv->enableGroupData = false;
pRuntimeEnv->pQueryAttr = pQueryAttr;
pRuntimeEnv->pResultRowHashTable = taosHashInit(numOfTables, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_NO_LOCK);
......@@ -3094,7 +3094,7 @@ int32_t initResultRow(SResultRow *pResultRow) {
* +------------+-----------------result column 1-----------+-----------------result column 2-----------+
* + SResultRow | SResultRowCellInfo | intermediate buffer1 | SResultRowCellInfo | intermediate buffer 2|
* +------------+-------------------------------------------+-------------------------------------------+
* offset[0] offset[1]
* offset[0] offset[1] offset[2]
*/
void setDefaultOutputBuf(SQueryRuntimeEnv *pRuntimeEnv, SOptrBasicInfo *pInfo, int64_t uid, int32_t stage) {
SQLFunctionCtx* pCtx = pInfo->pCtx;
......@@ -3323,7 +3323,7 @@ void setResultRowOutputBufInitCtx(SQueryRuntimeEnv *pRuntimeEnv, SResultRow *pRe
offset += pCtx[i].outputBytes;
int32_t functionId = pCtx[i].functionId;
if (functionId == TSDB_FUNC_TOP || functionId == TSDB_FUNC_BOTTOM || functionId == TSDB_FUNC_DIFF) {
if (functionId == TSDB_FUNC_TOP || functionId == TSDB_FUNC_BOTTOM || functionId == TSDB_FUNC_DIFF || functionId == TSDB_FUNC_DERIVATIVE) {
pCtx[i].ptsOutputBuf = pCtx[0].pOutput;
}
......@@ -3381,7 +3381,7 @@ void setResultOutputBuf(SQueryRuntimeEnv *pRuntimeEnv, SResultRow *pResult, SQLF
offset += pCtx[i].outputBytes;
int32_t functionId = pCtx[i].functionId;
if (functionId == TSDB_FUNC_TOP || functionId == TSDB_FUNC_BOTTOM || functionId == TSDB_FUNC_DIFF) {
if (functionId == TSDB_FUNC_TOP || functionId == TSDB_FUNC_BOTTOM || functionId == TSDB_FUNC_DIFF || functionId == TSDB_FUNC_DERIVATIVE) {
pCtx[i].ptsOutputBuf = pCtx[0].pOutput;
}
......@@ -3589,6 +3589,8 @@ static int32_t doCopyToSDataBlock(SQueryRuntimeEnv* pRuntimeEnv, SGroupResInfo*
int32_t step = -1;
qDebug("QInfo:0x%"PRIx64" start to copy data from windowResInfo to output buf", GET_QID(pRuntimeEnv));
assert(orderType == TSDB_ORDER_ASC || orderType == TSDB_ORDER_DESC);
if (orderType == TSDB_ORDER_ASC) {
start = pGroupResInfo->index;
step = 1;
......@@ -4115,6 +4117,7 @@ int32_t doInitQInfo(SQInfo* pQInfo, STSBuf* pTsBuf, void* tsdb, void* sourceOptr
pQueryAttr->interBufSize = getOutputInterResultBufSize(pQueryAttr);
pRuntimeEnv->groupResInfo.totalGroup = (int32_t) (pQueryAttr->stableQuery? GET_NUM_OF_TABLEGROUP(pRuntimeEnv):0);
pRuntimeEnv->enableGroupData = false;
pRuntimeEnv->pQueryAttr = pQueryAttr;
pRuntimeEnv->pTsBuf = pTsBuf;
......@@ -4570,7 +4573,7 @@ SOperatorInfo* createDataBlocksOptScanInfo(void* pTsdbQueryHandle, SQueryRuntime
}
SArray* getOrderCheckColumns(SQueryAttr* pQuery) {
int32_t numOfCols = pQuery->pGroupbyExpr->numOfGroupCols;
int32_t numOfCols = pQuery->pGroupbyExpr == NULL? 0: pQuery->pGroupbyExpr->numOfGroupCols;
SArray* pOrderColumns = NULL;
if (numOfCols > 0) {
......@@ -4609,7 +4612,7 @@ SArray* getOrderCheckColumns(SQueryAttr* pQuery) {
}
SArray* getResultGroupCheckColumns(SQueryAttr* pQuery) {
int32_t numOfCols = pQuery->pGroupbyExpr->numOfGroupCols;
int32_t numOfCols = pQuery->pGroupbyExpr == NULL? 0 : pQuery->pGroupbyExpr->numOfGroupCols;
SArray* pOrderColumns = NULL;
if (numOfCols > 0) {
......@@ -5175,6 +5178,10 @@ static void doStateWindowAggImpl(SOperatorInfo* pOperator, SStateWindowOperatorI
SColumnInfoData* pTsColInfoData = taosArrayGet(pSDataBlock->pDataBlock, 0);
TSKEY* tsList = (TSKEY*)pTsColInfoData->pData;
if (IS_REPEAT_SCAN(pRuntimeEnv) && !pInfo->reptScan) {
pInfo->reptScan = true;
tfree(pInfo->prevData);
}
pInfo->numOfRows = 0;
for (int32_t j = 0; j < pSDataBlock->info.rows; ++j) {
......@@ -5761,6 +5768,7 @@ SOperatorInfo* createTimeIntervalOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOp
SOperatorInfo* createStatewindowOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput) {
SStateWindowOperatorInfo* pInfo = calloc(1, sizeof(SStateWindowOperatorInfo));
pInfo->colIndex = -1;
pInfo->reptScan = false;
pInfo->binfo.pCtx = createSQLFunctionCtx(pRuntimeEnv, pExpr, numOfOutput, &pInfo->binfo.rowCellInfoOffset);
pInfo->binfo.pRes = createOutputBuf(pExpr, numOfOutput, pRuntimeEnv->resultInfo.capacity);
initResultRowInfo(&pInfo->binfo.resultRowInfo, 8, TSDB_DATA_TYPE_INT);
......@@ -5788,7 +5796,8 @@ SOperatorInfo* createSWindowOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperato
pInfo->binfo.pRes = createOutputBuf(pExpr, numOfOutput, pRuntimeEnv->resultInfo.capacity);
initResultRowInfo(&pInfo->binfo.resultRowInfo, 8, TSDB_DATA_TYPE_INT);
pInfo->prevTs = INT64_MIN;
pInfo->prevTs = INT64_MIN;
pInfo->reptScan = false;
SOperatorInfo* pOperator = calloc(1, sizeof(SOperatorInfo));
pOperator->name = "SessionWindowAggOperator";
......@@ -7213,7 +7222,7 @@ SQInfo* createQInfoImpl(SQueryTableMsg* pQueryMsg, SGroupbyExpr* pGroupbyExpr, S
// todo refactor
pQInfo->query.queryBlockDist = (numOfOutput == 1 && pExprs[0].base.functionId == TSDB_FUNC_BLKINFO);
qDebug("qmsg:%p QInfo:0x%" PRIx64 "-%p created", pQueryMsg, pQInfo->qId, pQInfo);
qDebug("qmsg:%p vgId:%d, QInfo:0x%" PRIx64 "-%p created", pQueryMsg, pQInfo->query.vgId, pQInfo->qId, pQInfo);
return pQInfo;
_cleanup_qinfo:
......
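Among the qExecutor changes above, doSessionWindowAggImpl now also requires the timestamp delta from the previous row to be non-negative before the row may extend the current session window. A tiny standalone sketch of that grouping rule on hypothetical data:

```c
#include <stdint.h>
#include <stdio.h>

/* A row extends the current session window only when 0 <= ts - prev <= gap;
 * otherwise a new window starts. The final window is flushed at the end. */
int main(void) {
  int64_t ts[] = {1000, 1200, 1350, 5000, 5100, 9000};
  int64_t gap = 500;
  int n = (int)(sizeof(ts) / sizeof(ts[0]));

  int64_t winStart = ts[0], prev = ts[0];
  for (int i = 1; i <= n; i++) {
    int64_t delta = (i < n) ? ts[i] - prev : gap + 1;   /* force a flush at the end */
    if (i == n || delta < 0 || delta > gap) {
      printf("window [%lld, %lld]\n", (long long)winStart, (long long)prev);
      if (i < n) winStart = ts[i];
    }
    if (i < n) prev = ts[i];
  }
  return 0;
}
```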
......@@ -139,19 +139,20 @@ tSqlExpr *tSqlExprCreateIdValue(SStrToken *pToken, int32_t optrType) {
pSqlExpr->tokenId = optrType;
pSqlExpr->type = SQL_NODE_VALUE;
} else if (optrType == TK_NOW) {
// use microsecond by default
pSqlExpr->value.i64 = taosGetTimestamp(TSDB_TIME_PRECISION_MICRO);
// use nanosecond by default TODO set value after getting database precision
pSqlExpr->value.i64 = taosGetTimestamp(TSDB_TIME_PRECISION_NANO);
pSqlExpr->value.nType = TSDB_DATA_TYPE_BIGINT;
pSqlExpr->tokenId = TK_TIMESTAMP; // TK_TIMESTAMP used to denote the time value is in microsecond
pSqlExpr->type = SQL_NODE_VALUE;
pSqlExpr->flags |= 1 << EXPR_FLAG_US_TIMESTAMP;
pSqlExpr->flags |= 1 << EXPR_FLAG_NS_TIMESTAMP;
} else if (optrType == TK_VARIABLE) {
int32_t ret = parseAbsoluteDuration(pToken->z, pToken->n, &pSqlExpr->value.i64);
// use nanosecond by default TODO set value after getting database precision
int32_t ret = parseAbsoluteDuration(pToken->z, pToken->n, &pSqlExpr->value.i64, TSDB_TIME_PRECISION_NANO);
if (ret != TSDB_CODE_SUCCESS) {
terrno = TSDB_CODE_TSC_SQL_SYNTAX_ERROR;
}
pSqlExpr->flags |= 1 << EXPR_FLAG_US_TIMESTAMP;
pSqlExpr->flags |= 1 << EXPR_FLAG_NS_TIMESTAMP;
pSqlExpr->flags |= 1 << EXPR_FLAG_TIMESTAMP_VAR;
pSqlExpr->value.nType = TSDB_DATA_TYPE_BIGINT;
pSqlExpr->tokenId = TK_TIMESTAMP;
......
......@@ -132,7 +132,7 @@ int32_t qCreateQueryInfo(void* tsdb, int32_t vgId, SQueryTableMsg* pQueryMsg, qi
numOfGroupByCols = 0;
}
qDebug("qmsg:%p query stable, uid:%"PRId64", tid:%d", pQueryMsg, id->uid, id->tid);
qDebug("qmsg:%p query stable, uid:%"PRIu64", tid:%d", pQueryMsg, id->uid, id->tid);
code = tsdbQuerySTableByTagCond(tsdb, id->uid, pQueryMsg->window.skey, param.tagCond, pQueryMsg->tagCondLen,
pQueryMsg->tagNameRelType, param.tbnameCond, &tableGroupInfo, param.pGroupColIndex, numOfGroupByCols);
......@@ -162,7 +162,7 @@ int32_t qCreateQueryInfo(void* tsdb, int32_t vgId, SQueryTableMsg* pQueryMsg, qi
assert(pQueryMsg->stableQuery == isSTableQuery);
(*pQInfo) = createQInfoImpl(pQueryMsg, param.pGroupbyExpr, param.pExprs, param.pSecExprs, &tableGroupInfo,
param.pTagColumnInfo, vgId, param.sql, qId);
param.pTagColumnInfo, vgId, param.sql, qId);
param.sql = NULL;
param.pExprs = NULL;
......
This diff has been collapsed.
......@@ -92,6 +92,7 @@ struct STsdbRepo {
pthread_mutex_t mutex;
bool repoLocked;
int32_t code; // Commit code
bool inCompact; // is in compact process?
};
#define REPO_ID(r) (r)->config.tsdbId
......
......@@ -12,11 +12,516 @@
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include "tsdb.h"
#include "tsdbint.h"
#ifndef _TSDB_PLUGINS
int tsdbCompact(STsdbRepo *pRepo) { return 0; }
void *tsdbCompactImpl(STsdbRepo *pRepo) { return NULL; }
typedef struct {
STable * pTable;
SBlockIdx * pBlkIdx;
SBlockIdx bindex;
SBlockInfo *pInfo;
} STableCompactH;
typedef struct {
SRtn rtn;
SFSIter fsIter;
SArray * tbArray; // table array to cache table obj and block indexes
SReadH readh;
SDFileSet wSet;
SArray * aBlkIdx;
SArray * aSupBlk;
SDataCols *pDataCols;
} SCompactH;
#define TSDB_COMPACT_WSET(pComph) (&((pComph)->wSet))
#define TSDB_COMPACT_REPO(pComph) TSDB_READ_REPO(&((pComph)->readh))
#define TSDB_COMPACT_HEAD_FILE(pComph) TSDB_DFILE_IN_SET(TSDB_COMPACT_WSET(pComph), TSDB_FILE_HEAD)
#define TSDB_COMPACT_DATA_FILE(pComph) TSDB_DFILE_IN_SET(TSDB_COMPACT_WSET(pComph), TSDB_FILE_DATA)
#define TSDB_COMPACT_LAST_FILE(pComph) TSDB_DFILE_IN_SET(TSDB_COMPACT_WSET(pComph), TSDB_FILE_LAST)
#define TSDB_COMPACT_BUF(pComph) TSDB_READ_BUF(&((pComph)->readh))
#define TSDB_COMPACT_COMP_BUF(pComph) TSDB_READ_COMP_BUF(&((pComph)->readh))
static int tsdbAsyncCompact(STsdbRepo *pRepo);
static void tsdbStartCompact(STsdbRepo *pRepo);
static void tsdbEndCompact(STsdbRepo *pRepo, int eno);
static int tsdbCompactMeta(STsdbRepo *pRepo);
static int tsdbCompactTSData(STsdbRepo *pRepo);
static int tsdbCompactFSet(SCompactH *pComph, SDFileSet *pSet);
static bool tsdbShouldCompact(SCompactH *pComph);
static int tsdbInitCompactH(SCompactH *pComph, STsdbRepo *pRepo);
static void tsdbDestroyCompactH(SCompactH *pComph);
static int tsdbInitCompTbArray(SCompactH *pComph);
static void tsdbDestroyCompTbArray(SCompactH *pComph);
static int tsdbCacheFSetIndex(SCompactH *pComph);
static int tsdbCompactFSetInit(SCompactH *pComph, SDFileSet *pSet);
static void tsdbCompactFSetEnd(SCompactH *pComph);
static int tsdbCompactFSetImpl(SCompactH *pComph);
static int tsdbWriteBlockToRightFile(SCompactH *pComph, STable *pTable, SDataCols *pDataCols, void **ppBuf,
void **ppCBuf);
int tsdbCompact(STsdbRepo *pRepo) { return tsdbAsyncCompact(pRepo); }
void *tsdbCompactImpl(STsdbRepo *pRepo) {
// Check if there are files in TSDB FS to compact
if (REPO_FS(pRepo)->cstatus->pmf == NULL) {
tsdbInfo("vgId:%d no file to compact in FS", REPO_ID(pRepo));
return NULL;
}
tsdbStartCompact(pRepo);
if (tsdbCompactMeta(pRepo) < 0) {
tsdbError("vgId:%d failed to compact META data since %s", REPO_ID(pRepo), tstrerror(terrno));
goto _err;
}
if (tsdbCompactTSData(pRepo) < 0) {
tsdbError("vgId:%d failed to compact TS data since %s", REPO_ID(pRepo), tstrerror(terrno));
goto _err;
}
tsdbEndCompact(pRepo, TSDB_CODE_SUCCESS);
return NULL;
_err:
pRepo->code = terrno;
tsdbEndCompact(pRepo, terrno);
return NULL;
}
static int tsdbAsyncCompact(STsdbRepo *pRepo) {
tsem_wait(&(pRepo->readyToCommit));
return tsdbScheduleCommit(pRepo, COMPACT_REQ);
}
static void tsdbStartCompact(STsdbRepo *pRepo) {
ASSERT(!pRepo->inCompact);
tsdbInfo("vgId:%d start to compact!", REPO_ID(pRepo));
tsdbStartFSTxn(pRepo, 0, 0);
pRepo->code = TSDB_CODE_SUCCESS;
pRepo->inCompact = true;
}
static void tsdbEndCompact(STsdbRepo *pRepo, int eno) {
if (eno != TSDB_CODE_SUCCESS) {
tsdbEndFSTxnWithError(REPO_FS(pRepo));
} else {
tsdbEndFSTxn(pRepo);
}
pRepo->inCompact = false;
tsdbInfo("vgId:%d compact over, %s", REPO_ID(pRepo), (eno == TSDB_CODE_SUCCESS) ? "succeed" : "failed");
tsem_post(&(pRepo->readyToCommit));
}
static int tsdbCompactMeta(STsdbRepo *pRepo) {
STsdbFS *pfs = REPO_FS(pRepo);
tsdbUpdateMFile(pfs, pfs->cstatus->pmf);
return 0;
}
static int tsdbCompactTSData(STsdbRepo *pRepo) {
SCompactH compactH;
SDFileSet *pSet = NULL;
tsdbDebug("vgId:%d start to compact TS data", REPO_ID(pRepo));
// If no file, just return 0;
if (taosArrayGetSize(REPO_FS(pRepo)->cstatus->df) <= 0) {
tsdbDebug("vgId:%d no TS data file to compact, compact over", REPO_ID(pRepo));
return 0;
}
if (tsdbInitCompactH(&compactH, pRepo) < 0) {
return -1;
}
while ((pSet = tsdbFSIterNext(&(compactH.fsIter)))) {
// Remove those expired files
if (pSet->fid < compactH.rtn.minFid) {
tsdbInfo("vgId:%d FSET %d on level %d disk id %d expires, remove it", REPO_ID(pRepo), pSet->fid,
TSDB_FSET_LEVEL(pSet), TSDB_FSET_ID(pSet));
continue;
}
if (TSDB_FSET_LEVEL(pSet) == TFS_MAX_LEVEL) {
tsdbDebug("vgId:%d FSET %d on level %d, should not compact", REPO_ID(pRepo), pSet->fid, TFS_MAX_LEVEL);
tsdbUpdateDFileSet(REPO_FS(pRepo), pSet);
continue;
}
if (tsdbCompactFSet(&compactH, pSet) < 0) {
tsdbDestroyCompactH(&compactH);
tsdbError("vgId:%d failed to compact FSET %d since %s", REPO_ID(pRepo), pSet->fid, tstrerror(terrno));
return -1;
}
}
tsdbDestroyCompactH(&compactH);
tsdbDebug("vgId:%d compact TS data over", REPO_ID(pRepo));
return 0;
}
static int tsdbCompactFSet(SCompactH *pComph, SDFileSet *pSet) {
STsdbRepo *pRepo = TSDB_COMPACT_REPO(pComph);
SDiskID did;
tsdbDebug("vgId:%d start to compact FSET %d on level %d id %d", REPO_ID(pRepo), pSet->fid, TSDB_FSET_LEVEL(pSet),
TSDB_FSET_ID(pSet));
if (tsdbCompactFSetInit(pComph, pSet) < 0) {
return -1;
}
if (!tsdbShouldCompact(pComph)) {
tsdbDebug("vgId:%d no need to compact FSET %d", REPO_ID(pRepo), pSet->fid);
if (tsdbApplyRtnOnFSet(TSDB_COMPACT_REPO(pComph), pSet, &(pComph->rtn)) < 0) {
tsdbCompactFSetEnd(pComph);
return -1;
}
} else {
// Create new fset as compacted fset
tfsAllocDisk(tsdbGetFidLevel(pSet->fid, &(pComph->rtn)), &(did.level), &(did.id));
if (did.level == TFS_UNDECIDED_LEVEL) {
terrno = TSDB_CODE_TDB_NO_AVAIL_DISK;
tsdbError("vgId:%d failed to compact FSET %d since %s", REPO_ID(pRepo), pSet->fid, tstrerror(terrno));
tsdbCompactFSetEnd(pComph);
return -1;
}
tsdbInitDFileSet(TSDB_COMPACT_WSET(pComph), did, REPO_ID(pRepo), TSDB_FSET_FID(pSet),
FS_TXN_VERSION(REPO_FS(pRepo)));
if (tsdbCreateDFileSet(TSDB_COMPACT_WSET(pComph), true) < 0) {
tsdbError("vgId:%d failed to compact FSET %d since %s", REPO_ID(pRepo), pSet->fid, tstrerror(terrno));
tsdbCompactFSetEnd(pComph);
return -1;
}
if (tsdbCompactFSetImpl(pComph) < 0) {
tsdbCloseDFileSet(TSDB_COMPACT_WSET(pComph));
tsdbRemoveDFileSet(TSDB_COMPACT_WSET(pComph));
tsdbCompactFSetEnd(pComph);
return -1;
}
tsdbCloseDFileSet(TSDB_COMPACT_WSET(pComph));
tsdbUpdateDFileSet(REPO_FS(pRepo), TSDB_COMPACT_WSET(pComph));
tsdbDebug("vgId:%d FSET %d compact over", REPO_ID(pRepo), pSet->fid);
}
tsdbCompactFSetEnd(pComph);
return 0;
}
static bool tsdbShouldCompact(SCompactH *pComph) {
STsdbRepo * pRepo = TSDB_COMPACT_REPO(pComph);
STsdbCfg * pCfg = REPO_CFG(pRepo);
SReadH * pReadh = &(pComph->readh);
STableCompactH *pTh;
SBlock * pBlock;
int defaultRows = TSDB_DEFAULT_BLOCK_ROWS(pCfg->maxRowsPerFileBlock);
SDFile * pDataF = TSDB_READ_DATA_FILE(pReadh);
SDFile * pLastF = TSDB_READ_LAST_FILE(pReadh);
int tblocks = 0; // total blocks
int nSubBlocks = 0; // # of blocks with sub-blocks
int nSmallBlocks = 0; // # of blocks with rows < defaultRows
int64_t tsize = 0;
for (size_t i = 0; i < taosArrayGetSize(pComph->tbArray); i++) {
pTh = (STableCompactH *)taosArrayGet(pComph->tbArray, i);
if (pTh->pTable == NULL || pTh->pBlkIdx == NULL) continue;
for (size_t bidx = 0; bidx < pTh->pBlkIdx->numOfBlocks; bidx++) {
tblocks++;
pBlock = pTh->pInfo->blocks + bidx;
if (pBlock->numOfRows < defaultRows) {
nSmallBlocks++;
}
if (pBlock->numOfSubBlocks > 1) {
nSubBlocks++;
for (int k = 0; k < pBlock->numOfSubBlocks; k++) {
SBlock *iBlock = ((SBlock *)POINTER_SHIFT(pTh->pInfo, pBlock->offset)) + k;
tsize = tsize + iBlock->len;
}
} else if (pBlock->numOfSubBlocks == 1) {
tsize += pBlock->len;
} else {
ASSERT(0);
}
}
}
return (((nSubBlocks * 1.0 / tblocks) > 0.33) || ((nSmallBlocks * 1.0 / tblocks) > 0.33) ||
(tsize * 1.0 / (pDataF->info.size + pLastF->info.size - 2 * TSDB_FILE_HEAD_SIZE) < 0.85));
}
static int tsdbInitCompactH(SCompactH *pComph, STsdbRepo *pRepo) {
STsdbCfg *pCfg = REPO_CFG(pRepo);
memset(pComph, 0, sizeof(*pComph));
TSDB_FSET_SET_CLOSED(TSDB_COMPACT_WSET(pComph));
tsdbGetRtnSnap(pRepo, &(pComph->rtn));
tsdbFSIterInit(&(pComph->fsIter), REPO_FS(pRepo), TSDB_FS_ITER_FORWARD);
if (tsdbInitReadH(&(pComph->readh), pRepo) < 0) {
return -1;
}
if (tsdbInitCompTbArray(pComph) < 0) {
tsdbDestroyCompactH(pComph);
return -1;
}
pComph->aBlkIdx = taosArrayInit(1024, sizeof(SBlockIdx));
if (pComph->aBlkIdx == NULL) {
terrno = TSDB_CODE_TDB_OUT_OF_MEMORY;
tsdbDestroyCompactH(pComph);
return -1;
}
pComph->aSupBlk = taosArrayInit(1024, sizeof(SBlock));
if (pComph->aSupBlk == NULL) {
terrno = TSDB_CODE_TDB_OUT_OF_MEMORY;
tsdbDestroyCompactH(pComph);
return -1;
}
pComph->pDataCols = tdNewDataCols(0, 0, pCfg->maxRowsPerFileBlock);
if (pComph->pDataCols == NULL) {
terrno = TSDB_CODE_TDB_OUT_OF_MEMORY;
tsdbDestroyCompactH(pComph);
return -1;
}
return 0;
}
static void tsdbDestroyCompactH(SCompactH *pComph) {
pComph->pDataCols = tdFreeDataCols(pComph->pDataCols);
pComph->aSupBlk = taosArrayDestroy(pComph->aSupBlk);
pComph->aBlkIdx = taosArrayDestroy(pComph->aBlkIdx);
tsdbDestroyCompTbArray(pComph);
tsdbDestroyReadH(&(pComph->readh));
tsdbCloseDFileSet(TSDB_COMPACT_WSET(pComph));
}
static int tsdbInitCompTbArray(SCompactH *pComph) { // Init pComp->tbArray
STsdbRepo *pRepo = TSDB_COMPACT_REPO(pComph);
STsdbMeta *pMeta = pRepo->tsdbMeta;
if (tsdbRLockRepoMeta(pRepo) < 0) return -1;
pComph->tbArray = taosArrayInit(pMeta->maxTables, sizeof(STableCompactH));
if (pComph->tbArray == NULL) {
terrno = TSDB_CODE_TDB_OUT_OF_MEMORY;
tsdbUnlockRepoMeta(pRepo);
return -1;
}
// Note here must start from 0
for (int i = 0; i < pMeta->maxTables; i++) {
STableCompactH ch = {0};
if (pMeta->tables[i] != NULL) {
tsdbRefTable(pMeta->tables[i]);
ch.pTable = pMeta->tables[i];
}
if (taosArrayPush(pComph->tbArray, &ch) == NULL) {
terrno = TSDB_CODE_TDB_OUT_OF_MEMORY;
tsdbUnlockRepoMeta(pRepo);
return -1;
}
}
if (tsdbUnlockRepoMeta(pRepo) < 0) return -1;
return 0;
}
static void tsdbDestroyCompTbArray(SCompactH *pComph) {
STableCompactH *pTh;
if (pComph->tbArray == NULL) return;
for (size_t i = 0; i < taosArrayGetSize(pComph->tbArray); i++) {
pTh = (STableCompactH *)taosArrayGet(pComph->tbArray, i);
if (pTh->pTable) {
tsdbUnRefTable(pTh->pTable);
}
pTh->pInfo = taosTZfree(pTh->pInfo);
}
pComph->tbArray = taosArrayDestroy(pComph->tbArray);
}
static int tsdbCacheFSetIndex(SCompactH *pComph) {
SReadH *pReadH = &(pComph->readh);
if (tsdbLoadBlockIdx(pReadH) < 0) {
return -1;
}
for (int tid = 1; tid < taosArrayGetSize(pComph->tbArray); tid++) {
STableCompactH *pTh = (STableCompactH *)taosArrayGet(pComph->tbArray, tid);
pTh->pBlkIdx = NULL;
if (pTh->pTable == NULL) continue;
if (tsdbSetReadTable(pReadH, pTh->pTable) < 0) {
return -1;
}
if (pReadH->pBlkIdx == NULL) continue;
pTh->bindex = *(pReadH->pBlkIdx);
pTh->pBlkIdx = &(pTh->bindex);
if (tsdbMakeRoom((void **)(&(pTh->pInfo)), pTh->pBlkIdx->len) < 0) {
return -1;
}
if (tsdbLoadBlockInfo(pReadH, (void *)(pTh->pInfo)) < 0) {
return -1;
}
}
return 0;
}
static int tsdbCompactFSetInit(SCompactH *pComph, SDFileSet *pSet) {
taosArrayClear(pComph->aBlkIdx);
taosArrayClear(pComph->aSupBlk);
if (tsdbSetAndOpenReadFSet(&(pComph->readh), pSet) < 0) {
return -1;
}
if (tsdbCacheFSetIndex(pComph) < 0) {
tsdbCloseAndUnsetFSet(&(pComph->readh));
return -1;
}
return 0;
}
static void tsdbCompactFSetEnd(SCompactH *pComph) { tsdbCloseAndUnsetFSet(&(pComph->readh)); }
static int tsdbCompactFSetImpl(SCompactH *pComph) {
STsdbRepo *pRepo = TSDB_COMPACT_REPO(pComph);
STsdbCfg * pCfg = REPO_CFG(pRepo);
SReadH * pReadh = &(pComph->readh);
SBlockIdx blkIdx;
void ** ppBuf = &(TSDB_COMPACT_BUF(pComph));
void ** ppCBuf = &(TSDB_COMPACT_COMP_BUF(pComph));
int defaultRows = TSDB_DEFAULT_BLOCK_ROWS(pCfg->maxRowsPerFileBlock);
taosArrayClear(pComph->aBlkIdx);
for (int tid = 1; tid < taosArrayGetSize(pComph->tbArray); tid++) {
STableCompactH *pTh = (STableCompactH *)taosArrayGet(pComph->tbArray, tid);
STSchema * pSchema;
if (pTh->pTable == NULL || pTh->pBlkIdx == NULL) continue;
pSchema = tsdbGetTableSchemaImpl(pTh->pTable, true, true, -1);
taosArrayClear(pComph->aSupBlk);
if ((tdInitDataCols(pComph->pDataCols, pSchema) < 0) || (tdInitDataCols(pReadh->pDCols[0], pSchema) < 0) ||
(tdInitDataCols(pReadh->pDCols[1], pSchema) < 0)) {
terrno = TSDB_CODE_TDB_OUT_OF_MEMORY;
return -1;
}
tdFreeSchema(pSchema);
// Loop to compact each block data
for (int i = 0; i < pTh->pBlkIdx->numOfBlocks; i++) {
SBlock *pBlock = pTh->pInfo->blocks + i;
// Load the block data
if (tsdbLoadBlockData(pReadh, pBlock, pTh->pInfo) < 0) {
return -1;
}
// Merge pComph->pDataCols and pReadh->pDCols[0] and write data to file
if (pComph->pDataCols->numOfRows == 0 && pBlock->numOfRows >= defaultRows) {
if (tsdbWriteBlockToRightFile(pComph, pTh->pTable, pReadh->pDCols[0], ppBuf, ppCBuf) < 0) {
return -1;
}
} else {
int ridx = 0;
while (true) {
if (pReadh->pDCols[0]->numOfRows - ridx == 0) break;
int rowsToMerge = MIN(pReadh->pDCols[0]->numOfRows - ridx, defaultRows - pComph->pDataCols->numOfRows);
tdMergeDataCols(pComph->pDataCols, pReadh->pDCols[0], rowsToMerge, &ridx);
if (pComph->pDataCols->numOfRows < defaultRows) {
break;
}
if (tsdbWriteBlockToRightFile(pComph, pTh->pTable, pComph->pDataCols, ppBuf, ppCBuf) < 0) {
return -1;
}
tdResetDataCols(pComph->pDataCols);
}
}
}
if (pComph->pDataCols->numOfRows > 0 &&
tsdbWriteBlockToRightFile(pComph, pTh->pTable, pComph->pDataCols, ppBuf, ppCBuf) < 0) {
return -1;
}
if (tsdbWriteBlockInfoImpl(TSDB_COMPACT_HEAD_FILE(pComph), pTh->pTable, pComph->aSupBlk, NULL, ppBuf, &blkIdx) <
0) {
return -1;
}
if ((blkIdx.numOfBlocks > 0) && (taosArrayPush(pComph->aBlkIdx, (void *)(&blkIdx)) == NULL)) {
terrno = TSDB_CODE_TDB_OUT_OF_MEMORY;
return -1;
}
}
if (tsdbWriteBlockIdx(TSDB_COMPACT_HEAD_FILE(pComph), pComph->aBlkIdx, ppBuf) < 0) {
return -1;
}
return 0;
}
static int tsdbWriteBlockToRightFile(SCompactH *pComph, STable *pTable, SDataCols *pDataCols, void **ppBuf,
void **ppCBuf) {
STsdbRepo *pRepo = TSDB_COMPACT_REPO(pComph);
STsdbCfg * pCfg = REPO_CFG(pRepo);
SDFile * pDFile;
bool isLast;
SBlock block;
ASSERT(pDataCols->numOfRows > 0);
if (pDataCols->numOfRows < pCfg->minRowsPerFileBlock) {
pDFile = TSDB_COMPACT_LAST_FILE(pComph);
isLast = true;
} else {
pDFile = TSDB_COMPACT_DATA_FILE(pComph);
isLast = false;
}
if (tsdbWriteBlockImpl(pRepo, pTable, pDFile, pDataCols, &block, isLast, true, ppBuf, ppCBuf) < 0) {
return -1;
}
if (taosArrayPush(pComph->aSupBlk, (void *)(&block)) == NULL) {
terrno = TSDB_CODE_TDB_OUT_OF_MEMORY;
return -1;
}
return 0;
}
#endif
\ No newline at end of file
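The `tsdbShouldCompact()` check above reduces to three fragmentation ratios over one file set. A minimal standalone sketch of the same rule, with the 0.33/0.33/0.85 thresholds taken from the code above and illustrative parameter names:
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
/* Compact a file set when fragmentation is high: more than a third of the
 * blocks carry sub-blocks, more than a third are under-filled, or the live
 * data covers less than 85% of the on-disk .data/.last size. */
static bool should_compact(int totalBlocks, int subBlocks, int smallBlocks,
                           int64_t liveBytes, int64_t fileBytes) {
  if (totalBlocks <= 0 || fileBytes <= 0) return false;
  return (subBlocks * 1.0 / totalBlocks > 0.33) ||
         (smallBlocks * 1.0 / totalBlocks > 0.33) ||
         (liveBytes * 1.0 / fileBytes < 0.85);
}
int main(void) {
  /* 40% of the blocks have sub-blocks -> compaction is worthwhile. */
  printf("%d\n", should_compact(100, 40, 10, 980, 1000));
  return 0;
}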
......@@ -195,6 +195,8 @@ STsdbRepoInfo *tsdbGetStatus(STsdbRepo *pRepo) { return NULL; }
int tsdbGetState(STsdbRepo *repo) { return repo->state; }
bool tsdbInCompact(STsdbRepo *repo) { return repo->inCompact; }
void tsdbReportStat(void *repo, int64_t *totalPoints, int64_t *totalStorage, int64_t *compStorage) {
ASSERT(repo != NULL);
STsdbRepo *pRepo = repo;
......@@ -533,6 +535,7 @@ static STsdbRepo *tsdbNewRepo(STsdbCfg *pCfg, STsdbAppH *pAppH) {
pRepo->state = TSDB_STATE_OK;
pRepo->code = TSDB_CODE_SUCCESS;
pRepo->inCompact = false;
pRepo->config = *pCfg;
if (pAppH) {
pRepo->appH = *pAppH;
......
......@@ -218,11 +218,6 @@ static void tsdbMayUnTakeMemSnapshot(STsdbQueryHandle* pQueryHandle) {
int64_t tsdbGetNumOfRowsInMemTable(TsdbQueryHandleT* pHandle) {
STsdbQueryHandle* pQueryHandle = (STsdbQueryHandle*) pHandle;
size_t size = taosArrayGetSize(pQueryHandle->pTableCheckInfo);
assert(pQueryHandle->activeIndex < size && pQueryHandle->activeIndex >= 0 && size >= 1);
STableCheckInfo* pCheckInfo = taosArrayGet(pQueryHandle->pTableCheckInfo, pQueryHandle->activeIndex);
int64_t rows = 0;
SMemRef* pMemRef = pQueryHandle->pMemRef;
if (pMemRef == NULL) { return rows; }
......@@ -233,15 +228,19 @@ int64_t tsdbGetNumOfRowsInMemTable(TsdbQueryHandleT* pHandle) {
SMemTable* pMemT = pMemRef->snapshot.mem;
SMemTable* pIMemT = pMemRef->snapshot.imem;
if (pMemT && pCheckInfo->tableId.tid < pMemT->maxTables) {
pMem = pMemT->tData[pCheckInfo->tableId.tid];
rows += (pMem && pMem->uid == pCheckInfo->tableId.uid) ? pMem->numOfRows: 0;
}
if (pIMemT && pCheckInfo->tableId.tid < pIMemT->maxTables) {
pIMem = pIMemT->tData[pCheckInfo->tableId.tid];
rows += (pIMem && pIMem->uid == pCheckInfo->tableId.uid) ? pIMem->numOfRows: 0;
size_t size = taosArrayGetSize(pQueryHandle->pTableCheckInfo);
for (int32_t i = 0; i < size; ++i) {
STableCheckInfo* pCheckInfo = taosArrayGet(pQueryHandle->pTableCheckInfo, i);
if (pMemT && pCheckInfo->tableId.tid < pMemT->maxTables) {
pMem = pMemT->tData[pCheckInfo->tableId.tid];
rows += (pMem && pMem->uid == pCheckInfo->tableId.uid) ? pMem->numOfRows : 0;
}
if (pIMemT && pCheckInfo->tableId.tid < pIMemT->maxTables) {
pIMem = pIMemT->tData[pCheckInfo->tableId.tid];
rows += (pIMem && pIMem->uid == pCheckInfo->tableId.uid) ? pIMem->numOfRows : 0;
}
}
return rows;
}
......@@ -1088,7 +1087,11 @@ static int32_t handleDataMergeIfNeeded(STsdbQueryHandle* pQueryHandle, SBlock* p
assert(cur->pos >= 0 && cur->pos <= binfo.rows);
TSKEY key = (row != NULL)? dataRowKey(row):TSKEY_INITIAL_VAL;
tsdbDebug("%p key in mem:%"PRId64", 0x%"PRIx64, pQueryHandle, key, pQueryHandle->qId);
if (key != TSKEY_INITIAL_VAL) {
tsdbDebug("%p key in mem:%"PRId64", 0x%"PRIx64, pQueryHandle, key, pQueryHandle->qId);
} else {
tsdbDebug("%p no data in mem, 0x%"PRIx64, pQueryHandle, pQueryHandle->qId);
}
if ((ASCENDING_TRAVERSE(pQueryHandle->order) && (key != TSKEY_INITIAL_VAL && key <= binfo.window.ekey)) ||
(!ASCENDING_TRAVERSE(pQueryHandle->order) && (key != TSKEY_INITIAL_VAL && key >= binfo.window.skey))) {
......@@ -1152,8 +1155,14 @@ static int32_t handleDataMergeIfNeeded(STsdbQueryHandle* pQueryHandle, SBlock* p
}
assert(cur->blockCompleted);
tsdbDebug("create data block from remain file block, brange:%"PRId64"-%"PRId64", rows:%d, lastKey:%"PRId64", %p",
cur->win.skey, cur->win.ekey, cur->rows, cur->lastKey, pQueryHandle);
if (cur->rows == binfo.rows) {
tsdbDebug("%p whole file block qualified, brange:%"PRId64"-%"PRId64", rows:%d, lastKey:%"PRId64", %"PRIx64,
pQueryHandle, cur->win.skey, cur->win.ekey, cur->rows, cur->lastKey, pQueryHandle->qId);
} else {
tsdbDebug("%p create data block from remain file block, brange:%"PRId64"-%"PRId64", rows:%d, total:%d, lastKey:%"PRId64", %"PRIx64,
pQueryHandle, cur->win.skey, cur->win.ekey, cur->rows, binfo.rows, cur->lastKey, pQueryHandle->qId);
}
}
return code;
......
......@@ -26,6 +26,7 @@ extern "C" {
#include "taosdef.h"
int32_t strdequote(char *src);
int32_t strRmquote(char *z, int32_t len);
size_t strtrim(char *src);
char * strnchr(char *haystack, char needle, int32_t len, bool skipquote);
char ** strsplit(char *src, const char *delim, int32_t *num);
......
......@@ -494,9 +494,9 @@ uint32_t tGetToken(char* z, uint32_t* tokenId) {
}
/* here is the 1u/1a/2s/3m/9y */
if ((z[i] == 'u' || z[i] == 'a' || z[i] == 's' || z[i] == 'm' || z[i] == 'h' || z[i] == 'd' || z[i] == 'n' ||
if ((z[i] == 'b' || z[i] == 'u' || z[i] == 'a' || z[i] == 's' || z[i] == 'm' || z[i] == 'h' || z[i] == 'd' || z[i] == 'n' ||
z[i] == 'y' || z[i] == 'w' ||
z[i] == 'U' || z[i] == 'A' || z[i] == 'S' || z[i] == 'M' || z[i] == 'H' || z[i] == 'D' || z[i] == 'N' ||
z[i] == 'B' || z[i] == 'U' || z[i] == 'A' || z[i] == 'S' || z[i] == 'M' || z[i] == 'H' || z[i] == 'D' || z[i] == 'N' ||
z[i] == 'Y' || z[i] == 'W') &&
(isIdChar[(uint8_t)z[i + 1]] == 0)) {
*tokenId = TK_VARIABLE;
......
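The suffix set now also accepts `b`/`B`, used for nanosecond durations such as the `100000000b` literals in the `precision_ns.sim` test added below. A standalone sketch of the extended unit check (illustrative only, not the tokenizer itself):
#include <ctype.h>
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
/* True if c is an accepted duration-unit suffix; 'b' is the new nanosecond unit. */
static bool is_duration_suffix(char c) {
  return c != '\0' && strchr("buasmhdnyw", tolower((unsigned char)c)) != NULL;
}
int main(void) {
  printf("%d %d %d\n", is_duration_suffix('b'), is_duration_suffix('S'),
         is_duration_suffix('x')); /* 1 1 0 */
  return 0;
}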
......@@ -52,6 +52,36 @@ int32_t strdequote(char *z) {
return j + 1; // only one quote, do nothing
}
int32_t strRmquote(char *z, int32_t len){
// delete escape character: \\, \', \"
char delim = z[0];
if (delim != '\'' && delim != '\"') {
return len;
}
int32_t cnt = 0;
int32_t j = 0;
for (uint32_t k = 1; k < len - 1; ++k) {
if (z[k] == '\\' || (z[k] == delim && z[k + 1] == delim)) {
z[j] = z[k + 1];
cnt++;
j++;
k++;
continue;
}
z[j] = z[k];
j++;
}
z[j] = 0;
return len - 2 - cnt;
}
size_t strtrim(char *z) {
int32_t i = 0;
int32_t j = 0;
......
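`strRmquote()` strips the surrounding quotes in place, collapses backslash escapes and doubled quote characters, and returns the new length. A small self-contained rendition of the same idea with a usage example (not the library routine itself):
#include <stdio.h>
#include <string.h>
/* Remove surrounding quotes and unescape \x and doubled quotes, in place.
 * Returns the new length, or the original length if z is not quoted. */
static int rm_quote(char *z, int len) {
  char delim = z[0];
  if (delim != '\'' && delim != '"') return len;
  int j = 0, removed = 0;
  for (int k = 1; k < len - 1; ++k) {
    if (z[k] == '\\' || (z[k] == delim && z[k + 1] == delim)) {
      z[j++] = z[k + 1]; /* keep the escaped character */
      ++k;
      ++removed;
      continue;
    }
    z[j++] = z[k];
  }
  z[j] = 0;
  return len - 2 - removed;
}
int main(void) {
  char s[] = "'it''s a \\'test\\''";
  int n = rm_quote(s, (int)strlen(s));
  printf("%d %s\n", n, s); /* prints: 13 it's a 'test' */
  return 0;
}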
......@@ -86,7 +86,7 @@ static int print_result(TAOS_RES* res, int blockFetch) {
}
} else {
while ((row = taos_fetch_row(res))) {
char temp[256];
char temp[256] = {0};
taos_print_row(temp, row, fields, num_fields);
puts(temp);
nRows++;
......@@ -391,10 +391,10 @@ void verify_prepare(TAOS* taos) {
int rows = 0;
int num_fields = taos_num_fields(result);
TAOS_FIELD *fields = taos_fetch_fields(result);
char temp[256];
// fetch the records row by row
while ((row = taos_fetch_row(result))) {
char temp[256] = {0};
rows++;
taos_print_row(temp, row, fields, num_fields);
printf("%s\n", temp);
......@@ -614,10 +614,10 @@ void verify_prepare2(TAOS* taos) {
int rows = 0;
int num_fields = taos_num_fields(result);
TAOS_FIELD *fields = taos_fetch_fields(result);
char temp[256];
// fetch the records row by row
while ((row = taos_fetch_row(result))) {
char temp[256] = {0};
rows++;
taos_print_row(temp, row, fields, num_fields);
printf("%s\n", temp);
......@@ -866,12 +866,10 @@ void verify_prepare3(TAOS* taos) {
int rows = 0;
int num_fields = taos_num_fields(result);
TAOS_FIELD *fields = taos_fetch_fields(result);
char temp[256] = {0};
// fetch the records row by row
while ((row = taos_fetch_row(result))) {
memset(temp, 0, sizeof(temp)/sizeof(temp[0]));
char temp[256] = {0};
rows++;
taos_print_row(temp, row, fields, num_fields);
printf("%s\n", temp);
......
......@@ -116,12 +116,12 @@ void Test(TAOS *taos, char *qstr, int index) {
int rows = 0;
int num_fields = taos_field_count(result);
TAOS_FIELD *fields = taos_fetch_fields(result);
char temp[1024];
printf("num_fields = %d\n", num_fields);
printf("select * from table, result:\n");
// fetch the records row by row
while ((row = taos_fetch_row(result))) {
char temp[1024] = {0};
rows++;
taos_print_row(temp, row, fields, num_fields);
printf("%s\n", temp);
......
......@@ -184,10 +184,10 @@ int main(int argc, char *argv[])
int rows = 0;
int num_fields = taos_num_fields(result);
TAOS_FIELD *fields = taos_fetch_fields(result);
char temp[256];
// fetch the records row by row
while ((row = taos_fetch_row(result))) {
char temp[256] = {0};
rows++;
taos_print_row(temp, row, fields, num_fields);
printf("%s\n", temp);
......
......@@ -14,8 +14,6 @@ void print_result(TAOS_RES* res, int blockFetch) {
int num_fields = taos_num_fields(res);
TAOS_FIELD* fields = taos_fetch_fields(res);
int nRows = 0;
char buf[4096];
if (blockFetch) {
nRows = taos_fetch_block(res, &row);
......@@ -25,6 +23,7 @@ void print_result(TAOS_RES* res, int blockFetch) {
//}
} else {
while ((row = taos_fetch_row(res))) {
char buf[4096] = {0};
taos_print_row(buf, row, fields, num_fields);
puts(buf);
nRows++;
......
......@@ -21,7 +21,7 @@ def pre_test(){
cmake .. > /dev/null
make > /dev/null
make install > /dev/null
pip3 install ${WKC}/src/connector/python/linux/python3/
pip3 install ${WKC}/src/connector/python/ || echo 0
'''
return 1
}
......
......@@ -23,7 +23,8 @@ class Node:
self.hostIP = hostIP
self.hostName = hostName
self.homeDir = homeDir
self.conn = Connection("{}@{}".format(username, hostName), connect_kwargs={"password": "{}".format(password)})
self.corePath = '/coredump'
self.conn = Connection("{}@{}".format(username, hostName), connect_kwargs={"password": "{}".format(password)})
def buildTaosd(self):
try:
......@@ -126,21 +127,37 @@ class Node:
except Exception as e:
print("remove taosd error for node %d " % self.index)
logging.exception(e)
def detectCoredumpFile(self):
try:
result = self.conn.run("find /coredump -name 'core_*' ", hide=True)
output = result.stdout
print("output: %s" % output)
return output
except Exception as e:
print("find coredump file error on node %d " % self.index)
logging.exception(e)
class Nodes:
def __init__(self):
self.tdnodes = []
self.tdnodes.append(Node(0, 'root', '52.143.103.7', 'node1', 'a', '/root/'))
self.tdnodes.append(Node(1, 'root', '52.250.48.222', 'node2', 'a', '/root/'))
self.tdnodes.append(Node(2, 'root', '51.141.167.23', 'node3', 'a', '/root/'))
self.tdnodes.append(Node(3, 'root', '52.247.207.173', 'node4', 'a', '/root/'))
self.tdnodes.append(Node(4, 'root', '51.141.166.100', 'node5', 'a', '/root/'))
self.tdnodes.append(Node(0, 'root', '192.168.17.194', 'taosdata', 'r', '/root/'))
# self.tdnodes.append(Node(1, 'root', '52.250.48.222', 'node2', 'a', '/root/'))
# self.tdnodes.append(Node(2, 'root', '51.141.167.23', 'node3', 'a', '/root/'))
# self.tdnodes.append(Node(3, 'root', '52.247.207.173', 'node4', 'a', '/root/'))
# self.tdnodes.append(Node(4, 'root', '51.141.166.100', 'node5', 'a', '/root/'))
def stopOneNode(self, index):
self.tdnodes[index].stopTaosd()
self.tdnodes[index].forceStopOneTaosd()
def startOneNode(self, index):
self.tdnodes[index].startOneTaosd()
def detectCoredumpFile(self, index):
return self.tdnodes[index].detectCoredumpFile()
def stopAllTaosd(self):
for i in range(len(self.tdnodes)):
......@@ -166,14 +183,32 @@ class Nodes:
for i in range(len(self.tdnodes)):
self.tdnodes[i].removeData()
# kill taosd randomly every 10 mins
nodes = Nodes()
loop = 0
while True:
loop = loop + 1
index = random.randint(0, 4)
print("loop: %d, kill taosd on node%d" %(loop, index))
nodes.stopOneNode(index)
time.sleep(60)
nodes.startOneNode(index)
time.sleep(600)
\ No newline at end of file
class Test:
def __init__(self):
self.nodes = Nodes()
# kill taosd randomly every 10 mins
def randomlyKillDnode(self):
loop = 0
while True:
index = random.randint(0, 4)
print("loop: %d, kill taosd on node%d" %(loop, index))
self.nodes.stopOneNode(index)
time.sleep(60)
self.nodes.startOneNode(index)
time.sleep(600)
loop = loop + 1
def detectCoredump(self):
loop = 0
while True:
for i in range(len(self.nodes.tdnodes)):
result = self.nodes.detectCoredumpFile(i)
print("core file path is %s" % result)
if result and not result.isspace():
self.nodes.stopAllTaosd()
print("sleep for 10 mins")
time.sleep(600)
test = Test()
test.detectCoredump()
\ No newline at end of file
......@@ -25,7 +25,7 @@ class TDTestCase:
def run(self):
tdSql.query("show variables")
tdSql.checkData(51, 1, 864000)
tdSql.checkData(53, 1, 864000)
def stop(self):
tdSql.close()
......@@ -33,4 +33,4 @@ class TDTestCase:
tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())
\ No newline at end of file
tdCases.addLinux(__file__, TDTestCase())
......@@ -135,6 +135,7 @@ run general/parser/tags_dynamically_specifiy.sim
run general/parser/set_tag_vals.sim
#unsupport run general/parser/repeatAlter.sim
#unsupport run general/parser/slimit_alter_tags.sim
run general/parser/precision_ns.sim
run general/stable/disk.sim
run general/stable/dnode3.sim
run general/stable/metrics.sim
......
......@@ -177,7 +177,7 @@ sql alter database db blocks 20
sql alter database db blocks 10
sql_error alter database db blocks 2
sql_error alter database db blocks 1
sql alter database db blocks 0
sql_error alter database db blocks 0
sql_error alter database db blocks -1
sql_error alter database db blocks 10001
......
......@@ -455,7 +455,7 @@ sql alter database db blocks 20
sql alter database db blocks 10
sql_error alter database db blocks 2
sql_error alter database db blocks 1
sql alter database db blocks 0
sql_error alter database db blocks 0
sql_error alter database db blocks -1
sql_error alter database db blocks 10001
......
......@@ -932,3 +932,125 @@ if $data32 != 0.000144445 then
return -1
endi
print ===========================> derivative
sql drop table t1
sql drop table tx;
sql drop table m1;
sql drop table if exists tm0;
sql drop table if exists tm1;
sql create table tm0(ts timestamp, k double)
sql insert into tm0 values('2015-08-18T00:00:00Z', 2.064) ('2015-08-18T00:06:00Z', 2.116) ('2015-08-18T00:12:00Z', 2.028)
sql insert into tm0 values('2015-08-18T00:18:00Z', 2.126) ('2015-08-18T00:24:00Z', 2.041) ('2015-08-18T00:30:00Z', 2.051)
sql_error select derivative(ts) from tm0;
sql_error select derivative(k) from tm0;
sql_error select derivative(k, 0, 0) from tm0;
sql_error select derivative(k, 1, 911) from tm0;
sql_error select derivative(kx, 1s, 1) from tm0;
sql_error select derivative(k, -20s, 1) from tm0;
sql_error select derivative(k, 20a, 0) from tm0;
sql_error select derivative(k, 200a, 0) from tm0;
sql_error select derivative(k, 999a, 0) from tm0;
sql_error select derivative(k, 20s, -12) from tm0;
sql select derivative(k, 1s, 0) from tm0
if $rows != 5 then
return -1
endi
if $data00 != @15-08-18 08:06:00.000@ then
return -1
endi
if $data01 != 0.000144444 then
print expect 0.000144444, actual: $data01
return -1
endi
if $data10 != @15-08-18 08:12:00.000@ then
return -1
endi
if $data11 != -0.000244444 then
return -1
endi
if $data20 != @15-08-18 08:18:00.000@ then
return -1
endi
if $data21 != 0.000272222 then
print expect 0.000272222, actual: $data21
return -1
endi
if $data30 != @15-08-18 08:24:00.000@ then
return -1
endi
if $data31 != -0.000236111 then
print expect 0.000236111, actual: $data31
return -1
endi
sql select derivative(k, 6m, 0) from tm0;
if $rows != 5 then
return -1
endi
if $data00 != @15-08-18 08:06:00.000@ then
return -1
endi
if $data01 != 0.052000000 then
print expect 0.052000000, actual: $data01
return -1
endi
if $data10 != @15-08-18 08:12:00.000@ then
return -1
endi
if $data11 != -0.088000000 then
return -1
endi
if $data20 != @15-08-18 08:18:00.000@ then
return -1
endi
if $data21 != 0.098000000 then
return -1
endi
if $data30 != @15-08-18 08:24:00.000@ then
return -1
endi
if $data31 != -0.085000000 then
return -1
endi
sql select derivative(k, 12m, 0) from tm0;
if $rows != 5 then
return -1
endi
if $data00 != @15-08-18 08:06:00.000@ then
return -1
endi
if $data01 != 0.104000000 then
print expect 0.104000000, actual: $data01
return -1
endi
sql select derivative(k, 6m, 1) from tm0;
if $rows != 3 then
return -1
endi
sql_error select derivative(k, 6m, 1) from tm0 interval(1s);
sql_error select derivative(k, 6m, 1) from tm0 session(ts, 1s);
sql_error select derivative(k, 6m, 1) from tm0 group by k;
sql_error select derivative(k, 6m, 1) from
\ No newline at end of file
......@@ -147,8 +147,57 @@ if $data02 != @nest_tb0@ then
endi
print ===================> nest query interval
sql_error select ts, avg(c1) from (select ts, c1 from nest_tb0);
sql select avg(c1) from (select * from nest_tb0) interval(3d)
if $rows != 3 then
return -1
endi
if $data00 != @20-09-14 00:00:00.000@ then
return -1
endi
if $data01 != 49.222222222 then
return -1
endi
if $data10 != @20-09-17 00:00:00.000@ then
print expect 20-09-17 00:00:00.000, actual: $data10
return -1
endi
if $data11 != 49.685185185 then
return -1
endi
if $data20 != @20-09-20 00:00:00.000@ then
return -1
endi
if $data21 != 49.500000000 then
return -1
endi
#define TSDB_FUNC_APERCT 7
#define TSDB_FUNC_LAST_ROW 10
#define TSDB_FUNC_TWA 14
#define TSDB_FUNC_LEASTSQR 15
#define TSDB_FUNC_ARITHM 23
#define TSDB_FUNC_DIFF 24
#define TSDB_FUNC_INTERP 28
#define TSDB_FUNC_RATE 29
#define TSDB_FUNC_IRATE 30
#define TSDB_FUNC_DERIVATIVE 32
sql_error select stddev(c1) from (select c1 from nest_tb0);
sql_error select percentile(c1, 20) from (select * from nest_tb0);
sql select avg(c1),sum(c2), max(c3), min(c4), count(*), first(c7), last(c7),spread(c6) from (select * from nest_tb0) interval(1d);
sql select top(x, 20) from (select c1 x from nest_tb0);
sql select bottom(x, 20) from (select c1 x from nest_tb0)
print ===================> complex query
......
system sh/stop_dnodes.sh
system sh/deploy.sh -n dnode1 -i 1
system sh/cfg.sh -n dnode1 -c walLevel -v 1
system sh/exec.sh -n dnode1 -s start
sleep 1000
sql connect
$dbPrefix = m_di_db_ns
$tbPrefix = m_di_tb
$mtPrefix = m_di_mt
$ntPrefix = m_di_nt
$tbNum = 2
$rowNum = 200
$futureTs = 300000000000
print =============== step1: create database and tables and insert data
$i = 0
$db = $dbPrefix . $i
$mt = $mtPrefix . $i
$nt = $ntPrefix . $i
sql drop database $db -x step1
step1:
sql create database $db precision 'ns'
sql use $db
sql create table $mt (ts timestamp, tbcol int) TAGS(tgcol int)
$i = 0
while $i < $tbNum
$tb = $tbPrefix . $i
sql create table $tb using $mt tags( $i )
$x = 0
while $x < $rowNum
$cc = $futureTs + $x * 100 + 43
$ns = $cc . b
sql insert into $tb values (now + $ns , $x )
$x = $x + 1
endw
$i = $i + 1
endw
sql create table $nt (ts timestamp, tbcol int)
$x = 0
while $x < $rowNum
$cc = $futureTs + $x * 100 + 43
$ns = $cc . b
sql insert into $nt values (now + $ns , $x )
$x = $x + 1
endw
sleep 100
print =============== step2: select count(*) from tables
$i = 0
$tb = $tbPrefix . $i
sql select count(*) from $tb
if $data00 != $rowNum then
print expect $rowNum, actual:$data00
return -1
endi
$i = 0
$mt = $mtPrefix . $i
sql select count(*) from $mt
$mtRowNum = $tbNum * $rowNum
if $data00 != $mtRowNum then
print expect $mtRowNum, actual:$data00
return -1
endi
$i = 0
$nt = $ntPrefix . $i
sql select count(*) from $nt
if $data00 != $rowNum then
print expect $rowNum, actual:$data00
return -1
endi
print =============== step3: check nano second timestamp
$i = 0
$mt = $mtPrefix . $i
$tb = $tbPrefix . $i
sql insert into $tb values (now-43b , $x )
sql select count(*) from $tb where ts<now
if $data00 != 1 then
print expected 1, actual: $data00
endi
print =============== step4: check interval/sliding nano second
$i = 0
$mt = $mtPrefix . $i
sql_error select count(*) from $mt interval(1000b) sliding(100b)
sql_error select count(*) from $mt interval(10000000b) sliding(99999b)
sql select count(*) from $mt interval(100000000b) sliding(100000000b)
print =============== clear
sql drop database $db
sql show databases
if $rows != 0 then
return -1
endi
system sh/exec.sh -n dnode1 -s stop -x SIGINT
......@@ -62,4 +62,5 @@ run general/parser/binary_escapeCharacter.sim
run general/parser/between_and.sim
run general/parser/last_cache.sim
run general/parser/nestquery.sim
run general/parser/precision_ns.sim
......@@ -134,6 +134,7 @@ run general/parser/bug.sim
run general/parser/tags_dynamically_specifiy.sim
run general/parser/set_tag_vals.sim
run general/parser/repeatAlter.sim
run general/parser/precision_ns.sim
##unsupport run general/parser/slimit_alter_tags.sim
run general/stable/disk.sim
run general/stable/dnode3.sim
......
......@@ -813,8 +813,15 @@ bool simExecuteNativeSqlCommand(SScript *script, char *rest, bool isSlow) {
value[length[i]] = 0;
// snprintf(value, fields[i].bytes, "%s", (char *)row[i]);
break;
case TSDB_DATA_TYPE_TIMESTAMP:
tt = *(int64_t *)row[i] / 1000;
case TSDB_DATA_TYPE_TIMESTAMP: {
int32_t precision = taos_result_precision(pSql);
if (precision == TSDB_TIME_PRECISION_MILLI) {
tt = (*(int64_t *)row[i]) / 1000;
} else if (precision == TSDB_TIME_PRECISION_MICRO) {
tt = (*(int64_t *)row[i]) / 1000000;
} else {
tt = (*(int64_t *)row[i]) / 1000000000;
}
/* comment out as it make testcases like select_with_tags.sim fail.
but in windows, this may cause the call to localtime crash if tt < 0,
need to find a better solution.
......@@ -829,9 +836,16 @@ bool simExecuteNativeSqlCommand(SScript *script, char *rest, bool isSlow) {
tp = localtime(&tt);
strftime(timeStr, 64, "%y-%m-%d %H:%M:%S", tp);
sprintf(value, "%s.%03d", timeStr, (int32_t)(*((int64_t *)row[i]) % 1000));
if (precision == TSDB_TIME_PRECISION_MILLI) {
sprintf(value, "%s.%03d", timeStr, (int32_t)(*((int64_t *)row[i]) % 1000));
} else if (precision == TSDB_TIME_PRECISION_MICRO) {
sprintf(value, "%s.%06d", timeStr, (int32_t)(*((int64_t *)row[i]) % 1000000));
} else {
sprintf(value, "%s.%09d", timeStr, (int32_t)(*((int64_t *)row[i]) % 1000000000));
}
break;
}
default:
break;
} // end of switch
......
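The formatting above splits a raw timestamp into whole seconds plus a precision-dependent fractional part (3, 6 or 9 digits). A minimal standalone version of that split; the `format_ts` helper and its names are illustrative, and POSIX `localtime_r` is assumed:
#include <inttypes.h>
#include <stdio.h>
#include <time.h>
enum { PREC_MILLI, PREC_MICRO, PREC_NANO };
/* Format a raw timestamp of the given precision as "YY-MM-DD hh:mm:ss.frac". */
static void format_ts(int64_t raw, int precision, char *buf, size_t cap) {
  int64_t div = (precision == PREC_MILLI) ? 1000
              : (precision == PREC_MICRO) ? 1000000
                                          : 1000000000;
  time_t secs = (time_t)(raw / div);
  int64_t frac = raw % div;
  struct tm tmv;
  localtime_r(&secs, &tmv);
  char head[64];
  strftime(head, sizeof(head), "%y-%m-%d %H:%M:%S", &tmv);
  int width = (precision == PREC_MILLI) ? 3 : (precision == PREC_MICRO) ? 6 : 9;
  snprintf(buf, cap, "%s.%0*" PRId64, head, width, frac);
}
int main(void) {
  char out[96];
  format_ts(1439856360123456789LL, PREC_NANO, out, sizeof(out));
  puts(out); /* e.g. 15-08-18 ...:06:00.123456789, depending on the local time zone */
  return 0;
}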