Commit b55d0864 authored by haoranc

Merge branches 'dev/chr' and 'develop' of github.com:taosdata/TDengine into dev/chr

......@@ -82,6 +82,10 @@ tests/comparisonTest/opentsdb/opentsdbtest/.settings/
tests/examples/JDBC/JDBCDemo/.classpath
tests/examples/JDBC/JDBCDemo/.project
tests/examples/JDBC/JDBCDemo/.settings/
tests/script/api/batchprepare
tests/script/api/stmt
tests/script/api/stmtBatchTest
tests/script/api/stmtTest
# Emacs
# -*- mode: gitignore; -*-
......
......@@ -36,7 +36,13 @@ IF (${CMAKE_SYSTEM_NAME} MATCHES "Linux")
# Get OS information and store in variable TD_OS_INFO.
#
execute_process(COMMAND chmod 777 ${TD_COMMUNITY_DIR}/packaging/tools/get_os.sh)
execute_process(COMMAND sh ${TD_COMMUNITY_DIR}/packaging/tools/get_os.sh "" OUTPUT_VARIABLE TD_OS_INFO)
execute_process(COMMAND readlink /bin/sh OUTPUT_VARIABLE SHELL_LINK)
MESSAGE(STATUS "The shell is: " ${SHELL_LINK})
IF (${SHELL_LINK} MATCHES "dash")
execute_process(COMMAND ${TD_COMMUNITY_DIR}/packaging/tools/get_os.sh "" OUTPUT_VARIABLE TD_OS_INFO)
ELSE ()
execute_process(COMMAND sh ${TD_COMMUNITY_DIR}/packaging/tools/get_os.sh "" OUTPUT_VARIABLE TD_OS_INFO)
ENDIF()
MESSAGE(STATUS "The current os is " ${TD_OS_INFO})
SET(TD_LINUX TRUE)
......
......@@ -4,7 +4,7 @@ PROJECT(TDengine)
IF (DEFINED VERNUMBER)
SET(TD_VER_NUMBER ${VERNUMBER})
ELSE ()
SET(TD_VER_NUMBER "2.3.1.0")
SET(TD_VER_NUMBER "2.3.2.0")
ENDIF ()
IF (DEFINED VERCOMPATIBLE)
......
......@@ -55,6 +55,7 @@ IF (TD_LINUX_64 AND JEMALLOC_ENABLED)
MESSAGE("")
MESSAGE("setup deps/jemalloc, current source dir:" ${CMAKE_CURRENT_SOURCE_DIR})
MESSAGE("binary dir:" ${CMAKE_BINARY_DIR})
include(ExternalProject)
ExternalProject_Add(jemalloc
PREFIX "jemalloc"
SOURCE_DIR ${CMAKE_CURRENT_SOURCE_DIR}/jemalloc
......@@ -62,6 +63,7 @@ IF (TD_LINUX_64 AND JEMALLOC_ENABLED)
CONFIGURE_COMMAND ./autogen.sh COMMAND ./configure --prefix=${CMAKE_BINARY_DIR}/build/
BUILD_COMMAND ${MAKE}
)
INCLUDE_DIRECTORIES(${CMAKE_BINARY_DIR}/build/include)
ENDIF ()
IF (${TSZ_ENABLED} MATCHES "true")
......
......@@ -328,7 +328,7 @@ All of TDengine's asynchronous APIs use a non-blocking calling mode; applications can use multiple...
In addition to C/C++, TDengine's Java JNI connector also supports the parameter-binding interface; for details see [Java usage of the parameter binding interface](https://www.taosdata.com/cn/documentation/connector/java#stmt-java).
The functions of this interface are listed below (you can also refer to [apitest.c](https://github.com/taosdata/TDengine/blob/develop/tests/examples/c/apitest.c) for how each function is used):
The functions of this interface are listed below (you can also refer to [prepare.c](https://github.com/taosdata/TDengine/blob/develop/tests/examples/c/prepare.c) for how each function is used):
- `TAOS_STMT* taos_stmt_init(TAOS *taos)`
......
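A minimal usage sketch of this parameter-binding API in C, following the flow shown in the prepare.c example referenced above. The connection `taos` and the table `t1 (ts TIMESTAMP, val INT)` are assumptions for illustration only:

```c
#include <stdio.h>
#include <string.h>
#include <taos.h>

// Hedged sketch: insert one row through the parameter-binding (stmt) API.
// Assumes `taos` is an open TAOS* connection and t1(ts TIMESTAMP, val INT) exists.
static int insert_one_row(TAOS *taos) {
  int64_t ts  = 1609459200000LL;  /* 2021-01-01 00:00:00.000, millisecond precision */
  int32_t val = 42;

  TAOS_STMT *stmt = taos_stmt_init(taos);
  if (stmt == NULL) return -1;

  int code = taos_stmt_prepare(stmt, "insert into t1 values(?, ?)", 0);

  TAOS_BIND params[2];
  memset(params, 0, sizeof(params));
  params[0].buffer_type   = TSDB_DATA_TYPE_TIMESTAMP;
  params[0].buffer        = &ts;
  params[0].buffer_length = sizeof(ts);
  params[1].buffer_type   = TSDB_DATA_TYPE_INT;
  params[1].buffer        = &val;
  params[1].buffer_length = sizeof(val);

  if (code == 0) code = taos_stmt_bind_param(stmt, params);
  if (code == 0) code = taos_stmt_add_batch(stmt);
  if (code == 0) code = taos_stmt_execute(stmt);
  if (code != 0) fprintf(stderr, "stmt error: %s\n", taos_stmt_errstr(stmt));

  taos_stmt_close(stmt);
  return code;
}
```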
......@@ -299,8 +299,8 @@ keepColumnName 1
# percent of redundant data in tsdb meta will compact meta data,0 means donot compact
# tsdbMetaCompactRatio 0
# default string type used for storing JSON String, options can be binary/nchar, default is binary
# defaultJSONStrType binary
# default string type used for storing JSON String, options can be binary/nchar, default is nchar
# defaultJSONStrType nchar
# force TCP transmission
# rpcForceTcp 0
......
......@@ -194,6 +194,7 @@ fi
if [[ "$dbName" == "pro" ]]; then
sed -i "s/taos config/prodb config/g" ${top_dir}/src/util/src/tconfig.c
sed -i "s/TDengine/ProDB/g" ${top_dir}/src/dnode/src/dnodeSystem.c
fi
echo "build ${pagMode} package ..."
......@@ -213,7 +214,12 @@ else
exit 1
fi
make -j8 && ${csudo} make install
if [[ "$allocator" == "jemalloc" ]]; then
# jemalloc need compile first, so disable parallel build
make V=1 && ${csudo} make install
else
make -j8 && ${csudo} make install
fi
cd ${curr_dir}
......
......@@ -204,31 +204,31 @@ function install_jemalloc() {
/usr/bin/install -c -d /usr/local/bin
if [ -f "${binary_dir}/build/bin/jemalloc-config" ]; then
/usr/bin/install -c -m 755 ${binary_dir}/build/bin/jemalloc-config /usr/local/bin
${csudo} /usr/bin/install -c -m 755 ${binary_dir}/build/bin/jemalloc-config /usr/local/bin
fi
if [ -f "${binary_dir}/build/bin/jemalloc.sh" ]; then
/usr/bin/install -c -m 755 ${binary_dir}/build/bin/jemalloc.sh /usr/local/bin
${csudo} /usr/bin/install -c -m 755 ${binary_dir}/build/bin/jemalloc.sh /usr/local/bin
fi
if [ -f "${binary_dir}/build/bin/jeprof" ]; then
/usr/bin/install -c -m 755 ${binary_dir}/build/bin/jeprof /usr/local/bin
${csudo} /usr/bin/install -c -m 755 ${binary_dir}/build/bin/jeprof /usr/local/bin
fi
if [ -f "${binary_dir}/build/include/jemalloc/jemalloc.h" ]; then
/usr/bin/install -c -d /usr/local/include/jemalloc
/usr/bin/install -c -m 644 ${binary_dir}/build/include/jemalloc/jemalloc.h\
${csudo} /usr/bin/install -c -d /usr/local/include/jemalloc
${csudo} /usr/bin/install -c -m 644 ${binary_dir}/build/include/jemalloc/jemalloc.h\
/usr/local/include/jemalloc
fi
if [ -f "${binary_dir}/build/lib/libjemalloc.so.2" ]; then
/usr/bin/install -c -d /usr/local/lib
/usr/bin/install -c -m 755 ${binary_dir}/build/lib/libjemalloc.so.2 /usr/local/lib
ln -sf libjemalloc.so.2 /usr/local/lib/libjemalloc.so
/usr/bin/install -c -d /usr/local/lib
${csudo} /usr/bin/install -c -d /usr/local/lib
${csudo} /usr/bin/install -c -m 755 ${binary_dir}/build/lib/libjemalloc.so.2 /usr/local/lib
${csudo} ln -sf libjemalloc.so.2 /usr/local/lib/libjemalloc.so
${csudo} /usr/bin/install -c -d /usr/local/lib
[ -f ${binary_dir}/build/lib/libjemalloc.a ] &&
/usr/bin/install -c -m 755 ${binary_dir}/build/lib/libjemalloc.a /usr/local/lib
${csudo} /usr/bin/install -c -m 755 ${binary_dir}/build/lib/libjemalloc.a /usr/local/lib
[ -f ${binary_dir}/build/lib/libjemalloc_pic.a ] &&
/usr/bin/install -c -m 755 ${binary_dir}/build/lib/libjemalloc_pic.a /usr/local/lib
${csudo} /usr/bin/install -c -m 755 ${binary_dir}/build/lib/libjemalloc_pic.a /usr/local/lib
if [ -f "${binary_dir}/build/lib/pkgconfig/jemalloc.pc" ]; then
/usr/bin/install -c -d /usr/local/lib/pkgconfig
/usr/bin/install -c -m 644 ${binary_dir}/build/lib/pkgconfig/jemalloc.pc\
${csudo} /usr/bin/install -c -d /usr/local/lib/pkgconfig
${csudo} /usr/bin/install -c -m 644 ${binary_dir}/build/lib/pkgconfig/jemalloc.pc\
/usr/local/lib/pkgconfig
fi
if [ -d /etc/ld.so.conf.d ]; then
......@@ -239,29 +239,28 @@ function install_jemalloc() {
fi
fi
if [ -f "${binary_dir}/build/share/doc/jemalloc/jemalloc.html" ]; then
/usr/bin/install -c -d /usr/local/share/doc/jemalloc
/usr/bin/install -c -m 644 ${binary_dir}/build/share/doc/jemalloc/jemalloc.html\
${csudo} /usr/bin/install -c -d /usr/local/share/doc/jemalloc
${csudo} /usr/bin/install -c -m 644 ${binary_dir}/build/share/doc/jemalloc/jemalloc.html\
/usr/local/share/doc/jemalloc
fi
if [ -f "${binary_dir}/build/share/man/man3/jemalloc.3" ]; then
/usr/bin/install -c -d /usr/local/share/man/man3
/usr/bin/install -c -m 644 ${binary_dir}/build/share/man/man3/jemalloc.3\
${csudo} /usr/bin/install -c -d /usr/local/share/man/man3
${csudo} /usr/bin/install -c -m 644 ${binary_dir}/build/share/man/man3/jemalloc.3\
/usr/local/share/man/man3
fi
fi
}
function install_avro() {
if [ "$osType" != "Darwin" ]; then
if [ -f "${binary_dir}/build/$1/libavro.so.23.0.0" ]; then
/usr/bin/install -c -d /usr/local/$1
/usr/bin/install -c -m 755 ${binary_dir}/build/$1/libavro.so.23.0.0 /usr/local/$1
ln -sf libavro.so.23.0.0 /usr/local/$1/libavro.so.23
ln -sf libavro.so.23 /usr/local/$1/libavro.so
/usr/bin/install -c -d /usr/local/$1
${csudo} /usr/bin/install -c -d /usr/local/$1
${csudo} /usr/bin/install -c -m 755 ${binary_dir}/build/$1/libavro.so.23.0.0 /usr/local/$1
${csudo} ln -sf libavro.so.23.0.0 /usr/local/$1/libavro.so.23
${csudo} ln -sf libavro.so.23 /usr/local/$1/libavro.so
${csudo} /usr/bin/install -c -d /usr/local/$1
[ -f ${binary_dir}/build/$1/libavro.a ] &&
/usr/bin/install -c -m 755 ${binary_dir}/build/$1/libavro.a /usr/local/$1
${csudo} /usr/bin/install -c -m 755 ${binary_dir}/build/$1/libavro.a /usr/local/$1
if [ -d /etc/ld.so.conf.d ]; then
echo "/usr/local/$1" | ${csudo} tee /etc/ld.so.conf.d/libavro.conf
......
name: tdengine
base: core20
version: '2.3.1.0'
version: '2.3.2.0'
icon: snap/gui/t-dengine.svg
summary: an open-source big data platform designed and optimized for IoT.
description: |
......
......@@ -83,6 +83,11 @@ typedef struct SJoinSupporter {
int32_t totalLen;
int32_t num;
SArray* pVgroupTables;
int16_t fillType; // final result fill type
int64_t * fillVal; // default value for fill
int32_t numOfFillVal; // fill value size
} SJoinSupporter;
......@@ -119,7 +124,8 @@ typedef struct SBlockKeyInfo {
int32_t converToStr(char *str, int type, void *buf, int32_t bufSize, int32_t *len);
int32_t tscCreateDataBlock(size_t initialSize, int32_t rowSize, int32_t startOffset, SName* name, STableMeta* pTableMeta, STableDataBlocks** dataBlocks);
void tscDestroyDataBlock(SSqlObj *pSql, STableDataBlocks* pDataBlock, bool removeMeta);
int32_t tscCreateDataBlockData(STableDataBlocks* dataBuf, size_t defaultSize, int32_t rowSize, int32_t startOffset);
void tscDestroyDataBlock(SSqlObj *pSql, STableDataBlocks* pDataBlock, bool removeMeta);
void tscSortRemoveDataBlockDupRowsRaw(STableDataBlocks* dataBuf);
int tscSortRemoveDataBlockDupRows(STableDataBlocks* dataBuf, SBlockKeyInfo* pBlkKeyInfo);
int32_t tsSetBlockInfo(SSubmitBlk *pBlocks, const STableMeta *pTableMeta, int32_t numOfRows);
......@@ -147,6 +153,7 @@ int32_t tscGetDataBlockFromList(SHashObj* pHashList, int64_t id, int32_t size, i
* @return
*/
bool tscIsPointInterpQuery(SQueryInfo* pQueryInfo);
bool tscGetPointInterpQuery(SQueryInfo* pQueryInfo);
bool tscIsTWAQuery(SQueryInfo* pQueryInfo);
bool tscIsIrateQuery(SQueryInfo* pQueryInfo);
bool tscQueryContainsFunction(SQueryInfo* pQueryInfo, int16_t functionId);
......
......@@ -237,7 +237,7 @@ void taos_fetch_rows_a(TAOS_RES *tres, __async_cb_func_t fp, void *param) {
return;
}
if (pRes->qId == 0) {
if (pRes->qId == 0 && pSql->cmd.command != TSDB_SQL_RETRIEVE_EMPTY_RESULT) {
tscError("qhandle is invalid");
pRes->code = TSDB_CODE_TSC_INVALID_QHANDLE;
tscAsyncResultOnError(pSql);
......
......@@ -967,7 +967,6 @@ SSDataBlock* doGlobalAggregate(void* param, bool* newgroup) {
if (pOperator->pRuntimeEnv->pQueryAttr->order.order == TSDB_ORDER_DESC) {
SWAP(w->skey, w->ekey, TSKEY);
assert(w->skey <= w->ekey);
}
}
}
......
......@@ -195,8 +195,9 @@ static int32_t parseTelnetMetricValue(TAOS_SML_KV **pKVs, int *num_kvs, const ch
}
tfree(value);
pVal->key = tcalloc(sizeof(key), 1);
pVal->key = tcalloc(sizeof(key) + TS_ESCAPE_CHAR_SIZE, 1);
memcpy(pVal->key, key, sizeof(key));
addEscapeCharToString(pVal->key, (int32_t)strlen(pVal->key));
*num_kvs += 1;
*index = cur + 1;
......@@ -881,8 +882,9 @@ static int32_t parseMetricValueFromJSON(cJSON *root, TAOS_SML_KV **pKVs, int *nu
return ret;
}
pVal->key = tcalloc(sizeof(key), 1);
pVal->key = tcalloc(sizeof(key) + TS_ESCAPE_CHAR_SIZE, 1);
memcpy(pVal->key, key, sizeof(key));
addEscapeCharToString(pVal->key, (int32_t)strlen(pVal->key));
*num_kvs += 1;
return TSDB_CODE_SUCCESS;
......
......@@ -48,12 +48,14 @@ typedef struct SMultiTbStmt {
bool nameSet;
bool tagSet;
bool subSet;
bool tagColSet;
uint64_t currentUid;
char *sqlstr;
uint32_t tbNum;
SStrToken tbname;
SStrToken stbname;
SStrToken values;
SStrToken tagCols;
SArray *tags;
STableDataBlocks *lastBlock;
SHashObj *pTableHash;
......@@ -1250,6 +1252,12 @@ static void insertBatchClean(STscStmt* pStmt) {
pCmd->insertParam.pDataBlocks = tscDestroyBlockArrayList(pSql, pCmd->insertParam.pDataBlocks);
pCmd->insertParam.numOfTables = 0;
STableDataBlocks** p = taosHashIterate(pCmd->insertParam.pTableBlockHashList, NULL);
while(p) {
tfree((*p)->pData);
p = taosHashIterate(pCmd->insertParam.pTableBlockHashList, p);
}
taosHashClear(pCmd->insertParam.pTableBlockHashList);
tscFreeSqlResult(pSql);
tscFreeSubobj(pSql);
......@@ -1343,9 +1351,40 @@ int stmtParseInsertTbTags(SSqlObj* pSql, STscStmt* pStmt) {
pStmt->mtb.stbname = sToken;
sToken = tStrGetToken(pCmd->insertParam.sql, &index, false);
if (sToken.n <= 0 || sToken.type != TK_TAGS) {
tscError("keyword TAGS expected, sql:%s", pCmd->insertParam.sql);
return tscSQLSyntaxErrMsg(pCmd->payload, "keyword TAGS expected", sToken.z ? sToken.z : pCmd->insertParam.sql);
if (sToken.n <= 0 || ((sToken.type != TK_TAGS) && (sToken.type != TK_LP))) {
tscError("invalid token, sql:%s", pCmd->insertParam.sql);
return tscSQLSyntaxErrMsg(pCmd->payload, "invalid token", sToken.z ? sToken.z : pCmd->insertParam.sql);
}
// ... (tag_col_list) TAGS(tag_val_list) ...
int32_t tagColsCnt = 0;
if (sToken.type == TK_LP) {
pStmt->mtb.tagColSet = true;
pStmt->mtb.tagCols = sToken;
int32_t tagColsStart = index;
while (1) {
sToken = tStrGetToken(pCmd->insertParam.sql, &index, false);
if (sToken.type == TK_ILLEGAL) {
return tscSQLSyntaxErrMsg(pCmd->payload, "unrecognized token", sToken.z);
}
if (sToken.type == TK_ID) {
++tagColsCnt;
}
if (sToken.type == TK_RP) {
break;
}
}
if (tagColsCnt == 0) {
tscError("tag column list expected, sql:%s", pCmd->insertParam.sql);
return tscSQLSyntaxErrMsg(pCmd->payload, "tag column list expected", pCmd->insertParam.sql);
}
pStmt->mtb.tagCols.n = index - tagColsStart + 1;
sToken = tStrGetToken(pCmd->insertParam.sql, &index, false);
if (sToken.n <= 0 || sToken.type != TK_TAGS) {
tscError("keyword TAGS expected, sql:%s", pCmd->insertParam.sql);
return tscSQLSyntaxErrMsg(pCmd->payload, "keyword TAGS expected", sToken.z ? sToken.z : pCmd->insertParam.sql);
}
}
sToken = tStrGetToken(pCmd->insertParam.sql, &index, false);
......@@ -1385,6 +1424,11 @@ int stmtParseInsertTbTags(SSqlObj* pSql, STscStmt* pStmt) {
return tscSQLSyntaxErrMsg(pCmd->payload, "no tags", pCmd->insertParam.sql);
}
if (tagColsCnt > 0 && taosArrayGetSize(pStmt->mtb.tags) != tagColsCnt) {
tscError("not match tags, sql:%s", pCmd->insertParam.sql);
return tscSQLSyntaxErrMsg(pCmd->payload, "not match tags", pCmd->insertParam.sql);
}
sToken = tStrGetToken(pCmd->insertParam.sql, &index, false);
if (sToken.n <= 0 || (sToken.type != TK_VALUES && sToken.type != TK_LP)) {
tscError("sql error, sql:%s", pCmd->insertParam.sql);
......@@ -1407,7 +1451,13 @@ int stmtGenInsertStatement(SSqlObj* pSql, STscStmt* pStmt, const char* name, TAO
int32_t j = 0;
while (1) {
len = (size_t)snprintf(str, size - 1, "insert into %s using %.*s tags(", name, pStmt->mtb.stbname.n, pStmt->mtb.stbname.z);
if (pStmt->mtb.tagColSet) {
len = (size_t)snprintf(str, size - 1, "insert into %s using %.*s %.*s tags(",
name, pStmt->mtb.stbname.n, pStmt->mtb.stbname.z, pStmt->mtb.tagCols.n, pStmt->mtb.tagCols.z);
} else {
len = (size_t)snprintf(str, size - 1, "insert into %s using %.*s tags(", name, pStmt->mtb.stbname.n, pStmt->mtb.stbname.z);
}
if (len >= (size -1)) {
size *= 2;
free(str);
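Taken together, the parser and statement-generation changes above allow a prepared multi-table insert to name an explicit tag column list between the super table and TAGS. A hedged sketch of the call sequence; the super table `st1`, its tags `(t1, t2)` and the child table name `d0` are hypothetical:

```c
#include <taos.h>

// Hedged sketch of the optional tag-column-list form accepted by the changes above.
// `tags` must hold exactly as many TAOS_BINDs as the listed tag columns
// (the tagColsCnt check in stmtParseInsertTbTags).
static int prepare_with_tag_cols(TAOS *taos, TAOS_BIND *tags) {
  TAOS_STMT *stmt = taos_stmt_init(taos);
  if (stmt == NULL) return -1;

  const char *sql = "insert into ? using st1 (t1, t2) tags(?, ?) values(?, ?)";
  int code = taos_stmt_prepare(stmt, sql, 0);
  if (code == 0) {
    // binds the child table name and the two tag values in one call
    code = taos_stmt_set_tbname_tags(stmt, "d0", tags);
  }
  // ... taos_stmt_bind_param() / taos_stmt_add_batch() / taos_stmt_execute() would follow ...
  taos_stmt_close(stmt);
  return code;
}
```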
......@@ -1659,6 +1709,13 @@ int taos_stmt_set_tbname_tags(TAOS_STMT* stmt, const char* name, TAOS_BIND* tags
STMT_RET(TSDB_CODE_TSC_APP_ERROR);
}
if ((*t1)->pData == NULL) {
code = tscCreateDataBlockData(*t1, TSDB_PAYLOAD_SIZE, (*t1)->pTableMeta->tableInfo.rowSize, sizeof(SSubmitBlk));
if (code != TSDB_CODE_SUCCESS) {
STMT_RET(code);
}
}
SSubmitBlk* pBlk = (SSubmitBlk*) (*t1)->pData;
pCmd->batchSize = pBlk->numOfRows;
if (pBlk->numOfRows == 0) {
......@@ -1784,7 +1841,6 @@ int taos_stmt_set_tbname_tags(TAOS_STMT* stmt, const char* name, TAOS_BIND* tags
STMT_RET(code);
}
int taos_stmt_set_sub_tbname(TAOS_STMT* stmt, const char* name) {
STscStmt* pStmt = (STscStmt*)stmt;
STMT_CHECK
......@@ -1792,8 +1848,6 @@ int taos_stmt_set_sub_tbname(TAOS_STMT* stmt, const char* name) {
return taos_stmt_set_tbname_tags(stmt, name, NULL);
}
int taos_stmt_set_tbname(TAOS_STMT* stmt, const char* name) {
STscStmt* pStmt = (STscStmt*)stmt;
STMT_CHECK
......@@ -1801,7 +1855,6 @@ int taos_stmt_set_tbname(TAOS_STMT* stmt, const char* name) {
return taos_stmt_set_tbname_tags(stmt, name, NULL);
}
int taos_stmt_close(TAOS_STMT* stmt) {
STscStmt* pStmt = (STscStmt*)stmt;
if (pStmt == NULL || pStmt->taos == NULL) {
......@@ -1868,7 +1921,6 @@ int taos_stmt_bind_param(TAOS_STMT* stmt, TAOS_BIND* bind) {
}
}
int taos_stmt_bind_param_batch(TAOS_STMT* stmt, TAOS_MULTI_BIND* bind) {
STscStmt* pStmt = (STscStmt*)stmt;
......@@ -1932,8 +1984,6 @@ int taos_stmt_bind_single_param_batch(TAOS_STMT* stmt, TAOS_MULTI_BIND* bind, in
STMT_RET(insertStmtBindParamBatch(pStmt, bind, colIdx));
}
int taos_stmt_add_batch(TAOS_STMT* stmt) {
STscStmt* pStmt = (STscStmt*)stmt;
STMT_CHECK
......@@ -2086,7 +2136,6 @@ int taos_stmt_get_param(TAOS_STMT *stmt, int idx, int *type, int *bytes) {
}
}
char *taos_stmt_errstr(TAOS_STMT *stmt) {
STscStmt* pStmt = (STscStmt*)stmt;
......@@ -2097,8 +2146,6 @@ char *taos_stmt_errstr(TAOS_STMT *stmt) {
return taos_errstr(pStmt->pSql);
}
const char *taos_data_type(int type) {
switch (type) {
case TSDB_DATA_TYPE_NULL: return "TSDB_DATA_TYPE_NULL";
......@@ -2115,4 +2162,3 @@ const char *taos_data_type(int type) {
default: return "UNKNOWN";
}
}
This diff is collapsed.
......@@ -916,7 +916,9 @@ int tscBuildQueryMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
pQueryMsg->window.skey = htobe64(query.window.skey);
pQueryMsg->window.ekey = htobe64(query.window.ekey);
pQueryMsg->range.skey = htobe64(query.range.skey);
pQueryMsg->range.ekey = htobe64(query.range.ekey);
pQueryMsg->order = htons(query.order.order);
pQueryMsg->orderColId = htons(query.order.orderColId);
pQueryMsg->fillType = htons(query.fillType);
......@@ -975,7 +977,7 @@ int tscBuildQueryMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
if (pQueryInfo->colCond && taosArrayGetSize(pQueryInfo->colCond) > 0 && !onlyQueryTags(&query) ) {
STblCond *pCond = tsGetTableFilter(pQueryInfo->colCond, pTableMeta->id.uid, 0);
if (pCond != NULL && pCond->cond != NULL) {
pQueryMsg->colCondLen = htons(pCond->len);
pQueryMsg->colCondLen = htonl(pCond->len);
memcpy(pMsg, pCond->cond, pCond->len);
pMsg += pCond->len;
......@@ -1056,7 +1058,7 @@ int tscBuildQueryMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
SCond *pCond = tsGetSTableQueryCond(pTagCond, pTableMeta->id.uid);
if (pCond != NULL && pCond->cond != NULL) {
pQueryMsg->tagCondLen = htons(pCond->len);
pQueryMsg->tagCondLen = htonl(pCond->len);
memcpy(pMsg, pCond->cond, pCond->len);
pMsg += pCond->len;
......
......@@ -468,7 +468,7 @@ SSqlObj* recreateSqlObj(SSub* pSub) {
}
registerSqlObj(pSql);
pSql->rootObj = pSql;
code = tsParseSql(pSql, true);
if (code == TSDB_CODE_TSC_ACTION_IN_PROGRESS) {
tsem_wait(&pSub->sem);
......
......@@ -394,6 +394,12 @@ SJoinSupporter* tscCreateJoinSupporter(SSqlObj* pSql, int32_t index) {
memcpy(&pSupporter->interval, &pQueryInfo->interval, sizeof(pSupporter->interval));
pSupporter->limit = pQueryInfo->limit;
if (tscIsPointInterpQuery(pQueryInfo)) {
pSupporter->fillType = pQueryInfo->fillType;
pSupporter->fillVal = pQueryInfo->fillVal;
pSupporter->numOfFillVal = pQueryInfo->numOfFillVal;
}
STableMetaInfo* pTableMetaInfo = tscGetTableMetaInfoFromCmd(&pSql->cmd, index);
pSupporter->uid = pTableMetaInfo->pTableMeta->id.uid;
assert (pSupporter->uid != 0);
......@@ -579,6 +585,13 @@ static int32_t tscLaunchRealSubqueries(SSqlObj* pSql) {
pQueryInfo->fieldsInfo = pSupporter->fieldsInfo;
pQueryInfo->groupbyExpr = pSupporter->groupInfo;
pQueryInfo->pUpstream = taosArrayInit(4, sizeof(POINTER_BYTES));
if (tscIsPointInterpQuery(pQueryInfo)) {
pQueryInfo->fillType = pSupporter->fillType;
pQueryInfo->numOfFillVal = pSupporter->numOfFillVal;
pQueryInfo->fillVal = malloc(pQueryInfo->numOfFillVal * sizeof(*pSupporter->fillVal));
memcpy(pQueryInfo->fillVal, pSupporter->fillVal, sizeof(*pSupporter->fillVal) * pQueryInfo->numOfFillVal);
}
assert(pNew->subState.numOfSub == 0 && pQueryInfo->numOfTables == 1);
......
......@@ -187,7 +187,7 @@ bool tscQueryTags(SQueryInfo* pQueryInfo) {
continue;
}
if (functId != TSDB_FUNC_TAGPRJ && functId != TSDB_FUNC_TID_TAG) {
if (functId != TSDB_FUNC_TAGPRJ && functId != TSDB_FUNC_TID_TAG && functId != TSDB_FUNC_BLKINFO) {
return false;
}
}
......@@ -367,7 +367,7 @@ bool tscIsPointInterpQuery(SQueryInfo* pQueryInfo) {
assert(pExpr != NULL);
int32_t functionId = pExpr->base.functionId;
if (functionId == TSDB_FUNC_TAG || functionId == TSDB_FUNC_TS) {
if (functionId == TSDB_FUNC_TAG || functionId == TSDB_FUNC_TAG_DUMMY || functionId == TSDB_FUNC_TS || functionId == TSDB_FUNC_TS_DUMMY) {
continue;
}
......@@ -379,6 +379,23 @@ bool tscIsPointInterpQuery(SQueryInfo* pQueryInfo) {
return true;
}
bool tscGetPointInterpQuery(SQueryInfo* pQueryInfo) {
size_t size = tscNumOfExprs(pQueryInfo);
for (int32_t i = 0; i < size; ++i) {
SExprInfo* pExpr = tscExprGet(pQueryInfo, i);
assert(pExpr != NULL);
int32_t functionId = pExpr->base.functionId;
if (functionId == TSDB_FUNC_INTERP) {
return true;
}
}
return false;
}
bool tsIsArithmeticQueryOnAggResult(SQueryInfo* pQueryInfo) {
if (tscIsProjectionQuery(pQueryInfo)) {
return false;
......@@ -942,23 +959,30 @@ SSDataBlock* doGetDataBlock(void* param, bool* newgroup) {
pBlock->info.rows = pRes->numOfRows;
if (pRes->numOfRows != 0) {
doSetupSDataBlock(pRes, pBlock, pInput->pFilterInfo);
*newgroup = false;
return pBlock;
if (pBlock->info.rows > 0) {
*newgroup = false;
return pBlock;
}
}
// No data block exists. So retrieve and transfer it into to SSDataBlock
TAOS_ROW pRow = NULL;
taos_fetch_block(pSql, &pRow);
SSDataBlock* result = NULL;
do {
// No data block exists. So retrieve and transfer it into to SSDataBlock
TAOS_ROW pRow = NULL;
taos_fetch_block(pSql, &pRow);
if (pRes->numOfRows == 0) {
pOperator->status = OP_EXEC_DONE;
return NULL;
}
if (pRes->numOfRows == 0) {
pOperator->status = OP_EXEC_DONE;
result = NULL;
break;
}
pBlock->info.rows = pRes->numOfRows;
doSetupSDataBlock(pRes, pBlock, pInput->pFilterInfo);
*newgroup = false;
result = pBlock;
} while (result->info.rows == 0);
pBlock->info.rows = pRes->numOfRows;
doSetupSDataBlock(pRes, pBlock, pInput->pFilterInfo);
*newgroup = false;
return pBlock;
return result;
}
static void fetchNextBlockIfCompleted(SOperatorInfo* pOperator, bool* newgroup) {
......@@ -1838,6 +1862,32 @@ int32_t tscCreateDataBlock(size_t defaultSize, int32_t rowSize, int32_t startOff
return TSDB_CODE_TSC_OUT_OF_MEMORY;
}
int32_t code = tscCreateDataBlockData(dataBuf, defaultSize, rowSize, startOffset);
if (code != TSDB_CODE_SUCCESS) {
tfree(dataBuf);
return code;
}
//Here we keep the tableMeta to avoid it to be remove by other threads.
dataBuf->pTableMeta = tscTableMetaDup(pTableMeta);
SParsedDataColInfo* pColInfo = &dataBuf->boundColumnInfo;
SSchema* pSchema = tscGetTableSchema(dataBuf->pTableMeta);
tscSetBoundColumnInfo(pColInfo, pSchema, dataBuf->pTableMeta->tableInfo.numOfColumns);
dataBuf->vgId = dataBuf->pTableMeta->vgId;
tNameAssign(&dataBuf->tableName, name);
assert(defaultSize > 0 && pTableMeta != NULL && dataBuf->pTableMeta != NULL);
*dataBlocks = dataBuf;
return TSDB_CODE_SUCCESS;
}
int32_t tscCreateDataBlockData(STableDataBlocks* dataBuf, size_t defaultSize, int32_t rowSize, int32_t startOffset) {
assert(dataBuf != NULL);
dataBuf->nAllocSize = (uint32_t)defaultSize;
dataBuf->headerSize = startOffset;
......@@ -1850,30 +1900,16 @@ int32_t tscCreateDataBlock(size_t defaultSize, int32_t rowSize, int32_t startOff
dataBuf->pData = malloc(dataBuf->nAllocSize);
if (dataBuf->pData == NULL) {
tscError("failed to allocated memory, reason:%s", strerror(errno));
tfree(dataBuf);
return TSDB_CODE_TSC_OUT_OF_MEMORY;
}
memset(dataBuf->pData, 0, sizeof(SSubmitBlk));
//Here we keep the tableMeta to avoid it to be remove by other threads.
dataBuf->pTableMeta = tscTableMetaDup(pTableMeta);
SParsedDataColInfo* pColInfo = &dataBuf->boundColumnInfo;
SSchema* pSchema = tscGetTableSchema(dataBuf->pTableMeta);
tscSetBoundColumnInfo(pColInfo, pSchema, dataBuf->pTableMeta->tableInfo.numOfColumns);
dataBuf->ordered = true;
dataBuf->prevTS = INT64_MIN;
dataBuf->rowSize = rowSize;
dataBuf->size = startOffset;
dataBuf->tsSource = -1;
dataBuf->vgId = dataBuf->pTableMeta->vgId;
tNameAssign(&dataBuf->tableName, name);
assert(defaultSize > 0 && pTableMeta != NULL && dataBuf->pTableMeta != NULL);
*dataBlocks = dataBuf;
return TSDB_CODE_SUCCESS;
}
......@@ -3422,7 +3458,7 @@ int32_t tscQueryInfoCopy(SQueryInfo* pQueryInfo, const SQueryInfo* pSrc) {
pQueryInfo->clauseLimit = pSrc->clauseLimit;
pQueryInfo->prjOffset = pSrc->prjOffset;
pQueryInfo->numOfTables = 0;
pQueryInfo->window = pSrc->window;
pQueryInfo->range = pSrc->range;
pQueryInfo->sessionWindow = pSrc->sessionWindow;
pQueryInfo->pTableMetaInfo = NULL;
pQueryInfo->multigroupResult = pSrc->multigroupResult;
......@@ -3815,6 +3851,7 @@ SSqlObj* createSubqueryObj(SSqlObj* pSql, int16_t tableIndex, __async_cb_func_t
memcpy(&pNewQueryInfo->interval, &pQueryInfo->interval, sizeof(pNewQueryInfo->interval));
pNewQueryInfo->type = pQueryInfo->type;
pNewQueryInfo->window = pQueryInfo->window;
pNewQueryInfo->range = pQueryInfo->range;
pNewQueryInfo->limit = pQueryInfo->limit;
pNewQueryInfo->slimit = pQueryInfo->slimit;
pNewQueryInfo->order = pQueryInfo->order;
......@@ -5034,6 +5071,7 @@ int32_t tscCreateQueryFromQueryInfo(SQueryInfo* pQueryInfo, SQueryAttr* pQueryAt
pQueryAttr->fillType = pQueryInfo->fillType;
pQueryAttr->havingNum = pQueryInfo->havingFieldNum;
pQueryAttr->pUdfInfo = pQueryInfo->pUdfInfo;
pQueryAttr->range = pQueryInfo->range;
if (pQueryInfo->order.order == TSDB_ORDER_ASC) { // TODO refactor
pQueryAttr->window = pQueryInfo->window;
......@@ -5333,4 +5371,3 @@ char* cloneCurrentDBName(SSqlObj* pSql) {
return p;
}
......@@ -253,9 +253,10 @@ int dataColAppendVal(SDataCol *pCol, const void *value, int numOfRows, int maxPo
}
if(tdAllocMemForCol(pCol, maxPoints) < 0) return -1;
if (numOfRows > 0) {
if (((rowOffset == 0) && (numOfRows > 0)) || ((rowOffset == -1) && (numOfRows >= 0))) {
// Find the first not null value, fill all previouse values as NULL
dataColSetNEleNull(pCol, numOfRows);
dataColSetNEleNull(pCol, numOfRows - rowOffset);
}
}
......@@ -463,9 +464,7 @@ static void tdAppendDataRowToDataCol(SDataRow row, STSchema *pSchema, SDataCols
int rcol = 0;
int dcol = 0;
while (dcol < pCols->numOfCols) {
bool setCol = 0;
SDataCol *pDataCol = &(pCols->cols[dcol]);
if (rcol >= schemaNCols(pSchema)) {
dataColAppendVal(pDataCol, getNullValue(pDataCol->type), pCols->numOfRows, pCols->maxPoints, rowOffset);
......@@ -476,14 +475,22 @@ static void tdAppendDataRowToDataCol(SDataRow row, STSchema *pSchema, SDataCols
STColumn *pRowCol = schemaColAt(pSchema, rcol);
if (pRowCol->colId == pDataCol->colId) {
void *value = tdGetRowDataOfCol(row, pRowCol->type, pRowCol->offset + TD_DATA_ROW_HEAD_SIZE);
if(!isNull(value, pDataCol->type)) setCol = 1;
dataColAppendVal(pDataCol, value, pCols->numOfRows, pCols->maxPoints, rowOffset);
if (rowOffset == 0) {
dataColAppendVal(pDataCol, value, pCols->numOfRows, pCols->maxPoints, rowOffset);
} else if (rowOffset == -1) {
// for update 2
if (!isNull(value, pDataCol->type)) {
dataColAppendVal(pDataCol, value, pCols->numOfRows, pCols->maxPoints, rowOffset);
}
} else {
ASSERT(0);
}
dcol++;
rcol++;
} else if (pRowCol->colId < pDataCol->colId) {
rcol++;
} else {
if(forceSetNull || setCol) {
if(forceSetNull) {
dataColAppendVal(pDataCol, getNullValue(pDataCol->type), pCols->numOfRows, pCols->maxPoints, rowOffset);
}
dcol++;
......@@ -501,7 +508,6 @@ static void tdAppendKvRowToDataCol(SKVRow row, STSchema *pSchema, SDataCols *pCo
int nRowCols = kvRowNCols(row);
while (dcol < pCols->numOfCols) {
bool setCol = 0;
SDataCol *pDataCol = &(pCols->cols[dcol]);
if (rcol >= nRowCols || rcol >= schemaNCols(pSchema)) {
dataColAppendVal(pDataCol, getNullValue(pDataCol->type), pCols->numOfRows, pCols->maxPoints, rowOffset);
......@@ -513,14 +519,22 @@ static void tdAppendKvRowToDataCol(SKVRow row, STSchema *pSchema, SDataCols *pCo
if (colIdx->colId == pDataCol->colId) {
void *value = tdGetKvRowDataOfCol(row, colIdx->offset);
if(!isNull(value, pDataCol->type)) setCol = 1;
dataColAppendVal(pDataCol, value, pCols->numOfRows, pCols->maxPoints, rowOffset);
if (rowOffset == 0) {
dataColAppendVal(pDataCol, value, pCols->numOfRows, pCols->maxPoints, rowOffset);
} else if (rowOffset == -1) {
// for update 2
if (!isNull(value, pDataCol->type)) {
dataColAppendVal(pDataCol, value, pCols->numOfRows, pCols->maxPoints, rowOffset);
}
} else {
ASSERT(0);
}
++dcol;
++rcol;
} else if (colIdx->colId < pDataCol->colId) {
++rcol;
} else {
if(forceSetNull || setCol) {
if (forceSetNull) {
dataColAppendVal(pDataCol, getNullValue(pDataCol->type), pCols->numOfRows, pCols->maxPoints, rowOffset);
}
++dcol;
......
......@@ -73,7 +73,7 @@ int32_t tsMaxBinaryDisplayWidth = 30;
* -1: all data are not compressed
* other values: if the message payload size is greater than the tsCompressMsgSize, the message will be compressed.
*/
int32_t tsCompressMsgSize = -1;
int32_t tsCompressMsgSize = 512 * 1024;
/* denote if server needs to compress the retrieved column data before adding to the rpc response message body.
* 0: all data are compressed
......@@ -289,7 +289,7 @@ char Compressor[32] = "ZSTD_COMPRESSOR"; // ZSTD_COMPRESSOR or GZIP_COMPRES
int8_t tsDeadLockKillQuery = 0;
// default JSON string type
char tsDefaultJSONStrType[7] = "binary";
char tsDefaultJSONStrType[7] = "nchar";
char tsSmlChildTableName[TSDB_TABLE_NAME_LEN] = ""; //user defined child table name can be specified in tag value. If set to empty system will generate table name using MD5 hash.
int32_t (*monStartSystemFp)() = NULL;
......
......@@ -163,8 +163,12 @@ namespace TDengineDriver
[DllImport("taos", EntryPoint = "taos_close", CallingConvention = CallingConvention.Cdecl)]
static extern public int Close(IntPtr taos);
//get precisionin parameter restultset
//get precision in restultset
[DllImport("taos", EntryPoint = "taos_result_precision", CallingConvention = CallingConvention.Cdecl)]
static extern public int ResultPrecision(IntPtr taos);
//schemaless API
[DllImport("taos",SetLastError = true, EntryPoint = "taos_schemaless_insert", CallingConvention = CallingConvention.Cdecl)]
static extern public IntPtr SchemalessInsert(IntPtr taos, string[] lines, int numLines, int protocol, int precision);
}
}
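The new C# binding above maps to the C function `taos_schemaless_insert` (its `IntPtr` return corresponds to `TAOS_RES*`). A hedged C sketch of calling it directly; the measurement, tag and field names in the sample line are made up, and the protocol/precision enum names are assumed from taos.h:

```c
#include <stdio.h>
#include <taos.h>

// Hedged sketch: insert one InfluxDB-line-protocol record via the schemaless API.
static int sml_insert_one_line(TAOS *taos) {
  char *lines[] = {
      "meters,location=beijing current=10.3 1626006833639"  // millisecond timestamp
  };

  TAOS_RES *res = taos_schemaless_insert(taos, lines, 1,
                                         TSDB_SML_LINE_PROTOCOL,
                                         TSDB_SML_TIMESTAMP_MILLI_SECONDS);
  int code = taos_errno(res);
  if (code != 0) {
    fprintf(stderr, "schemaless insert failed: %s\n", taos_errstr(res));
  }
  taos_free_result(res);
  return code;
}
```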
......@@ -5,6 +5,7 @@ import com.taosdata.jdbc.TSDBErrorNumbers;
import org.apache.http.HeaderElement;
import org.apache.http.HeaderElementIterator;
import org.apache.http.HttpEntity;
import org.apache.http.NoHttpResponseException;
import org.apache.http.client.ClientProtocolException;
import org.apache.http.client.config.RequestConfig;
import org.apache.http.client.methods.*;
......
......@@ -479,14 +479,15 @@ typedef struct {
bool stateWindow; // state window flag
STimeWindow window;
STimeWindow range; // result range for interp query
int32_t numOfTables;
int16_t order;
int16_t orderColId;
int16_t numOfCols; // the number of columns will be load from vnode
SInterval interval;
SSessionWindow sw; // session window
uint16_t tagCondLen; // tag length in current query
uint16_t colCondLen; // column length in current query
uint32_t tagCondLen; // tag length in current query
int32_t colCondLen; // column length in current query
int16_t numOfGroupCols; // num of group by columns
int16_t orderByIdx;
int16_t orderType; // used in group by xx order by xxx
......
......@@ -405,6 +405,7 @@ void tsdbDestroyCommitQueue();
int tsdbSyncCommit(STsdbRepo *repo);
void tsdbIncCommitRef(int vgId);
void tsdbDecCommitRef(int vgId);
void tsdbSwitchTable(TsdbQueryHandleT pQueryHandle);
// For TSDB file sync
int tsdbSyncSend(void *pRepo, SOCKET socketFd);
......
......@@ -142,77 +142,78 @@
#define TK_DISTINCT 124
#define TK_FROM 125
#define TK_VARIABLE 126
#define TK_INTERVAL 127
#define TK_EVERY 128
#define TK_SESSION 129
#define TK_STATE_WINDOW 130
#define TK_FILL 131
#define TK_SLIDING 132
#define TK_ORDER 133
#define TK_BY 134
#define TK_ASC 135
#define TK_GROUP 136
#define TK_HAVING 137
#define TK_LIMIT 138
#define TK_OFFSET 139
#define TK_SLIMIT 140
#define TK_SOFFSET 141
#define TK_WHERE 142
#define TK_RESET 143
#define TK_QUERY 144
#define TK_SYNCDB 145
#define TK_ADD 146
#define TK_COLUMN 147
#define TK_MODIFY 148
#define TK_TAG 149
#define TK_CHANGE 150
#define TK_SET 151
#define TK_KILL 152
#define TK_CONNECTION 153
#define TK_STREAM 154
#define TK_COLON 155
#define TK_ABORT 156
#define TK_AFTER 157
#define TK_ATTACH 158
#define TK_BEFORE 159
#define TK_BEGIN 160
#define TK_CASCADE 161
#define TK_CLUSTER 162
#define TK_CONFLICT 163
#define TK_COPY 164
#define TK_DEFERRED 165
#define TK_DELIMITERS 166
#define TK_DETACH 167
#define TK_EACH 168
#define TK_END 169
#define TK_EXPLAIN 170
#define TK_FAIL 171
#define TK_FOR 172
#define TK_IGNORE 173
#define TK_IMMEDIATE 174
#define TK_INITIALLY 175
#define TK_INSTEAD 176
#define TK_KEY 177
#define TK_OF 178
#define TK_RAISE 179
#define TK_REPLACE 180
#define TK_RESTRICT 181
#define TK_ROW 182
#define TK_STATEMENT 183
#define TK_TRIGGER 184
#define TK_VIEW 185
#define TK_IPTOKEN 186
#define TK_SEMI 187
#define TK_NONE 188
#define TK_PREV 189
#define TK_LINEAR 190
#define TK_IMPORT 191
#define TK_TBNAME 192
#define TK_JOIN 193
#define TK_INSERT 194
#define TK_INTO 195
#define TK_VALUES 196
#define TK_FILE 197
#define TK_RANGE 127
#define TK_INTERVAL 128
#define TK_EVERY 129
#define TK_SESSION 130
#define TK_STATE_WINDOW 131
#define TK_FILL 132
#define TK_SLIDING 133
#define TK_ORDER 134
#define TK_BY 135
#define TK_ASC 136
#define TK_GROUP 137
#define TK_HAVING 138
#define TK_LIMIT 139
#define TK_OFFSET 140
#define TK_SLIMIT 141
#define TK_SOFFSET 142
#define TK_WHERE 143
#define TK_RESET 144
#define TK_QUERY 145
#define TK_SYNCDB 146
#define TK_ADD 147
#define TK_COLUMN 148
#define TK_MODIFY 149
#define TK_TAG 150
#define TK_CHANGE 151
#define TK_SET 152
#define TK_KILL 153
#define TK_CONNECTION 154
#define TK_STREAM 155
#define TK_COLON 156
#define TK_ABORT 157
#define TK_AFTER 158
#define TK_ATTACH 159
#define TK_BEFORE 160
#define TK_BEGIN 161
#define TK_CASCADE 162
#define TK_CLUSTER 163
#define TK_CONFLICT 164
#define TK_COPY 165
#define TK_DEFERRED 166
#define TK_DELIMITERS 167
#define TK_DETACH 168
#define TK_EACH 169
#define TK_END 170
#define TK_EXPLAIN 171
#define TK_FAIL 172
#define TK_FOR 173
#define TK_IGNORE 174
#define TK_IMMEDIATE 175
#define TK_INITIALLY 176
#define TK_INSTEAD 177
#define TK_KEY 178
#define TK_OF 179
#define TK_RAISE 180
#define TK_REPLACE 181
#define TK_RESTRICT 182
#define TK_ROW 183
#define TK_STATEMENT 184
#define TK_TRIGGER 185
#define TK_VIEW 186
#define TK_IPTOKEN 187
#define TK_SEMI 188
#define TK_NONE 189
#define TK_PREV 190
#define TK_LINEAR 191
#define TK_IMPORT 192
#define TK_TBNAME 193
#define TK_JOIN 194
#define TK_INSERT 195
#define TK_INTO 196
#define TK_VALUES 197
#define TK_FILE 198
......
......@@ -50,6 +50,47 @@ typedef struct {
#define TSDB_DATA_TYPE_POINTER_ARRAY (1000)
#define TSDB_DATA_TYPE_VALUE_ARRAY (1001)
#define COPY_DATA(dst, src) *((int64_t *)(dst)) = *((int64_t *)(src))
#define COPY_TYPED_DATA(_v, _type, _data) \
do { \
switch (_type) { \
case TSDB_DATA_TYPE_BOOL: \
case TSDB_DATA_TYPE_TINYINT: \
(*(int8_t *)_v) = GET_INT8_VAL(_data); \
break; \
case TSDB_DATA_TYPE_UTINYINT: \
(*(uint8_t *)_v) = GET_UINT8_VAL(_data); \
break; \
case TSDB_DATA_TYPE_SMALLINT: \
(*(int16_t *)_v) = GET_INT16_VAL(_data); \
break; \
case TSDB_DATA_TYPE_USMALLINT: \
(*(uint16_t *)_v) = GET_UINT16_VAL(_data); \
break; \
case TSDB_DATA_TYPE_TIMESTAMP: \
case TSDB_DATA_TYPE_BIGINT: \
(*(int64_t *)_v) = (GET_INT64_VAL(_data)); \
break; \
case TSDB_DATA_TYPE_UBIGINT: \
(*(uint64_t *)_v) = (GET_UINT64_VAL(_data)); \
break; \
case TSDB_DATA_TYPE_FLOAT: \
(*(float *)_v) = GET_FLOAT_VAL(_data); \
break; \
case TSDB_DATA_TYPE_DOUBLE: \
(*(double *)_v) = GET_DOUBLE_VAL(_data); \
break; \
case TSDB_DATA_TYPE_UINT: \
(*(uint32_t *)_v) = GET_UINT32_VAL(_data); \
break; \
default: \
(*(int32_t *)_v) = GET_INT32_VAL(_data); \
break; \
} \
} while (0)
#define GET_TYPED_DATA(_v, _finalType, _type, _data) \
do { \
switch (_type) { \
......
......@@ -77,6 +77,14 @@ ELSEIF (TD_WINDOWS)
AUX_SOURCE_DIRECTORY(./src SRC)
ADD_EXECUTABLE(taosdemo ${SRC})
SET_SOURCE_FILES_PROPERTIES(./src/demoUtil.c PROPERTIES COMPILE_FLAGS -w)
SET_SOURCE_FILES_PROPERTIES(./src/demoData.c PROPERTIES COMPILE_FLAGS -w)
SET_SOURCE_FILES_PROPERTIES(./src/demoInsert.c PROPERTIES COMPILE_FLAGS -w)
SET_SOURCE_FILES_PROPERTIES(./src/demoCommandOpt.c PROPERTIES COMPILE_FLAGS -w)
SET_SOURCE_FILES_PROPERTIES(./src/demoQuery.c PROPERTIES COMPILE_FLAGS -w)
SET_SOURCE_FILES_PROPERTIES(./src/demoMain.c PROPERTIES COMPILE_FLAGS -w)
SET_SOURCE_FILES_PROPERTIES(./src/demoSubscribe.c PROPERTIES COMPILE_FLAGS -w)
SET_SOURCE_FILES_PROPERTIES(./src/demoOutput.c PROPERTIES COMPILE_FLAGS -w)
SET_SOURCE_FILES_PROPERTIES(./src/demoJsonOpt.c PROPERTIES COMPILE_FLAGS -w)
IF (TD_SOMODE_STATIC)
TARGET_LINK_LIBRARIES(taosdemo taos_static cJson lua)
ELSE ()
......
......@@ -204,13 +204,14 @@ int getChildNameOfSuperTableWithLimitAndOffset(TAOS *taos, char *dbName,
int64_t childTblCount = (limit < 0) ? DEFAULT_CHILDTABLES : limit;
int64_t count = 0;
char * childTblName = *childTblNameOfSuperTbl;
char * pTblName = childTblName;
if (childTblName == NULL) {
childTblName = (char *)calloc(1, childTblCount * TSDB_TABLE_NAME_LEN);
if (childTblName == NULL) {
errorPrint("%s", "failed to allocate memory\n");
}
}
char *pTblName = childTblName;
snprintf(limitBuf, 100, " limit %" PRId64 " offset %" PRIu64 "", limit,
offset);
......
......@@ -1139,7 +1139,7 @@ static int32_t mnodeAlterDbFp(SMnodeMsg *pMsg) {
return sdbUpdateRow(&row);
}
//bnNotify();
bnNotify();
return TSDB_CODE_MND_ACTION_IN_PROGRESS;
}
......
......@@ -121,7 +121,7 @@ static int32_t mnodeProcessShowMsg(SMnodeMsg *pMsg) {
}
if (!tsMnodeShowMetaFp[pShowMsg->type] || !tsMnodeShowRetrieveFp[pShowMsg->type]) {
mError("show type:%s is not support", mnodeGetShowType(pShowMsg->type));
mWarn("show type:%s is not support", mnodeGetShowType(pShowMsg->type));
return TSDB_CODE_COM_OPS_NOT_SUPPORT;
}
......
......@@ -742,19 +742,6 @@ static int32_t mnodeGetVgroupMeta(STableMetaMsg *pMeta, SShowObj *pShow, void *p
return 0;
}
static bool mnodeFilterVgroups(SVgObj *pVgroup, STableObj *pTable) {
if (NULL == pTable || pTable->type == TSDB_SUPER_TABLE) {
return true;
}
SCTableObj *pCTable = (SCTableObj *)pTable;
if (pVgroup->vgId == pCTable->vgId) {
return true;
} else {
return false;
}
}
static int32_t mnodeRetrieveVgroups(SShowObj *pShow, char *data, int32_t rows, void *pConn) {
int32_t numOfRows = 0;
SVgObj *pVgroup = NULL;
......@@ -770,11 +757,6 @@ static int32_t mnodeRetrieveVgroups(SShowObj *pShow, char *data, int32_t rows, v
return 0;
}
STableObj *pTable = NULL;
if (pShow->payloadLen > 0 ) {
pTable = mnodeGetTable(pShow->payload);
}
while (numOfRows < rows) {
pShow->pIter = mnodeGetNextVgroup(pShow->pIter, &pVgroup);
if (pVgroup == NULL) break;
......@@ -784,11 +766,6 @@ static int32_t mnodeRetrieveVgroups(SShowObj *pShow, char *data, int32_t rows, v
continue;
}
if (!mnodeFilterVgroups(pVgroup, pTable)) {
mnodeDecVgroupRef(pVgroup);
continue;
}
cols = 0;
pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows;
......@@ -842,7 +819,6 @@ static int32_t mnodeRetrieveVgroups(SShowObj *pShow, char *data, int32_t rows, v
mnodeVacuumResult(data, pShow->numOfColumns, numOfRows, rows, pShow);
pShow->numOfReads += numOfRows;
mnodeDecTableRef(pTable);
mnodeDecDbRef(pDb);
return numOfRows;
......
......@@ -94,6 +94,8 @@ typedef struct SSessionWindow {
} SSessionWindow;
int64_t taosTimeAdd(int64_t t, int64_t duration, char unit, int32_t precision);
int64_t taosTimeSub(int64_t t, int64_t duration, char unit, int32_t precision);
int64_t taosTimeTruncate(int64_t t, const SInterval* pInterval, int32_t precision);
int32_t taosTimeCountInterval(int64_t skey, int64_t ekey, int64_t interval, char unit, int32_t precision);
......
......@@ -424,29 +424,44 @@ int64_t convertTimePrecision(int64_t time, int32_t fromPrecision, int32_t toPrec
}
} //end switch fromPrecision
end_:
if (tempResult > (double)INT64_MAX) return INT64_MAX;
if (tempResult < (double)INT64_MIN) return INT64_MIN + 1; // INT64_MIN means NULL
if (tempResult >= (double)INT64_MAX) return INT64_MAX;
if (tempResult <= (double)INT64_MIN) return INT64_MIN + 1; // INT64_MIN means NULL
return time;
}
static int32_t getDuration(int64_t val, char unit, int64_t* result, int32_t timePrecision) {
switch (unit) {
case 's':
case 's':{
double temp = ((double)val) * MILLISECOND_PER_SECOND;
if (temp >= (double)INT64_MAX || temp <= (double)INT64_MIN) return -1;
(*result) = convertTimePrecision(val * MILLISECOND_PER_SECOND, TSDB_TIME_PRECISION_MILLI, timePrecision);
break;
case 'm':
}
case 'm':{
double temp = ((double)val) * MILLISECOND_PER_MINUTE;
if (temp >= (double)INT64_MAX || temp <= (double)INT64_MIN) return -1;
(*result) = convertTimePrecision(val * MILLISECOND_PER_MINUTE, TSDB_TIME_PRECISION_MILLI, timePrecision);
break;
case 'h':
}
case 'h':{
double temp = ((double)val) * MILLISECOND_PER_HOUR;
if (temp >= (double)INT64_MAX || temp <= (double)INT64_MIN) return -1;
(*result) = convertTimePrecision(val * MILLISECOND_PER_HOUR, TSDB_TIME_PRECISION_MILLI, timePrecision);
break;
case 'd':
}
case 'd': {
double temp = ((double)val) * MILLISECOND_PER_DAY;
if (temp >= (double)INT64_MAX || temp <= (double)INT64_MIN) return -1;
(*result) = convertTimePrecision(val * MILLISECOND_PER_DAY, TSDB_TIME_PRECISION_MILLI, timePrecision);
break;
case 'w':
}
case 'w': {
double temp = ((double)val) * MILLISECOND_PER_WEEK;
if (temp >= (double)INT64_MAX || temp <= (double)INT64_MIN) return -1;
(*result) = convertTimePrecision(val * MILLISECOND_PER_WEEK, TSDB_TIME_PRECISION_MILLI, timePrecision);
break;
}
case 'a':
(*result) = convertTimePrecision(val, TSDB_TIME_PRECISION_MILLI, timePrecision);
break;
......@@ -533,6 +548,27 @@ int64_t taosTimeAdd(int64_t t, int64_t duration, char unit, int32_t precision) {
return (int64_t)(mktime(&tm) * TSDB_TICK_PER_SECOND(precision));
}
int64_t taosTimeSub(int64_t t, int64_t duration, char unit, int32_t precision) {
if (duration == 0) {
return t;
}
if (unit == 'y') {
duration *= 12;
} else if (unit != 'n') {
return t - duration;
}
struct tm tm;
time_t tt = (time_t)(t / TSDB_TICK_PER_SECOND(precision));
localtime_r(&tt, &tm);
int mon = tm.tm_year * 12 + tm.tm_mon - (int)duration;
tm.tm_year = mon / 12;
tm.tm_mon = mon % 12;
return (int64_t)(mktime(&tm) * TSDB_TICK_PER_SECOND(precision));
}
int32_t taosTimeCountInterval(int64_t skey, int64_t ekey, int64_t interval, char unit, int32_t precision) {
if (ekey < skey) {
int64_t tmp = ekey;
......
......@@ -192,6 +192,7 @@ typedef struct SQLFunctionCtx {
char * pOutput; // final result output buffer, point to sdata->data
uint8_t currentStage; // record current running step, default: 0
int64_t startTs; // timestamp range of current query when function is executed on a specific data block
int64_t endTs;
int32_t numOfParams;
tVariant param[4]; // input parameter, e.g., top(k, 20), the number of results for top query is kept in param
int64_t *ptsList; // corresponding timestamp array list
......
......@@ -63,6 +63,10 @@ enum {
QUERY_OVER = 0x4u,
};
enum {
OPTION_SWITCH_TABLE = 1,
};
typedef struct SResultRowPool {
int32_t elemSize;
int32_t blockSize;
......@@ -241,6 +245,7 @@ typedef struct SQueryAttr {
int16_t numOfTags;
STimeWindow window;
STimeWindow range;
SInterval interval;
SSessionWindow sw;
int16_t precision;
......@@ -277,6 +282,7 @@ typedef struct SQueryAttr {
} SQueryAttr;
typedef SSDataBlock* (*__operator_fn_t)(void* param, bool* newgroup);
typedef void (*__operator_notify_fn_t)(void* param, int32_t option);
typedef void (*__optr_cleanup_fn_t)(void* param, int32_t num);
struct SOperatorInfo;
......@@ -348,7 +354,7 @@ enum OPERATOR_TYPE_E {
OP_Distinct = 20,
OP_Join = 21,
OP_StateWindow = 22,
OP_AllTimeWindow = 23,
OP_TimeEvery = 23,
OP_AllMultiTableTimeInterval = 24,
OP_Order = 25,
};
......@@ -363,10 +369,11 @@ typedef struct SOperatorInfo {
SExprInfo *pExpr;
SQueryRuntimeEnv *pRuntimeEnv;
struct SOperatorInfo **upstream; // upstream pointer list
int32_t numOfUpstream; // number of upstream. The value is always ONE expect for join operator
__operator_fn_t exec;
__optr_cleanup_fn_t cleanup;
struct SOperatorInfo **upstream; // upstream pointer list
int32_t numOfUpstream; // number of upstream. The value is always ONE expect for join operator
__operator_fn_t exec;
__operator_notify_fn_t notify;
__optr_cleanup_fn_t cleanup;
} SOperatorInfo;
enum {
......@@ -479,6 +486,21 @@ typedef struct SProjectOperatorInfo {
SSDataBlock *existDataBlock;
} SProjectOperatorInfo;
typedef struct STimeEveryOperatorInfo {
SOptrBasicInfo binfo;
int32_t bufCapacity;
uint32_t seed;
int64_t tableEndKey;
SSDataBlock *lastBlock;
SHashObj *rangeStart;
int32_t lastGroupIdx;
bool groupDone;
bool allDone;
SSDataBlock *existDataBlock;
} STimeEveryOperatorInfo;
typedef struct SLimitOperatorInfo {
int64_t limit;
int64_t total;
......@@ -599,13 +621,12 @@ SOperatorInfo* createAggregateOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOpera
SOperatorInfo* createProjectOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput);
SOperatorInfo* createLimitOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream);
SOperatorInfo* createTimeIntervalOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput);
SOperatorInfo* createAllTimeIntervalOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput);
SOperatorInfo* createSWindowOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput);
SOperatorInfo* createFillOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput, bool multigroupResult);
SOperatorInfo* createGroupbyOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput);
SOperatorInfo* createMultiTableAggOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput);
SOperatorInfo* createMultiTableTimeIntervalOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput);
SOperatorInfo* createAllMultiTableTimeIntervalOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput);
SOperatorInfo* createTimeEveryOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput);
SOperatorInfo* createTagScanOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SExprInfo* pExpr, int32_t numOfOutput);
SOperatorInfo* createDistinctOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput);
SOperatorInfo* createTableBlockInfoScanOperator(void* pTsdbQueryHandle, SQueryRuntimeEnv* pRuntimeEnv);
......@@ -649,7 +670,7 @@ int32_t createQueryFunc(SQueriedTableInfo* pTableInfo, int32_t numOfOutput, SExp
int32_t createIndirectQueryFuncExprFromMsg(SQueryTableMsg *pQueryMsg, int32_t numOfOutput, SExprInfo **pExprInfo,
SSqlExpr **pExpr, SExprInfo *prevExpr, SUdfInfo *pUdfInfo);
int32_t createQueryFilter(char *data, uint16_t len, void** pFilters);
int32_t createQueryFilter(char *data, int32_t len, void** pFilters);
SGroupbyExpr *createGroupbyExprFromMsg(SQueryTableMsg *pQueryMsg, SColIndex *pColIndex, int32_t *code);
SQInfo *createQInfoImpl(SQueryTableMsg *pQueryMsg, SGroupbyExpr *pGroupbyExpr, SExprInfo *pExprs,
......
......@@ -86,7 +86,7 @@ bool taosFillHasMoreResults(SFillInfo* pFillInfo);
int64_t getNumOfResultsAfterFillGap(SFillInfo* pFillInfo, int64_t ekey, int32_t maxNumOfRows);
int32_t taosGetLinearInterpolationVal(SPoint* point, int32_t outputType, SPoint* point1, SPoint* point2, int32_t inputType);
int32_t taosGetLinearInterpolationVal(SPoint* point, int32_t outputType, SPoint* point1, SPoint* point2, int32_t inputType, bool *exceedMax, bool *exceedMin);
int64_t taosFillResultDataBlock(SFillInfo* pFillInfo, void** output, int32_t capacity);
......
......@@ -101,14 +101,6 @@ typedef struct SFilterRange {
char eflag;
} SFilterRange;
typedef struct SFilterColRange {
uint16_t idx; //column field idx
bool isNull;
bool notNull;
bool isRange;
SFilterRange ra;
} SFilterColRange;
typedef bool (*rangeCompFunc) (const void *, const void *, const void *, const void *, __compar_fn_t);
typedef int32_t(*filter_desc_compare_func)(const void *, const void *);
typedef bool(*filter_exec_func)(void *, int32_t, int8_t**, SDataStatis *, int16_t);
......@@ -160,20 +152,20 @@ typedef struct SFilterField {
} SFilterField;
typedef struct SFilterFields {
uint16_t size;
uint16_t num;
uint32_t size;
uint32_t num;
SFilterField *fields;
} SFilterFields;
typedef struct SFilterFieldId {
uint16_t type;
uint16_t idx;
uint32_t idx;
} SFilterFieldId;
typedef struct SFilterGroup {
uint16_t unitSize;
uint16_t unitNum;
uint16_t *unitIdxs;
uint32_t unitSize;
uint32_t unitNum;
uint32_t *unitIdxs;
uint8_t *unitFlags; // !unit result
} SFilterGroup;
......@@ -186,13 +178,13 @@ typedef struct SFilterColInfo {
} SFilterColInfo;
typedef struct SFilterGroupCtx {
uint16_t colNum;
uint16_t *colIdx;
uint32_t colNum;
uint32_t *colIdx;
SFilterColInfo *colInfo;
} SFilterGroupCtx;
typedef struct SFilterColCtx {
uint16_t colIdx;
uint32_t colIdx;
void* ctx;
} SFilterColCtx;
......@@ -229,13 +221,12 @@ typedef struct SFilterPCtx {
typedef struct SFilterInfo {
uint32_t options;
uint32_t status;
uint16_t unitSize;
uint16_t unitNum;
uint16_t groupNum;
uint16_t colRangeNum;
uint32_t unitSize;
uint32_t unitNum;
uint32_t groupNum;
uint32_t colRangeNum;
SFilterFields fields[FLD_TYPE_MAX];
SFilterGroup *groups;
uint16_t *cgroups;
SFilterUnit *units;
SFilterComUnit *cunits;
uint8_t *unitRes; // result
......@@ -243,15 +234,13 @@ typedef struct SFilterInfo {
SFilterRangeCtx **colRange;
filter_exec_func func;
uint8_t blkFlag;
uint16_t blkGroupNum;
uint16_t *blkUnits;
uint32_t blkGroupNum;
uint32_t *blkUnits;
int8_t *blkUnitRes;
SFilterPCtx pctx;
} SFilterInfo;
#define COL_FIELD_SIZE (sizeof(SFilterField) + 2 * sizeof(int64_t))
#define FILTER_NO_MERGE_DATA_TYPE(t) ((t) == TSDB_DATA_TYPE_BINARY || (t) == TSDB_DATA_TYPE_NCHAR)
#define FILTER_NO_MERGE_OPTR(o) ((o) == TSDB_RELATION_ISNULL || (o) == TSDB_RELATION_NOTNULL || (o) == FILTER_DUMMY_EMPTY_OPTR)
......@@ -268,7 +257,7 @@ typedef struct SFilterInfo {
#define FILTER_CLR_FLAG(st, f) st &= (~f)
#define SIMPLE_COPY_VALUES(dst, src) *((int64_t *)dst) = *((int64_t *)src)
#define FILTER_PACKAGE_UNIT_HASH_KEY(v, optr, idx1, idx2) do { char *_t = (char *)v; _t[0] = optr; *(uint16_t *)(_t + 1) = idx1; *(uint16_t *)(_t + 3) = idx2; } while (0)
#define FILTER_PACKAGE_UNIT_HASH_KEY(v, optr, idx1, idx2) do { char *_t = (char *)v; _t[0] = optr; *(uint32_t *)(_t + 1) = idx1; *(uint32_t *)(_t + 3) = idx2; } while (0)
#define FILTER_GREATER(cr,sflag,eflag) ((cr > 0) || ((cr == 0) && (FILTER_GET_FLAG(sflag,RANGE_FLG_EXCLUDE) || FILTER_GET_FLAG(eflag,RANGE_FLG_EXCLUDE))))
#define FILTER_COPY_RA(dst, src) do { (dst)->sflag = (src)->sflag; (dst)->eflag = (src)->eflag; (dst)->s = (src)->s; (dst)->e = (src)->e; } while (0)
......@@ -323,7 +312,7 @@ typedef struct SFilterInfo {
#define FILTER_PUSH_VAR_HASH(colInfo, ha) do { (colInfo).type = RANGE_TYPE_VAR_HASH; (colInfo).info = ha;} while (0)
#define FILTER_PUSH_CTX(colInfo, ctx) do { (colInfo).type = RANGE_TYPE_MR_CTX; (colInfo).info = ctx;} while (0)
#define FILTER_COPY_IDX(dst, src, n) do { *(dst) = malloc(sizeof(uint16_t) * n); memcpy(*(dst), src, sizeof(uint16_t) * n);} while (0)
#define FILTER_COPY_IDX(dst, src, n) do { *(dst) = malloc(sizeof(uint32_t) * n); memcpy(*(dst), src, sizeof(uint32_t) * n);} while (0)
#define FILTER_ADD_CTX_TO_GRES(gres, idx, ctx) do { if ((gres)->colCtxs == NULL) { (gres)->colCtxs = taosArrayInit(gres->colNum, sizeof(SFilterColCtx)); } SFilterColCtx cCtx = {idx, ctx}; taosArrayPush((gres)->colCtxs, &cCtx); } while (0)
......
......@@ -85,6 +85,11 @@ typedef struct SIntervalVal {
SStrToken offset;
} SIntervalVal;
typedef struct SRangeVal {
void *start;
void *end;
} SRangeVal;
typedef struct SSessionWindowVal {
SStrToken col;
SStrToken gap;
......@@ -111,6 +116,7 @@ typedef struct SSqlNode {
SLimitVal slimit; // group limit offset [optional]
SStrToken sqlstr; // sql string in select clause
struct tSqlExpr *pHaving; // having clause [optional]
SRangeVal pRange; // range clause [optional]
} SSqlNode;
typedef struct SRelElementPair {
......@@ -272,6 +278,7 @@ typedef struct tSqlExprItem {
bool distinct;
} tSqlExprItem;
SArray *tVariantListAppend(SArray *pList, tVariant *pVar, uint8_t sortOrder);
SArray *tVariantListInsert(SArray *pList, tVariant *pVar, uint8_t sortOrder, int32_t index);
SArray *tVariantListAppendToken(SArray *pList, SStrToken *pAliasToken, uint8_t sortOrder);
......@@ -281,6 +288,7 @@ void *destroyRelationInfo(SRelationInfo* pFromInfo);
SRelationInfo *addSubqueryElem(SRelationInfo* pRelationInfo, SArray* pSub, SStrToken* pAlias);
// sql expr leaf node
tSqlExpr *tSqlExprCreateTimestamp(SStrToken *pToken, int32_t optrType);
tSqlExpr *tSqlExprCreateIdValue(SSqlInfo* pInfo, SStrToken *pToken, int32_t optrType);
tSqlExpr *tSqlExprCreateFunction(SArray *pParam, SStrToken *pFuncToken, SStrToken *endToken, int32_t optType);
SArray *tStrTokenAppend(SArray *pList, SStrToken *pToken);
......@@ -296,7 +304,7 @@ void tSqlExprListDestroy(SArray *pList);
SSqlNode *tSetQuerySqlNode(SStrToken *pSelectToken, SArray *pSelNodeList, SRelationInfo *pFrom, tSqlExpr *pWhere,
SArray *pGroupby, SArray *pSortOrder, SIntervalVal *pInterval, SSessionWindowVal *ps, SWindowStateVal *pw,
SStrToken *pSliding, SArray *pFill, SLimitVal *pLimit, SLimitVal *pgLimit, tSqlExpr *pHaving);
SStrToken *pSliding, SArray *pFill, SLimitVal *pLimit, SLimitVal *pgLimit, tSqlExpr *pHaving, SRangeVal *pRange);
int32_t tSqlExprCompare(tSqlExpr *left, tSqlExpr *right);
SCreateTableSql *tSetCreateTableInfo(SArray *pCols, SArray *pTags, SSqlNode *pSelect, int32_t type);
......
......@@ -144,6 +144,8 @@ typedef struct SQueryInfo {
bool udfCopy;
SArray *pUdfInfo;
STimeWindow range; // range for interp
struct SQInfo *pQInfo; // global merge operator
struct SQueryAttr *pQueryAttr; // query object
......
......@@ -127,12 +127,6 @@ cmd ::= SHOW dbPrefix(X) VGROUPS. {
setShowOptions(pInfo, TSDB_MGMT_TABLE_VGROUP, &token, 0);
}
cmd ::= SHOW dbPrefix(X) VGROUPS ids(Y). {
SStrToken token;
tSetDbName(&token, &X);
setShowOptions(pInfo, TSDB_MGMT_TABLE_VGROUP, &token, &Y);
}
//drop configure for tables
cmd ::= DROP TABLE ifexists(Y) ids(X) cpxName(Z). {
X.n += Z.n;
......@@ -482,8 +476,8 @@ tagitem(A) ::= PLUS(X) FLOAT(Y). {
//////////////////////// The SELECT statement /////////////////////////////////
%type select {SSqlNode*}
%destructor select {destroySqlNode($$);}
select(A) ::= SELECT(T) selcollist(W) from(X) where_opt(Y) interval_option(K) sliding_opt(S) session_option(H) windowstate_option(D) fill_opt(F)groupby_opt(P) having_opt(N) orderby_opt(Z) slimit_opt(G) limit_opt(L). {
A = tSetQuerySqlNode(&T, W, X, Y, P, Z, &K, &H, &D, &S, F, &L, &G, N);
select(A) ::= SELECT(T) selcollist(W) from(X) where_opt(Y) range_option(R) interval_option(K) sliding_opt(S) session_option(H) windowstate_option(D) fill_opt(F)groupby_opt(P) having_opt(N) orderby_opt(Z) slimit_opt(G) limit_opt(L). {
A = tSetQuerySqlNode(&T, W, X, Y, P, Z, &K, &H, &D, &S, F, &L, &G, N, &R);
}
select(A) ::= LP select(B) RP. {A = B;}
......@@ -501,7 +495,7 @@ cmd ::= union(X). { setSqlInfo(pInfo, X, NULL, TSDB_SQL_SELECT); }
// select client_version()
// select server_state()
select(A) ::= SELECT(T) selcollist(W). {
A = tSetQuerySqlNode(&T, W, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL);
A = tSetQuerySqlNode(&T, W, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL);
}
// selcollist is a list of expressions that are to become the return
......@@ -572,6 +566,22 @@ tablelist(A) ::= tablelist(Y) COMMA ids(X) cpxName(Z) ids(F). {
%type tmvar {SStrToken}
tmvar(A) ::= VARIABLE(X). {A = X;}
%type timestamp {tSqlExpr*}
%destructor timestamp {tSqlExprDestroy($$);}
timestamp(A) ::= INTEGER(X). { A = tSqlExprCreateTimestamp(&X, TK_INTEGER);}
timestamp(A) ::= MINUS(X) INTEGER(Y). { X.n += Y.n; X.type = TK_INTEGER; A = tSqlExprCreateTimestamp(&X, TK_INTEGER);}
timestamp(A) ::= PLUS(X) INTEGER(Y). { X.n += Y.n; X.type = TK_INTEGER; A = tSqlExprCreateTimestamp(&X, TK_INTEGER);}
timestamp(A) ::= STRING(X). { A = tSqlExprCreateTimestamp(&X, TK_STRING);}
timestamp(A) ::= NOW(X). { A = tSqlExprCreateTimestamp(&X, TK_NOW); }
timestamp(A) ::= NOW PLUS VARIABLE(Y). {A = tSqlExprCreateTimestamp(&Y, TK_PLUS); }
timestamp(A) ::= NOW MINUS VARIABLE(Y). {A = tSqlExprCreateTimestamp(&Y, TK_MINUS); }
%type range_option {SRangeVal}
range_option(N) ::= . {N.start = 0; N.end = 0;}
range_option(N) ::= RANGE LP timestamp(E) COMMA timestamp(X) RP. {N.start = E; N.end = X;}
%type interval_option {SIntervalVal}
interval_option(N) ::= intervalKey(A) LP tmvar(E) RP. {N.interval = E; N.offset.n = 0; N.token = A;}
interval_option(N) ::= intervalKey(A) LP tmvar(E) COMMA tmvar(X) RP. {N.interval = E; N.offset = X; N.token = A;}
......
This diff is collapsed.
This diff is collapsed.
......@@ -118,10 +118,11 @@ static void doFillOneRowResult(SFillInfo* pFillInfo, void** data, char** srcData
continue;
}
bool exceedMax = false, exceedMin = false;
point1 = (SPoint){.key = *(TSKEY*)(prev), .val = prev + pCol->col.offset};
point2 = (SPoint){.key = ts, .val = srcData[i] + pFillInfo->index * bytes};
point = (SPoint){.key = pFillInfo->currentKey, .val = val1};
taosGetLinearInterpolationVal(&point, type, &point1, &point2, type);
taosGetLinearInterpolationVal(&point, type, &point1, &point2, type, &exceedMax, &exceedMin);
}
} else {
setNullValueForRow(pFillInfo, data, pFillInfo->numOfCols, index);
......@@ -493,12 +494,20 @@ int64_t getNumOfResultsAfterFillGap(SFillInfo* pFillInfo, TSKEY ekey, int32_t ma
return (numOfRes > maxNumOfRows) ? maxNumOfRows : numOfRes;
}
int32_t taosGetLinearInterpolationVal(SPoint* point, int32_t outputType, SPoint* point1, SPoint* point2, int32_t inputType) {
double v1 = -1, v2 = -1;
int32_t taosGetLinearInterpolationVal(SPoint* point, int32_t outputType, SPoint* point1, SPoint* point2, int32_t inputType, bool *exceedMax, bool *exceedMin) {
double v1 = -1, v2 = -1, vmax = -1, vmin = -1;
GET_TYPED_DATA(v1, double, inputType, point1->val);
GET_TYPED_DATA(v2, double, inputType, point2->val);
GET_TYPED_DATA(vmax, double, outputType, getDataMax(outputType));
GET_TYPED_DATA(vmin, double, outputType, getDataMin(outputType));
double r = DO_INTERPOLATION(v1, v2, point1->key, point2->key, point->key);
if (r >= vmax) {
*exceedMax = true;
} else if (r <= vmin) {
*exceedMin = true;
}
SET_TYPED_DATA(point->val, outputType, r);
return TSDB_CODE_SUCCESS;
......
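The qFill.c hunk above extends taosGetLinearInterpolationVal so that an interpolated value falling outside the output type's range is reported through exceedMax/exceedMin. The standalone sketch below shows the same idea with the linear-interpolation formula written out (the real code goes through the DO_INTERPOLATION and GET_TYPED_DATA/SET_TYPED_DATA macros); the 32-bit integer bounds and the sample points are illustrative assumptions.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Interpolate linearly between (k1, v1) and (k2, v2) at key k, then flag the
 * result if it falls outside the representable range [vmin, vmax] of the
 * output type, mirroring the exceedMax/exceedMin check added above. */
static double interpolate_with_flags(double v1, double v2, int64_t k1, int64_t k2,
                                     int64_t k, double vmin, double vmax,
                                     bool *exceedMax, bool *exceedMin) {
  double r = v1 + (v2 - v1) * (double)(k - k1) / (double)(k2 - k1);
  if (r >= vmax) {
    *exceedMax = true;
  } else if (r <= vmin) {
    *exceedMin = true;
  }
  return r;
}

int main(void) {
  bool exceedMax = false, exceedMin = false;
  // hypothetical sample: interpolate at k=15 between two large values with an INT32 output type
  double r = interpolate_with_flags(2000000000.0, 2200000000.0, 10, 20, 15,
                                    (double)INT32_MIN, (double)INT32_MAX,
                                    &exceedMax, &exceedMin);
  printf("r=%.1f exceedMax=%d exceedMin=%d\n", r, exceedMax, exceedMin);
  return 0;
}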
This diff is collapsed.
......@@ -538,9 +538,9 @@ SArray* createTableScanPlan(SQueryAttr* pQueryAttr) {
} else {
if (pQueryAttr->queryBlockDist) {
op = OP_TableBlockInfoScan;
} else if (pQueryAttr->tsCompQuery || pQueryAttr->pointInterpQuery || pQueryAttr->diffQuery) {
} else if (pQueryAttr->tsCompQuery || pQueryAttr->diffQuery) {
op = OP_TableSeqScan;
} else if (pQueryAttr->needReverseScan) {
} else if (pQueryAttr->needReverseScan || pQueryAttr->pointInterpQuery) {
op = OP_DataBlocksOptScan;
} else {
op = OP_TableScan;
......@@ -564,20 +564,15 @@ SArray* createExecOperatorPlan(SQueryAttr* pQueryAttr) {
op = OP_Distinct;
taosArrayPush(plan, &op);
}
} else if (pQueryAttr->pointInterpQuery) {
op = OP_TimeEvery;
taosArrayPush(plan, &op);
} else if (pQueryAttr->interval.interval > 0) {
if (pQueryAttr->stableQuery) {
if (pQueryAttr->pointInterpQuery) {
op = OP_AllMultiTableTimeInterval;
} else {
op = OP_MultiTableTimeInterval;
}
op = OP_MultiTableTimeInterval;
taosArrayPush(plan, &op);
} else {
if (pQueryAttr->pointInterpQuery) {
op = OP_AllTimeWindow;
} else {
op = OP_TimeWindow;
}
op = OP_TimeWindow;
taosArrayPush(plan, &op);
if (pQueryAttr->pExpr2 != NULL) {
......@@ -704,7 +699,7 @@ SArray* createGlobalMergePlan(SQueryAttr* pQueryAttr) {
}
// fill operator
if (pQueryAttr->fillType != TSDB_FILL_NONE && pQueryAttr->interval.interval > 0) {
if (pQueryAttr->fillType != TSDB_FILL_NONE && pQueryAttr->interval.interval > 0 && !pQueryAttr->pointInterpQuery) {
op = OP_Fill;
taosArrayPush(plan, &op);
}
......
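The qPlan.c hunk above stops special-casing point interpolation inside the interval operators: a pointInterpQuery now gets a dedicated OP_TimeEvery stage, the interval branches always pick the plain (multi-)table time-interval operators, and the fill stage is skipped for point interpolation. A simplified, self-contained sketch of that selection logic follows; the enum values and the function name are hypothetical and only echo the branch structure shown above.

#include <stdbool.h>
#include <stdio.h>

// Hypothetical operator codes; only the branch structure mirrors the hunk above.
typedef enum { OP_TableScan = 1, OP_TimeWindow, OP_MultiTableTimeInterval, OP_TimeEvery } EOp;

static EOp chooseMainOperator(bool pointInterpQuery, bool hasInterval, bool stableQuery) {
  if (pointInterpQuery) {
    return OP_TimeEvery;  // interp now runs in its own operator; no separate fill stage afterwards
  }
  if (hasInterval) {
    return stableQuery ? OP_MultiTableTimeInterval : OP_TimeWindow;
  }
  return OP_TableScan;  // placeholder for the remaining (aggregate/group by/...) branches
}

int main(void) {
  printf("interp query -> %d\n", chooseMainOperator(true, true, false));
  printf("interval query on a super table -> %d\n", chooseMainOperator(false, true, true));
  return 0;
}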
......@@ -192,6 +192,65 @@ tSqlExpr *tSqlExprCreateIdValue(SSqlInfo* pInfo, SStrToken *pToken, int32_t optr
return pSqlExpr;
}
tSqlExpr *tSqlExprCreateTimestamp(SStrToken *pToken, int32_t optrType) {
tSqlExpr *pSqlExpr = calloc(1, sizeof(tSqlExpr));
if (pToken != NULL) {
pSqlExpr->exprToken = *pToken;
}
if (optrType == TK_INTEGER || optrType == TK_STRING) {
if (pToken) {
toTSDBType(pToken->type);
tVariantCreate(&pSqlExpr->value, pToken);
}
pSqlExpr->tokenId = optrType;
pSqlExpr->type = SQL_NODE_VALUE;
} else if (optrType == TK_NOW) {
// use nanoseconds by default; TODO: set the value after getting the database precision
pSqlExpr->value.i64 = taosGetTimestamp(TSDB_TIME_PRECISION_NANO);
pSqlExpr->value.nType = TSDB_DATA_TYPE_BIGINT;
pSqlExpr->tokenId = TK_TIMESTAMP;  // TK_TIMESTAMP marks this as a resolved time value (stored here in nanoseconds)
pSqlExpr->type = SQL_NODE_VALUE;
pSqlExpr->flags |= 1 << EXPR_FLAG_NS_TIMESTAMP;
} else if (optrType == TK_PLUS || optrType == TK_MINUS) {
// use nanosecond by default
// TODO set value after getting database precision
if (pToken) {
char unit = 0;
int32_t ret = parseAbsoluteDuration(pToken->z, pToken->n, &pSqlExpr->value.i64, &unit, TSDB_TIME_PRECISION_NANO);
if (ret != TSDB_CODE_SUCCESS) {
terrno = TSDB_CODE_TSC_SQL_SYNTAX_ERROR;
}
}
if (optrType == TK_PLUS) {
pSqlExpr->value.i64 += taosGetTimestamp(TSDB_TIME_PRECISION_NANO);
} else {
pSqlExpr->value.i64 = taosGetTimestamp(TSDB_TIME_PRECISION_NANO) - pSqlExpr->value.i64;
}
pSqlExpr->flags |= 1 << EXPR_FLAG_NS_TIMESTAMP;
pSqlExpr->value.nType = TSDB_DATA_TYPE_BIGINT;
pSqlExpr->tokenId = TK_TIMESTAMP;
pSqlExpr->type = SQL_NODE_VALUE;
} else {
// Here it must be a column name (TK_ID) or wildcard (TK_ALL) if it is not a number or string.
assert(optrType == TK_ID || optrType == TK_ALL);
if (pToken != NULL) {
pSqlExpr->columnName = *pToken;
}
pSqlExpr->tokenId = optrType;
pSqlExpr->type = SQL_NODE_TABLE_COLUMN;
}
return pSqlExpr;
}
/*
* pList is the parameters for function with id(optType)
* function name is denoted by pFunctionToken
......@@ -751,7 +810,7 @@ void tSetColumnType(TAOS_FIELD *pField, SStrToken *type) {
SSqlNode *tSetQuerySqlNode(SStrToken *pSelectToken, SArray *pSelNodeList, SRelationInfo *pFrom, tSqlExpr *pWhere,
SArray *pGroupby, SArray *pSortOrder, SIntervalVal *pInterval,
SSessionWindowVal *pSession, SWindowStateVal *pWindowStateVal, SStrToken *pSliding, SArray *pFill, SLimitVal *pLimit,
SLimitVal *psLimit, tSqlExpr *pHaving) {
SLimitVal *psLimit, tSqlExpr *pHaving, SRangeVal *pRange) {
assert(pSelNodeList != NULL);
SSqlNode *pSqlNode = calloc(1, sizeof(SSqlNode));
......@@ -767,7 +826,10 @@ SSqlNode *tSetQuerySqlNode(SStrToken *pSelectToken, SArray *pSelNodeList, SRelat
pSqlNode->pWhere = pWhere;
pSqlNode->fillType = pFill;
pSqlNode->pHaving = pHaving;
if (pRange) {
pSqlNode->pRange = *pRange;
}
if (pLimit != NULL) {
pSqlNode->limit = *pLimit;
} else {
......
This diff is collapsed.
......@@ -342,7 +342,7 @@ int tsdbUpdateTableTagValue(STsdbRepo *repo, SUpdateTableTagValMsg *pMsg) {
tsdbError(
"vgId:%d failed to update tag value of table %s since version out of date, client tag version %d server tag "
"version %d",
REPO_ID(pRepo), TABLE_CHAR_NAME(pTable), pMsg->tversion, schemaVersion(pTable->tagSchema));
REPO_ID(pRepo), TABLE_CHAR_NAME(pTable), pMsg->tversion, schemaVersion(pTable->pSuper->tagSchema));
terrno = TSDB_CODE_TDB_TAG_VER_OUT_OF_DATE;
return -1;
}
......
......@@ -683,8 +683,8 @@ SArray* tsdbGetQueriedTableList(TsdbQueryHandleT *pHandle) {
TsdbQueryHandleT tsdbQueryRowsInExternalWindow(STsdbRepo *tsdb, STsdbQueryCond* pCond, STableGroupInfo *groupList, uint64_t qId, SMemRef* pRef) {
STsdbQueryHandle *pQueryHandle = (STsdbQueryHandle*) tsdbQueryTables(tsdb, pCond, groupList, qId, pRef);
pQueryHandle->loadExternalRow = true;
pQueryHandle->currentLoadExternalRows = true;
//pQueryHandle->loadExternalRow = true;
//pQueryHandle->currentLoadExternalRows = true;
return pQueryHandle;
}
......@@ -1377,66 +1377,63 @@ static int32_t loadFileDataBlock(STsdbQueryHandle* pQueryHandle, SBlock* pBlock,
return code;
}
static int doBinarySearchKey(char* pValue, int num, TSKEY key, int order) {
int firstPos, lastPos, midPos = -1;
int numOfRows;
TSKEY* keyList;
assert(order == TSDB_ORDER_ASC || order == TSDB_ORDER_DESC);
if (num <= 0) return -1;
keyList = (TSKEY*)pValue;
firstPos = 0;
lastPos = num - 1;
if (order == TSDB_ORDER_DESC) {
// search for the last keyList[ret] <= key when traversing in ASC order, or the last keyList[ret] >= key in DESC order
static int doBinarySearchKey(TSKEY* keyList, int num, int pos, TSKEY key, int order) {
// start and end positions
int s, e;
s = pos;
// check
assert(pos >=0 && pos < num);
assert(num > 0);
if (order == TSDB_ORDER_ASC) {
// find the first position which is smaller than the key
e = num - 1;
if (key < keyList[pos])
return -1;
while (1) {
if (key >= keyList[lastPos]) return lastPos;
if (key == keyList[firstPos]) return firstPos;
if (key < keyList[firstPos]) return firstPos - 1;
numOfRows = lastPos - firstPos + 1;
midPos = (numOfRows >> 1) + firstPos;
if (key < keyList[midPos]) {
lastPos = midPos - 1;
} else if (key > keyList[midPos]) {
firstPos = midPos + 1;
} else {
break;
}
}
} else {
// check whether we can return immediately
if (key >= keyList[e])
return e;
if (key <= keyList[s])
return s;
if (e - s <= 1)
return s;
// change start or end position
int mid = s + (e - s + 1)/2;
if (keyList[mid] > key)
e = mid;
else if(keyList[mid] < key)
s = mid;
else
return mid;
}
} else { // DESC
// find the first position which is bigger than the key
while (1) {
if (key <= keyList[firstPos]) return firstPos;
if (key == keyList[lastPos]) return lastPos;
if (key > keyList[lastPos]) {
lastPos = lastPos + 1;
if (lastPos >= num)
return -1;
e = 0;
if (key > keyList[pos])
return -1;
while (1) {
// check whether we can return immediately
if (key <= keyList[e])
return e;
if (key >= keyList[s])
return s;
if (s - e <= 1)
return s;
// change start or end position
int mid = s - (s - e + 1)/2;
if (keyList[mid] < key)
e = mid;
else if(keyList[mid] > key)
s = mid;
else
return lastPos;
}
numOfRows = lastPos - firstPos + 1;
midPos = (numOfRows >> 1) + firstPos;
if (key < keyList[midPos]) {
lastPos = midPos - 1;
} else if (key > keyList[midPos]) {
firstPos = midPos + 1;
} else {
break;
}
return mid;
}
}
}
return midPos;
}
static int32_t doCopyRowsFromFileBlock(STsdbQueryHandle* pQueryHandle, int32_t capacity, int32_t numOfRows, int32_t start, int32_t end) {
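The rewritten doBinarySearchKey above takes a starting position and searches only in the traversal direction, returning -1 when even the starting key already lies beyond the search key. The standalone sketch below reproduces the ASC branch's semantics (assumed from the code above, since the original is a static function inside tsdbRead.c) together with a few example calls.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

typedef int64_t TSKEY;

/* Standalone ASC-direction variant of the search above: starting from `pos`,
 * return the last index whose key is <= `key`, or -1 if keyList[pos] > key. */
static int searchLastLE(const TSKEY *keyList, int num, int pos, TSKEY key) {
  assert(num > 0 && pos >= 0 && pos < num);
  if (key < keyList[pos]) return -1;
  int s = pos, e = num - 1;
  while (1) {
    if (key >= keyList[e]) return e;
    if (key <= keyList[s]) return s;
    if (e - s <= 1) return s;
    int mid = s + (e - s + 1) / 2;
    if (keyList[mid] > key)      e = mid;
    else if (keyList[mid] < key) s = mid;
    else                         return mid;
  }
}

int main(void) {
  TSKEY keys[] = {10, 20, 30, 40, 50};
  printf("%d\n", searchLastLE(keys, 5, 0, 35));  // expected: 2 (keys[2] == 30)
  printf("%d\n", searchLastLE(keys, 5, 0, 50));  // expected: 4
  printf("%d\n", searchLastLE(keys, 5, 2, 15));  // expected: -1 (keys[2] > 15)
  return 0;
}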
......@@ -1844,7 +1841,6 @@ static void copyAllRemainRowsFromFileBlock(STsdbQueryHandle* pQueryHandle, STabl
int32_t getEndPosInDataBlock(STsdbQueryHandle* pQueryHandle, SDataBlockInfo* pBlockInfo) {
// NOTE: reverse the order to find the end position in data block
int32_t endPos = -1;
int32_t order = ASCENDING_TRAVERSE(pQueryHandle->order)? TSDB_ORDER_DESC : TSDB_ORDER_ASC;
SQueryFilePos* cur = &pQueryHandle->cur;
SDataCols* pCols = pQueryHandle->rhelper.pDCols[0];
......@@ -1857,7 +1853,9 @@ int32_t getEndPosInDataBlock(STsdbQueryHandle* pQueryHandle, SDataBlockInfo* pBl
cur->mixBlock = (cur->pos != pBlockInfo->rows - 1);
} else {
assert(pCols->numOfRows > 0);
endPos = doBinarySearchKey(pCols->cols[0].pData, pCols->numOfRows, pQueryHandle->window.ekey, order);
int pos = ASCENDING_TRAVERSE(pQueryHandle->order)? 0 : pBlockInfo->rows - 1;
endPos = doBinarySearchKey(pCols->cols[0].pData, pCols->numOfRows, pos, pQueryHandle->window.ekey, pQueryHandle->order);
assert(endPos != -1);
cur->mixBlock = true;
}
......@@ -1877,17 +1875,16 @@ static void doMergeTwoLevelData(STsdbQueryHandle* pQueryHandle, STableCheckInfo*
assert(pCols->cols[0].type == TSDB_DATA_TYPE_TIMESTAMP && pCols->cols[0].colId == PRIMARYKEY_TIMESTAMP_COL_INDEX &&
cur->pos >= 0 && cur->pos < pBlock->numOfRows);
TSKEY* tsArray = pCols->cols[0].pData;
assert(pCols->numOfRows == pBlock->numOfRows && tsArray[0] == pBlock->keyFirst && tsArray[pBlock->numOfRows-1] == pBlock->keyLast);
// for search the endPos, so the order needs to reverse
int32_t order = (pQueryHandle->order == TSDB_ORDER_ASC)? TSDB_ORDER_DESC:TSDB_ORDER_ASC;
// key read from file
TSKEY* keyFile = pCols->cols[0].pData;
assert(pCols->numOfRows == pBlock->numOfRows && keyFile[0] == pBlock->keyFirst && keyFile[pBlock->numOfRows-1] == pBlock->keyLast);
int32_t step = ASCENDING_TRAVERSE(pQueryHandle->order)? 1:-1;
int32_t numOfCols = (int32_t)(QH_GET_NUM_OF_COLS(pQueryHandle));
STable* pTable = pCheckInfo->pTableObj;
int32_t endPos = getEndPosInDataBlock(pQueryHandle, &blockInfo);
tsdbDebug("%p uid:%" PRIu64",tid:%d start merge data block, file block range:%"PRIu64"-%"PRIu64" rows:%d, start:%d,"
"end:%d, 0x%"PRIx64,
......@@ -1902,6 +1899,7 @@ static void doMergeTwoLevelData(STsdbQueryHandle* pQueryHandle, STableCheckInfo*
STSchema* pSchema1 = NULL;
STSchema* pSchema2 = NULL;
// current position within the file block
int32_t pos = cur->pos;
cur->win = TSWINDOW_INITIALIZER;
......@@ -1918,19 +1916,23 @@ static void doMergeTwoLevelData(STsdbQueryHandle* pQueryHandle, STableCheckInfo*
break;
}
TSKEY key = memRowKey(row1);
if ((key > pQueryHandle->window.ekey && ASCENDING_TRAVERSE(pQueryHandle->order)) ||
(key < pQueryHandle->window.ekey && !ASCENDING_TRAVERSE(pQueryHandle->order))) {
TSKEY keyMem = memRowKey(row1);
if ((keyMem > pQueryHandle->window.ekey && ASCENDING_TRAVERSE(pQueryHandle->order)) ||
(keyMem < pQueryHandle->window.ekey && !ASCENDING_TRAVERSE(pQueryHandle->order))) {
break;
}
if (((pos > endPos || tsArray[pos] > pQueryHandle->window.ekey) && ASCENDING_TRAVERSE(pQueryHandle->order)) ||
((pos < endPos || tsArray[pos] < pQueryHandle->window.ekey) && !ASCENDING_TRAVERSE(pQueryHandle->order))) {
break;
// break if pos is outside this block's endPos range; note that the old code could crash when pos was -1.
if(ASCENDING_TRAVERSE(pQueryHandle->order)) { //ASC
if(pos > endPos || keyFile[pos] > pQueryHandle->window.ekey)
break;
} else { //DESC
if(pos < endPos || keyFile[pos] < pQueryHandle->window.ekey)
break;
}
if ((key < tsArray[pos] && ASCENDING_TRAVERSE(pQueryHandle->order)) ||
(key > tsArray[pos] && !ASCENDING_TRAVERSE(pQueryHandle->order))) {
if ((keyMem < keyFile[pos] && ASCENDING_TRAVERSE(pQueryHandle->order)) ||
(keyMem > keyFile[pos] && !ASCENDING_TRAVERSE(pQueryHandle->order))) {
if (rv1 != memRowVersion(row1)) {
pSchema1 = tsdbGetTableSchemaByVersion(pTable, memRowVersion(row1), (int8_t)memRowType(row1));
rv1 = memRowVersion(row1);
......@@ -1942,16 +1944,18 @@ static void doMergeTwoLevelData(STsdbQueryHandle* pQueryHandle, STableCheckInfo*
mergeTwoRowFromMem(pQueryHandle, pQueryHandle->outputCapacity, numOfRows, row1, row2, numOfCols, pTable, pSchema1, pSchema2, true);
numOfRows += 1;
// record the start key from the memory row if it has not been set yet
if (cur->win.skey == TSKEY_INITIAL_VAL) {
cur->win.skey = key;
cur->win.skey = keyMem;
}
cur->win.ekey = key;
cur->lastKey = key + step;
cur->win.ekey = keyMem;
cur->lastKey = keyMem + step;
cur->mixBlock = true;
moveToNextRowInMem(pCheckInfo);
} else if (key == tsArray[pos]) { // data in buffer has the same timestamp of data in file block, ignore it
// equal timestamps: prefer the memory row when update is enabled
} else if (keyMem == keyFile[pos]) {
if (pCfg->update) {
if(pCfg->update == TD_ROW_PARTIAL_UPDATE) {
doCopyRowsFromFileBlock(pQueryHandle, pQueryHandle->outputCapacity, numOfRows, pos, pos);
......@@ -1969,31 +1973,36 @@ static void doMergeTwoLevelData(STsdbQueryHandle* pQueryHandle, STableCheckInfo*
mergeTwoRowFromMem(pQueryHandle, pQueryHandle->outputCapacity, numOfRows, row1, row2, numOfCols, pTable, pSchema1, pSchema2, forceSetNull);
numOfRows += 1;
if (cur->win.skey == TSKEY_INITIAL_VAL) {
cur->win.skey = key;
cur->win.skey = keyMem;
}
cur->win.ekey = key;
cur->lastKey = key + step;
cur->win.ekey = keyMem;
cur->lastKey = keyMem + step;
cur->mixBlock = true;
// advance the memory cursor
moveToNextRowInMem(pCheckInfo);
// advance the file cursor and discard the file row
pos += step;
} else {
// update disabled: only advance the memory cursor and discard the memory row
moveToNextRowInMem(pCheckInfo);
}
} else if ((key > tsArray[pos] && ASCENDING_TRAVERSE(pQueryHandle->order)) ||
(key < tsArray[pos] && !ASCENDING_TRAVERSE(pQueryHandle->order))) {
// copy rows from the file block
} else if ((keyMem > keyFile[pos] && ASCENDING_TRAVERSE(pQueryHandle->order)) ||
(keyMem < keyFile[pos] && !ASCENDING_TRAVERSE(pQueryHandle->order))) {
if (cur->win.skey == TSKEY_INITIAL_VAL) {
cur->win.skey = tsArray[pos];
cur->win.skey = keyFile[pos];
}
int32_t end = doBinarySearchKey(pCols->cols[0].pData, pCols->numOfRows, key, order);
int32_t end = doBinarySearchKey(pCols->cols[0].pData, pCols->numOfRows, pos, keyMem, pQueryHandle->order);
assert(end != -1);
if (tsArray[end] == key) { // the value of key in cache equals to the end timestamp value, ignore it
if (keyFile[end] == keyMem) { // the value of key in cache equals to the end timestamp value, ignore it
if (pCfg->update == TD_ROW_DISCARD_UPDATE) {
moveToNextRowInMem(pCheckInfo);
} else {
// update allowed: skip copying this row and handle it in the next iteration, where keyMem == keyFile[pos]
end -= step;
}
}
......@@ -2001,10 +2010,17 @@ static void doMergeTwoLevelData(STsdbQueryHandle* pQueryHandle, STableCheckInfo*
int32_t qstart = 0, qend = 0;
getQualifiedRowsPos(pQueryHandle, pos, end, numOfRows, &qstart, &qend);
numOfRows = doCopyRowsFromFileBlock(pQueryHandle, pQueryHandle->outputCapacity, numOfRows, qstart, qend);
pos += (qend - qstart + 1) * step;
cur->win.ekey = ASCENDING_TRAVERSE(pQueryHandle->order)? tsArray[qend]:tsArray[qstart];
if(qend >= qstart) {
// copy qend - qstart + 1 rows from file
numOfRows = doCopyRowsFromFileBlock(pQueryHandle, pQueryHandle->outputCapacity, numOfRows, qstart, qend);
int32_t num = qend - qstart + 1;
pos += num * step;
} else {
// nothing to copy from the file block
pos += step;
}
cur->win.ekey = ASCENDING_TRAVERSE(pQueryHandle->order)? keyFile[qend] : keyFile[qstart];
cur->lastKey = cur->win.ekey + step;
}
} while (numOfRows < pQueryHandle->outputCapacity);
......@@ -2021,7 +2037,7 @@ static void doMergeTwoLevelData(STsdbQueryHandle* pQueryHandle, STableCheckInfo*
!ASCENDING_TRAVERSE(pQueryHandle->order))) {
// no data in cache or data in cache is greater than the ekey of time window, load data from file block
if (cur->win.skey == TSKEY_INITIAL_VAL) {
cur->win.skey = tsArray[pos];
cur->win.skey = keyFile[pos];
}
int32_t start = -1, end = -1;
......@@ -2030,7 +2046,7 @@ static void doMergeTwoLevelData(STsdbQueryHandle* pQueryHandle, STableCheckInfo*
numOfRows = doCopyRowsFromFileBlock(pQueryHandle, pQueryHandle->outputCapacity, numOfRows, start, end);
pos += (end - start + 1) * step;
cur->win.ekey = ASCENDING_TRAVERSE(pQueryHandle->order)? tsArray[end]:tsArray[start];
cur->win.ekey = ASCENDING_TRAVERSE(pQueryHandle->order)? keyFile[end] : keyFile[start];
cur->lastKey = cur->win.ekey + step;
cur->mixBlock = true;
}
......@@ -2913,6 +2929,22 @@ static bool loadCachedLast(STsdbQueryHandle* pQueryHandle) {
return false;
}
void tsdbSwitchTable(TsdbQueryHandleT queryHandle) {
STsdbQueryHandle* pQueryHandle = (STsdbQueryHandle*) queryHandle;
STableCheckInfo* pCheckInfo = taosArrayGet(pQueryHandle->pTableCheckInfo, pQueryHandle->activeIndex);
pCheckInfo->numOfBlocks = 0;
pQueryHandle->locateStart = false;
pQueryHandle->checkFiles = true;
pQueryHandle->cur.rows = 0;
pQueryHandle->currentLoadExternalRows = pQueryHandle->loadExternalRow;
terrno = TSDB_CODE_SUCCESS;
++pQueryHandle->activeIndex;
}
static bool loadDataBlockFromTableSeq(STsdbQueryHandle* pQueryHandle) {
size_t numOfTables = taosArrayGetSize(pQueryHandle->pTableCheckInfo);
......@@ -2946,6 +2978,9 @@ static bool loadDataBlockFromTableSeq(STsdbQueryHandle* pQueryHandle) {
// handle data in cache situation
bool tsdbNextDataBlock(TsdbQueryHandleT pHandle) {
STsdbQueryHandle* pQueryHandle = (STsdbQueryHandle*) pHandle;
if (pQueryHandle == NULL) {
return false;
}
if (emptyQueryTimewindow(pQueryHandle)) {
tsdbDebug("%p query window not overlaps with the data set, no result returned, 0x%"PRIx64, pQueryHandle, pQueryHandle->qId);
......@@ -3065,6 +3100,9 @@ static int32_t doGetExternalRow(STsdbQueryHandle* pQueryHandle, int16_t type, SM
pSecQueryHandle = tsdbQueryTablesImpl(pQueryHandle->pTsdb, &cond, pQueryHandle->qId, pMemRef);
tfree(cond.colList);
if (pSecQueryHandle == NULL) {
goto out_of_memory;
}
// current table, only one table
STableCheckInfo* pCurrent = taosArrayGet(pQueryHandle->pTableCheckInfo, pQueryHandle->activeIndex);
......
......@@ -229,7 +229,8 @@ static SKeyword keywordTable[] = {
{"FUNCTIONS", TK_FUNCTIONS},
{"OUTPUTTYPE", TK_OUTPUTTYPE},
{"AGGREGATE", TK_AGGREGATE},
{"BUFSIZE", TK_BUFSIZE}
{"BUFSIZE", TK_BUFSIZE},
{"RANGE", TK_RANGE}
};
static const char isIdChar[] = {
......
......@@ -164,8 +164,12 @@ namespace TDengineDriver
[DllImport("taos", EntryPoint = "taos_close", CallingConvention = CallingConvention.Cdecl)]
static extern public int Close(IntPtr taos);
//get precisionin parameter restultset
//get precision in result set
[DllImport("taos", EntryPoint = "taos_result_precision", CallingConvention = CallingConvention.Cdecl)]
static extern public int ResultPrecision(IntPtr taos);
//schemaless API
[DllImport("taos",SetLastError = true, EntryPoint = "taos_schemaless_insert", CallingConvention = CallingConvention.Cdecl)]
static extern public IntPtr SchemalessInsert(IntPtr taos, string[] lines, int numLines, int protocol, int precision);
}
}
......@@ -164,8 +164,12 @@ namespace TDengineDriver
[DllImport("taos", EntryPoint = "taos_close", CallingConvention = CallingConvention.Cdecl)]
static extern public int Close(IntPtr taos);
//get precisionin parameter restultset
//get precision in result set
[DllImport("taos", EntryPoint = "taos_result_precision", CallingConvention = CallingConvention.Cdecl)]
static extern public int ResultPrecision(IntPtr taos);
//schemaless API
[DllImport("taos",SetLastError = true, EntryPoint = "taos_schemaless_insert", CallingConvention = CallingConvention.Cdecl)]
static extern public IntPtr SchemalessInsert(IntPtr taos, string[] lines, int numLines, int protocol, int precision);
}
}
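Both C# binding files above gain a SchemalessInsert entry point for the native schemaless API. A hedged C sketch of calling that native function directly follows; it assumes the 2.x signature TAOS_RES *taos_schemaless_insert(TAOS*, char* lines[], int numLines, int protocol, int precision), and the connection parameters, the sample line-protocol record, and the literal protocol/precision codes are illustrative assumptions -- check taos.h for the exact enums.

#include <stdio.h>
#include <taos.h>

int main(void) {
  // hypothetical connection parameters; adjust host/user/password/db as needed
  TAOS *conn = taos_connect("localhost", "root", "taosdata", "demo", 0);
  if (conn == NULL) {
    fprintf(stderr, "failed to connect\n");
    return 1;
  }

  // one influx-style line-protocol record (illustrative sample data)
  char *lines[] = {
      "st,t1=3i64,t2=4f64 c1=3i64,c2=false 1626006833639000000",
  };

  // 1 = line protocol, 0 = timestamp precision not configured (assumed codes; see taos.h)
  TAOS_RES *res = taos_schemaless_insert(conn, lines, 1, 1, 0);
  if (taos_errno(res) != 0) {
    fprintf(stderr, "schemaless insert failed: %s\n", taos_errstr(res));
  }
  taos_free_result(res);
  taos_close(conn);
  return 0;
}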
This diff is collapsed.
This diff is collapsed.
......@@ -279,6 +279,7 @@ python3 ./test.py -f query/queryCnameDisplay.py
# python3 ./test.py -f query/long_where_query.py
python3 test.py -f query/nestedQuery/queryWithSpread.py
python3 ./test.py -f query/bug6586.py
# python3 ./test.py -f query/bug5903.py
#stream
python3 ./test.py -f stream/metric_1.py
......@@ -363,7 +364,7 @@ python3 ./test.py -f query/last_row_cache.py
python3 ./test.py -f account/account_create.py
python3 ./test.py -f alter/alter_table.py
python3 ./test.py -f query/queryGroupbySort.py
python3 ./test.py -f functions/queryTestCases.py
#python3 ./test.py -f functions/queryTestCases.py
python3 ./test.py -f functions/function_stateWindow.py
python3 ./test.py -f functions/function_derivative.py
python3 ./test.py -f functions/function_irate.py
......
......@@ -1289,6 +1289,34 @@ class TDTestCase:
" fill(linear)"
]
tdSql.prepare()
# need to insert new data -- data type is double or float; tests ceil, floor and round
tdSql.execute("create table if not exists jsons7(ts timestamp, dataInt int, dataBool bool, datafloat float, datadouble double, dataStr nchar(50)) tags(jtag nchar(128))")
tdSql.execute("insert into jsons7_1 using jsons7 tags('{\"nv\":null,\"tea\":true,\"\":false,\" \":123,\"tea\":false}') values (now,2,'true',0.9,0.1,'123')")
tdSql.query("select * from jsons7")
tdSql.checkRows(1)
tdSql.execute("insert into jsons7_1 values (now+1s,3,'true',-4.8,-5.5,'123') ")
tdSql.execute("insert into jsons7_1 values (now+2s,4,'true',1.9998,2.00001,'123') ")
tdSql.execute("insert into jsons7_2 using jsons7 tags('{\"nv\":null,\"tea\":true,\"\":false,\"tag\":123,\"tea\":false}') values (now,5,'true',4.01,2.2,'123') ")
tdSql.execute("insert into jsons7_2 (ts,datadouble) values (now+3s,-0.9) ")
tdSql.execute("insert into jsons7_2 (ts,datadouble) values (now+4s,-2.9) ")
tdSql.execute("insert into jsons7_2 (ts,datafloat) values (now+1s,-0.9) ")
tdSql.execute("insert into jsons7_2 (ts,datafloat) values (now+2s,-1.9) ")
tdSql.query("select ts,ceil(dataint),ceil(datafloat),ceil(datadouble) from jsons7")
tdSql.checkRows(8)
tdSql.checkData(5, 1, None)
tdSql.checkData(6, 2, None)
tdSql.checkData(7, 3, -2)
tdSql.query("select ceil(dataint),ceil(datafloat),ceil(datadouble) from jsons7")
tdSql.checkRows(8)
tdSql.checkData(5, 1, -1)
tdSql.checkData(5, 2, None)
tdSql.checkData(7, 0, None)
tdSql.checkData(7, 2, -2)
tdSql.query("select ts,floor(dataint),floor(datafloat),floor(datadouble) from jsons7")
tdSql.query("select floor(dataint),floor(datafloat),floor(datadouble) from jsons7")
tdSql.query("select ts,round(dataint),round(datafloat),round(datadouble) from jsons7")
tdSql.query("select round(dataint),round(datafloat),round(datadouble) from jsons7")
tdSql.execute(
"create stable super (ts timestamp, timestamp_col timestamp, int_col int, bigint_col bigint, float_col float,\
double_col double, binary_col binary(8), smallint_col smallint, tinyint_col tinyint, bool_col bool, nchar_col nchar(8), \
......
......@@ -68,9 +68,9 @@ class TDTestCase:
tdSql.checkData(0, 0, "2018-09-17 09:00:10.000")
tdSql.checkData(0, 1, "2018-09-17 09:00:10.000")
tdSql.checkData(0, 3, "2018-09-17 09:00:10.000")
tdSql.checkData(1, 0, "2018-09-17 09:00:20.009")
tdSql.checkData(1, 1, "2018-09-17 09:00:20.009")
tdSql.checkData(1, 3, "2018-09-17 09:00:20.009")
tdSql.checkData(1, 0, "2018-09-17 09:00:20.000")
tdSql.checkData(1, 1, "2018-09-17 09:00:20.000")
tdSql.checkData(1, 3, "2018-09-17 09:00:20.000")
tdSql.query("select ts from(select ts,derivative(col, 10s, 0) from stb group by tbname)")
......@@ -150,6 +150,7 @@ class TDTestCase:
tdSql.error("select derivative(col, -106752999999999922222d, 0) from stb group by tbname"); #overflow error
tdSql.error("select derivative(col, 10y, 0) from stb group by tbname") #TD-10399, DB error: syntax error near '10y, 0) from stb group by tbname;'
tdSql.error("select derivative(col, -106752d, 0) from stb group by tbname") #TD-10398 overflow tips
tdSql.error("select derivative(col, 106751991168d, 0) from stb group by tbname") #TD-10398 overflow tips
def run(self):
tdSql.prepare()
......
......@@ -84,7 +84,7 @@ class TDTestCase:
index_value = np.dstack((cpms_index[0])).squeeze()
tdSql.query("show variables")
tdSql.checkData(index_value, 1, -1)
tdSql.checkData(index_value, 1, 524288)
tdSql.query("show dnodes")
index = tdSql.getData(0, 0)
......@@ -1570,7 +1570,7 @@ class TDTestCase:
# master branch
self.td3690()
self.td4082()
# self.td4082()
self.td4288()
self.td4724()
self.td5935()
......
......@@ -45,6 +45,11 @@ class TDTestCase:
# test case for https://jira.taosdata.com:18080/browse/TD-3716:
tdSql.error("insert into tb(now, 1)")
# test case for TD-10717
tdSql.error("insert into tb values(now,1),,(now+1s,1)")
tdSql.execute("insert into tb values(now+2s,1),(now+3s,1),(now+4s,1)")
tdSql.query("select * from tb")
tdSql.checkRows(insertRows + 4 +3)
def stop(self):
tdSql.close()
......
This diff is collapsed.
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# -*- coding: utf-8 -*-
from util.log import *
from util.cases import *
from util.sql import *
class TDTestCase:
def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor(), logSql)
def run(self):
# TD-5903: 'show db.vgroups xxx' -- xxx is invalid content, but the statement still returns results
tdSql.execute("create database if not exists test_5903")
tdSql.execute("show test_5903.vgroups")
tdSql.error("show test_5903.vgroups xxx")
def stop(self):
tdSql.close()
tdLog.success("%s successfully executed" % __file__)
tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())
This diff is collapsed.
This diff is collapsed.
......@@ -178,6 +178,8 @@ class TDTestCase:
tdSql.checkRows(1)
self.tb193new = "table_193~!@#$%^&*()-_+=[]{}':,<.>/?stST0123456789table_192~!@#$%^&*()-_+=[]{}':,<.>/?stST0123456789table_192~!@#$%^&*()-_+=[]{}':,<.>/?stST0123456789table_192~!@#$%^&*()-_+=[]{}':,<.>/?stST123"
tdSql.error("create table db.`%s` using db.`%s` tags(1)" %(self.tb193new,self.stb1))
# case for TD-10691
tdSql.error("create table ttb1(ts timestamp, file int )")
......
This diff is collapsed.
......@@ -130,7 +130,7 @@ run general/parser/limit2.sim
run general/parser/slimit.sim
run general/parser/fill.sim
run general/parser/fill_stb.sim
run general/parser/interp.sim
run general/parser/interp_full.sim
run general/parser/where.sim
run general/parser/join.sim
run general/parser/join_multivnode.sim
......
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
......@@ -20,7 +20,7 @@ run general/parser/import_commit3.sim
run general/parser/import_file.sim
run general/parser/insert_tb.sim
run general/parser/tags_dynamically_specifiy.sim
run general/parser/interp.sim
run general/parser/interp_full.sim
run general/parser/lastrow.sim
run general/parser/limit.sim
run general/parser/limit1.sim
......
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.