Unverified commit 19c185c8, authored by H huili, committed by GitHub

Merge branch 'master' into test/TD-3921

......@@ -203,6 +203,14 @@ static void tscProcessStreamQueryCallback(void *param, TAOS_RES *tres, int numOf
tNameExtractFullName(&pTableMetaInfo->name, name);
taosHashRemove(tscTableMetaInfo, name, strnlen(name, TSDB_TABLE_FNAME_LEN));
tfree(pTableMetaInfo->pTableMeta);
tscFreeSqlResult(pStream->pSql);
tscFreeSubobj(pStream->pSql);
tfree(pStream->pSql->pSubs);
pStream->pSql->subState.numOfSub = 0;
pTableMetaInfo->vgroupList = tscVgroupInfoClear(pTableMetaInfo->vgroupList);
tscSetRetryTimer(pStream, pStream->pSql, retryDelay);
......
......@@ -48,6 +48,13 @@ void tVariantCreate(tVariant *pVar, SStrToken *token) {
case TSDB_DATA_TYPE_INT:{
ret = tStrToInteger(token->z, token->type, token->n, &pVar->i64, true);
if (ret != 0) {
SStrToken t = {0};
tSQLGetToken(token->z, &t.type);
if (t.type == TK_MINUS) { // a negative number below INT64_MIN; an unsigned retry cannot represent it either
pVar->nType = -1; // -1 means error type
return;
}
// data overflow: retry parsing the input number as an unsigned integer
ret = tStrToInteger(token->z, token->type, token->n, &pVar->i64, false);
if (ret != 0) {
......
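The fallback added to tVariantCreate above works like this: if the token fails to parse as a signed 64-bit integer and begins with a minus sign, the value lies below INT64_MIN and is rejected (pVar->nType is set to -1); otherwise the value may still fit into an unsigned 64-bit integer, so the parse is retried in unsigned mode. Below is a minimal standalone sketch of that logic, using strtoll/strtoull from the C standard library instead of TDengine's internal tStrToInteger; the function name and return conventions are illustrative only.

// Sketch of the signed-then-unsigned integer parsing fallback shown in the hunk above.
#include <errno.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

// Parse `z` as a 64-bit integer. Returns 0 on success; -1 if the value cannot
// be represented as either int64_t or uint64_t.
static int parseInt64WithUnsignedFallback(const char *z, int64_t *out, bool *isUnsigned) {
  char *end = NULL;

  errno = 0;
  long long v = strtoll(z, &end, 10);
  if (errno == 0 && end != z && *end == '\0') {
    *out = (int64_t)v;       // fits in the signed range
    *isUnsigned = false;
    return 0;
  }

  // Signed parse failed. A leading '-' means the value is below INT64_MIN,
  // so an unsigned retry cannot help: report an error, as the diff does by
  // setting pVar->nType = -1.
  if (z[0] == '-') {
    return -1;
  }

  // Otherwise the value may still lie in (INT64_MAX, UINT64_MAX]; retry unsigned.
  errno = 0;
  unsigned long long u = strtoull(z, &end, 10);
  if (errno == 0 && end != z && *end == '\0') {
    *out = (int64_t)u;       // stored bit-for-bit, as tVariant keeps the value in i64
    *isUnsigned = true;
    return 0;
  }
  return -1;
}

int main(void) {
  int64_t v; bool u;
  printf("%d\n", parseInt64WithUnsignedFallback("9223372036854775808", &v, &u));  // 0: parsed as unsigned
  printf("%d\n", parseInt64WithUnsignedFallback("-9223372036854775809", &v, &u)); // -1: out of range
  return 0;
}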
......@@ -81,7 +81,7 @@ enum QUERY_MODE {
#define MAX_DB_NAME_SIZE 64
#define MAX_HOSTNAME_SIZE 64
#define MAX_TB_NAME_SIZE 64
-#define MAX_DATA_SIZE 16000
+#define MAX_DATA_SIZE (16*1024)
#define MAX_NUM_DATATYPE 10
#define OPT_ABORT 1 /* --abort */
#define STRING_LEN 60000
......@@ -3657,7 +3657,6 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
goto PARSE_OVER;
}
/*
cJSON* batchCreateTbl = cJSON_GetObjectItem(stbInfo, "batch_create_tbl_num");
if (batchCreateTbl && batchCreateTbl->type == cJSON_Number) {
g_Dbs.db[i].superTbls[j].batchCreateTableNum = batchCreateTbl->valueint;
......@@ -3667,7 +3666,6 @@ static bool getMetaFromInsertJsonFile(cJSON* root) {
printf("ERROR: failed to read json, batch_create_tbl_num not found\n");
goto PARSE_OVER;
}
*/
cJSON *childTblExists = cJSON_GetObjectItem(stbInfo, "child_table_exists"); // yes, no
if (childTblExists
......
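For context, the block commented out above followed the same pattern taosdemo uses for every optional JSON field in getMetaFromInsertJsonFile: look the item up with cJSON_GetObjectItem, accept it if it is a number, fall back to a built-in default when it is absent, and abort the parse otherwise. Below is a small self-contained sketch of that pattern against the cJSON library; the default value (10) and the readOptionalInt helper are illustrative assumptions, not taosdemo code.

// Minimal sketch of the optional-numeric-field pattern used in getMetaFromInsertJsonFile.
#include <stdbool.h>
#include <stdio.h>
#include "cJSON.h"   // include path depends on how cJSON is vendored

static bool readOptionalInt(cJSON *obj, const char *key, int defaultVal, int *out) {
  cJSON *item = cJSON_GetObjectItem(obj, key);
  if (item && item->type == cJSON_Number) {
    *out = (int)item->valueint;   // field present and numeric: use it
    return true;
  } else if (!item) {
    *out = defaultVal;            // field absent: fall back to a default
    return true;
  }
  printf("ERROR: failed to read json, %s is not a number\n", key);
  return false;                   // field present but not a number: reject the config
}

int main(void) {
  cJSON *stbInfo = cJSON_Parse("{\"name\": \"stb0\"}");
  if (stbInfo == NULL) return 1;

  int batchNum = 0;
  if (readOptionalInt(stbInfo, "batch_create_tbl_num", 10, &batchNum)) {
    printf("batch_create_tbl_num = %d\n", batchNum);   // prints the assumed default, 10
  }
  cJSON_Delete(stbInfo);
  return 0;
}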
......@@ -240,6 +240,7 @@ python3 ./test.py -f stream/sys.py
python3 ./test.py -f stream/table_1.py
python3 ./test.py -f stream/table_n.py
python3 ./test.py -f stream/showStreamExecTimeisNull.py
python3 ./test.py -f stream/cqSupportBefore1970.py
#alter table
python3 ./test.py -f alter/alter_table_crash.py
......
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# -*- coding: utf-8 -*-
import sys
import time
import inspect

from util.log import *
from util.cases import *
from util.sql import *
from util.dnodes import *


class TDTestCase:
    def init(self, conn, logSql):
        tdLog.debug(f"start to execute {__file__}")
        tdSql.init(conn.cursor(), logSql)

    def insertnow(self):
        # timestamp list:
        #   0              -> "1970-01-01 08:00:00"
        #   -28800000      -> "1970-01-01 00:00:00"
        #   -946800000000  -> "1940-01-01 00:00:00"
        #   -631180800000  -> "1950-01-01 00:00:00"
        tsp1 = 0
        tsp2 = -28800000
        tsp3 = -946800000000
        tsp4 = "1969-01-01 00:00:00.000"

        tdSql.execute("insert into tcq1 values (now-11d, 5)")
        tdSql.execute(f"insert into tcq1 values ({tsp1}, 4)")
        tdSql.execute(f"insert into tcq1 values ({tsp2}, 3)")
        tdSql.execute(f"insert into tcq1 values ('{tsp4}', 2)")
        tdSql.execute(f"insert into tcq1 values ({tsp3}, 1)")

    def waitedQuery(self, sql, expectRows, timeout):
        tdLog.info(f"sql: {sql}, try to retrieve {expectRows} rows in {timeout} seconds")
        try:
            for i in range(timeout):
                tdSql.cursor.execute(sql)
                self.queryResult = tdSql.cursor.fetchall()
                self.queryRows = len(self.queryResult)
                self.queryCols = len(tdSql.cursor.description)
                # tdLog.info("sql: %s, try to retrieve %d rows, get %d rows" % (sql, expectRows, self.queryRows))
                if self.queryRows >= expectRows:
                    return (self.queryRows, i)
                time.sleep(1)
        except Exception as e:
            caller = inspect.getframeinfo(inspect.stack()[1][0])
            tdLog.notice(f"{caller.filename}({caller.lineno}) failed: sql:{sql}, {repr(e)}")
            raise Exception(repr(e))
        return (self.queryRows, timeout)

    def cq(self):
        tdSql.execute(
            "create table cq1 as select avg(c1) from tcq1 where ts > -946800000000 interval(10d) sliding(1d)"
        )
        self.waitedQuery("select * from cq1", 1, 120)

    def querycq(self):
        tdSql.query("select * from cq1")
        tdSql.checkData(0, 1, 1.0)
        tdSql.checkData(10, 1, 2.0)

    def run(self):
        tdSql.execute("drop database if exists dbcq")
        tdSql.execute("create database if not exists dbcq keep 36500")
        tdSql.execute("use dbcq")

        tdSql.execute("create table stbcq (ts timestamp, c1 int) TAGS(t1 int)")
        tdSql.execute("create table tcq1 using stbcq tags(1)")

        self.insertnow()
        self.cq()
        self.querycq()

        # after wal and sync, check again
        tdSql.query("show dnodes")
        index = tdSql.getData(0, 0)
        tdDnodes.stop(index)
        tdDnodes.start(index)
        self.querycq()

    def stop(self):
        tdSql.close()
        tdLog.success(f"{__file__} successfully executed")


tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())
\ No newline at end of file