Commit 3a5f3203 authored by C cpwu

fix case

Parent 09982db5
......@@ -199,22 +199,22 @@ class TDCom:
res = requests.post(url, sql.encode("utf-8"), headers = self.preDefine()[0])
return res
def cleanTb(self, type="taosc"):
def cleanTb(self, type="taosc", dbname="db"):
'''
type is taosc or restful
'''
query_sql = "show stables"
query_sql = f"show {dbname}.stables"
res_row_list = tdSql.query(query_sql, True)
stb_list = map(lambda x: x[0], res_row_list)
for stb in stb_list:
if type == "taosc":
tdSql.execute(f'drop table if exists `{stb}`')
tdSql.execute(f'drop table if exists `{dbname}.{stb}`')
if not stb[0].isdigit():
tdSql.execute(f'drop table if exists {stb}')
tdSql.execute(f'drop table if exists {dbname}.{stb}')
elif type == "restful":
self.restApiPost(f"drop table if exists `{stb}`")
self.restApiPost(f"drop table if exists `{dbname}.{stb}`")
if not stb[0].isdigit():
self.restApiPost(f"drop table if exists {stb}")
self.restApiPost(f"drop table if exists {dbname}.{stb}")
def dateToTs(self, datetime_input):
return int(time.mktime(time.strptime(datetime_input, "%Y-%m-%d %H:%M:%S.%f")))
......
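With the new dbname parameter, cleanTb looks up the super tables of an explicit database instead of whatever database happens to be selected. A minimal usage sketch of the updated helper (tdCom comes from util.common as imported below); dbname is passed by keyword here so it does not bind to the leading type parameter:

    # hedged sketch of the updated helper, not part of the commit
    tdCom.cleanTb(type="taosc", dbname="db")     # drops every super table returned by `show db.stables`
    tdCom.cleanTb(type="restful", dbname="db")   # same cleanup issued through the REST API helper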
......@@ -13,9 +13,9 @@ from util.common import *
sys.path.append("./6-cluster/")
from clusterCommonCreate import *
from clusterCommonCheck import clusterComCheck
import threading
class TDTestCase:
......@@ -28,7 +28,7 @@ class TDTestCase:
def init(self, conn, logSql):
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor(), True)
tdSql.init(conn.cursor(), False)
def create_ctable(self,tsql=None, dbName='dbx',stbName='stb',ctbPrefix='ctb',ctbNum=1):
tsql.execute("use %s" %dbName)
......@@ -47,7 +47,7 @@ class TDTestCase:
sql = pre_create
if sql != pre_create:
tsql.execute(sql)
tdLog.debug("complete to create %d child tables in %s.%s" %(ctbNum, dbName, stbName))
return
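The middle of create_ctable is elided between these hunks; a hedged reconstruction of the batching pattern implied by its visible tail (the batch size and tag values are assumptions, the parameter names come from the signature above):

    # sketch only: accumulate child-table clauses and flush them in batches
    pre_create = "create table"
    sql = pre_create
    batch_size = 100                                   # hypothetical batch size
    for i in range(ctbNum):
        sql += f" if not exists {ctbPrefix}{i} using {stbName} tags({i})"
        if (i + 1) % batch_size == 0:
            tsql.execute(sql)
            sql = pre_create
    if sql != pre_create:                              # flush the remainder, as in the tail above
        tsql.execute(sql)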
......@@ -55,7 +55,7 @@ class TDTestCase:
dbname="db_tsbs"
stabname1="readings"
stabname2="diagnostics"
ctbnamePre1="rct"
ctbnamePre2="dct"
ctbNums=40
self.ctbNums=ctbNums
......@@ -73,7 +73,7 @@ class TDTestCase:
self.create_ctable(tsql=tdSql,dbName=dbname,stbName=stabname2,ctbPrefix=ctbnamePre2,ctbNum=ctbNums)
for j in range(ctbNums):
for i in range(rowNUms):
tdSql.execute(
f"insert into rct{j} values ( {ts+i*60000}, {80+i}, {90+i}, {85+i}, {30+i*10}, {1.2*i}, {221+i*2}, {20+i*0.2}, {1500+i*20}, {150+i*2},{5+i} )"
......@@ -109,19 +109,19 @@ class TDTestCase:
def tsbsIotQuery(self,tdSql):
tdSql.execute("use db_tsbs")
# test interval and partition
tdSql.query(" SELECT avg(velocity) as mean_velocity ,name,driver,fleet FROM readings WHERE ts > 1451606400000 AND ts <= 1451606460000 partition BY name,driver,fleet; ")
# print(tdSql.queryResult)
parRows=tdSql.queryRows
tdSql.query(" SELECT avg(velocity) as mean_velocity ,name,driver,fleet FROM readings WHERE ts > 1451606400000 AND ts <= 1451606460000 partition BY name,driver,fleet interval(10m); ")
tdSql.checkRows(parRows)
# # test insert into
# tdSql.execute("create table testsnode (ts timestamp, c1 float,c2 binary(30),c3 binary(30),c4 binary(30)) ;")
# tdSql.query("insert into testsnode SELECT ts,avg(velocity) as mean_velocity,name,driver,fleet FROM readings WHERE ts > 1451606400000 AND ts <= 1451606460000 partition BY name,driver,fleet,ts interval(10m);")
# tdSql.query("insert into testsnode(ts,c1,c2,c3,c4) SELECT ts,avg(velocity) as mean_velocity,name,driver,fleet FROM readings WHERE ts > 1451606400000 AND ts <= 1451606460000 partition BY name,driver,fleet,ts interval(10m);")
......@@ -141,7 +141,7 @@ class TDTestCase:
tdSql.query("SELECT ts,name,driver,current_load,load_capacity FROM (SELECT last(ts) as ts,name,driver, current_load,load_capacity FROM diagnostics WHERE fleet = 'South' partition by name,driver) WHERE current_load>= (0.9 * load_capacity) partition by name ORDER BY name ;")
# 2 stationary-trucks
tdSql.query("select name,driver from (SELECT name,driver,fleet ,avg(velocity) as mean_velocity FROM readings WHERE ts > '2016-01-01T15:07:21Z' AND ts <= '2016-01-01T16:17:21Z' partition BY name,driver,fleet interval(10m) LIMIT 1)")
tdSql.query("select name,driver from (SELECT name,driver,fleet ,avg(velocity) as mean_velocity FROM readings WHERE ts > '2016-01-01T15:07:21Z' AND ts <= '2016-01-01T16:17:21Z' partition BY name,driver,fleet interval(10m) LIMIT 1) WHERE fleet = 'West' AND mean_velocity < 1000 partition BY name")
......@@ -156,7 +156,7 @@ class TDTestCase:
tdSql.query("select _wstart as ts,fleet,name,driver,count(mv)/6 as hours_driven from ( select _wstart as ts,fleet,name,driver,avg(velocity) as mv from readings where ts > '2016-01-01T00:00:00Z' and ts < '2016-01-05T00:00:01Z' partition by fleet,name,driver interval(10m)) where ts > '2016-01-01T00:00:00Z' and ts < '2016-01-05T00:00:01Z' partition by fleet,name,driver interval(1d) ;")
# # 6. avg-daily-driving-session
# #taosc core dumped
# tdSql.execute("create table random_measure2_1 (ts timestamp,ela float, name binary(40))")
# tdSql.query("SELECT ts,diff(mv) AS difka FROM (SELECT ts,name,floor(avg(velocity)/10)/floor(avg(velocity)/10) AS mv FROM readings WHERE name!='' AND ts > '2016-01-01T00:00:00Z' AND ts < '2016-01-05T00:00:01Z' partition by name,ts interval(10m) fill(value,0)) GROUP BY name,ts;")
......@@ -166,7 +166,7 @@ class TDTestCase:
# 7. avg-load
tdSql.query("SELECT fleet, model,avg(ml) AS mean_load_percentage FROM (SELECT fleet, model,current_load/load_capacity AS ml FROM diagnostics partition BY name, fleet, model) partition BY fleet, model order by fleet ;")
# 8. daily-activity
tdSql.query(" SELECT model,ms1 FROM (SELECT _wstart as ts1,model, fleet,avg(status) AS ms1 FROM diagnostics WHERE ts >= '2016-01-01T00:00:00Z' AND ts < '2016-01-05T00:00:01Z' partition by model, fleet interval(10m) fill(value,0)) WHERE ts1 >= '2016-01-01T00:00:00Z' AND ts1 < '2016-01-05T00:00:01Z' AND ms1<1;")
tdSql.query(" SELECT model,ms1 FROM (SELECT _wstart as ts1,model, fleet,avg(status) AS ms1 FROM diagnostics WHERE ts >= '2016-01-01T00:00:00Z' AND ts < '2016-01-05T00:00:01Z' partition by model, fleet interval(10m) ) WHERE ts1 >= '2016-01-01T00:00:00Z' AND ts1 < '2016-01-05T00:00:01Z' AND ms1<1;")
......@@ -184,7 +184,7 @@ class TDTestCase:
tdSql.query(" SELECT model,state_changed,count(state_changed) FROM (SELECT model,diff(broken_down) AS state_changed FROM (SELECT _wstart,model,cast(cast(floor(2*(sum(nzs)/count(nzs))) as bool) as int) AS broken_down FROM (SELECT ts,model, cast(cast(status as bool) as int) AS nzs FROM diagnostics WHERE ts >= '2016-01-01T00:00:00Z' AND ts < '2016-01-05T00:00:01Z' ) WHERE ts >= '2016-01-01T00:00:00Z' AND ts < '2016-01-05T00:00:01Z' partition BY model interval(10m)) partition BY model) where state_changed =1 partition BY model,state_changed ;")
#it's already supported:
# last-loc
tdSql.query("SELECT last_row(ts),latitude,longitude,name,driver FROM readings WHERE fleet='South' and name IS NOT NULL partition BY name,driver order by name ;")
......@@ -192,7 +192,7 @@ class TDTestCase:
#2. low-fuel
tdSql.query("SELECT last_row(ts),name,driver,fuel_state,driver FROM diagnostics WHERE fuel_state <= 0.1 AND fleet = 'South' and name IS NOT NULL GROUP BY name,driver order by name;")
# 3. avg-vs-projected-fuel-consumption
tdSql.query("select avg(fuel_consumption) as avg_fuel_consumption,avg(nominal_fuel_consumption) as nominal_fuel_consumption from readings where velocity > 1 group by fleet")
......@@ -213,16 +213,16 @@ class TDTestCase:
'ctbPrefix': 'ctb',
'ctbNum': 1,
}
dnodeNumbers=int(dnodeNumbers)
mnodeNums=int(mnodeNums)
vnodeNumbers = int(dnodeNumbers-mnodeNums)
tdSql.query("select * from information_schema.ins_dnodes;")
tdLog.debug(tdSql.queryResult)
clusterComCheck.checkDnodes(dnodeNumbers)
tdLog.info("create database and stable")
tdLog.info("create database and stable")
tdDnodes=cluster.dnodes
stopcount =0
threads=[]
......@@ -234,7 +234,7 @@ class TDTestCase:
for tr in threads:
tr.start()
tdLog.info("Take turns stopping %s "%stopRole)
tdLog.info("Take turns stopping %s "%stopRole)
while stopcount < restartNumbers:
tdLog.info(" restart loop: %d"%stopcount )
if stopRole == "mnode":
......@@ -242,7 +242,7 @@ class TDTestCase:
tdDnodes[i].stoptaosd()
# sleep(10)
tdDnodes[i].starttaosd()
# sleep(10)
elif stopRole == "vnode":
for i in range(vnodeNumbers):
tdDnodes[i+mnodeNums].stoptaosd()
......@@ -254,7 +254,7 @@ class TDTestCase:
tdDnodes[i].stoptaosd()
# sleep(10)
tdDnodes[i].starttaosd()
# sleep(10)
# dnodeNumbers don't include database of schema
if clusterComCheck.checkDnodes(dnodeNumbers):
......@@ -265,12 +265,12 @@ class TDTestCase:
tdLog.exit("one or more of dnodes failed to start ")
# self.check3mnode()
stopcount+=1
for tr in threads:
tr.join()
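The loop above performs a rolling restart of the dnodes selected by stopRole while the worker threads keep inserting; a condensed sketch of its vnode branch, with the success check folded into a guard (the original's full if/else is partly elided in the hunks above):

    # condensed sketch of the rolling-restart loop for stopRole == "vnode"
    while stopcount < restartNumbers:
        for i in range(vnodeNumbers):
            tdDnodes[i + mnodeNums].stoptaosd()      # vnode-only dnodes come after the mnode dnodes
            tdDnodes[i + mnodeNums].starttaosd()
        if not clusterComCheck.checkDnodes(dnodeNumbers):
            tdLog.exit("one or more dnodes failed to start")
        stopcount += 1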
def run(self):
tdLog.printNoPrefix("==========step1:create database and table,insert data ==============")
self.createCluster()
self.prepareData()
......
......@@ -19,7 +19,7 @@ class TDTestCase:
def init(self, conn, logSql):
## add for TD-6672
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor(), logSql)
tdSql.init(conn.cursor(), False)
def insertData(self, tb_name):
insert_sql_list = [f'insert into {tb_name} values ("2021-01-01 12:00:00", 1, 1, 1, 3, 1.1, 1.1, "binary", "nchar", true, 1, 2, 3, 4)',
......@@ -37,17 +37,17 @@ class TDTestCase:
for sql in insert_sql_list:
tdSql.execute(sql)
def initTb(self):
tdCom.cleanTb()
tb_name = tdCom.getLongName(8, "letters")
def initTb(self, dbname="db"):
tdCom.cleanTb(dbname=dbname)
tb_name = f'{dbname}.{tdCom.getLongName(8, "letters")}'
tdSql.execute(
f"CREATE TABLE {tb_name} (ts timestamp, c1 tinyint, c2 smallint, c3 int, c4 bigint, c5 float, c6 double, c7 binary(100), c8 nchar(200), c9 bool, c10 tinyint unsigned, c11 smallint unsigned, c12 int unsigned, c13 bigint unsigned)")
self.insertData(tb_name)
return tb_name
def initStb(self, count=5):
tdCom.cleanTb()
tb_name = tdCom.getLongName(8, "letters")
def initStb(self, count=5, dbname="db"):
tdCom.cleanTb(dbname=dbname)
tb_name = f'{dbname}.{tdCom.getLongName(8, "letters")}'
tdSql.execute(
f"CREATE TABLE {tb_name} (ts timestamp, c1 tinyint, c2 smallint, c3 int, c4 bigint, c5 float, c6 double, c7 binary(100), c8 nchar(200), c9 bool, c10 tinyint unsigned, c11 smallint unsigned, c12 int unsigned, c13 bigint unsigned) tags (t1 tinyint, t2 smallint, t3 int, t4 bigint, t5 float, t6 double, t7 binary(100), t8 nchar(200), t9 bool, t10 tinyint unsigned, t11 smallint unsigned, t12 int unsigned, t13 bigint unsigned)")
for i in range(1, count+1):
......@@ -56,9 +56,10 @@ class TDTestCase:
self.insertData(f'{tb_name}_sub_{i}')
return tb_name
def initTwoStb(self):
tdCom.cleanTb()
tb_name = tdCom.getLongName(8, "letters")
def initTwoStb(self, dbname="db"):
tdCom.cleanTb(dbname=dbname)
tb_name = f'{dbname}.{tdCom.getLongName(8, "letters")}'
# tb_name = tdCom.getLongName(8, "letters")
tb_name1 = f'{tb_name}1'
tb_name2 = f'{tb_name}2'
tdSql.execute(
......
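A brief sketch of how a test method might call these reworked helpers after the change (inside the same TDTestCase, with the default db database assumed):

    # hedged usage sketch; helper names as defined above
    tb_name = self.initTb(dbname="db")              # returns a qualified name such as "db.abcdefgh"
    stb_name = self.initStb(count=5, dbname="db")   # super table plus five populated sub-tables
    tdSql.query(f"select count(*) from {tb_name}")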
This diff has been collapsed.
......@@ -120,16 +120,16 @@ class TDTestCase:
return sqls
def __test_current(self): # sourcery skip: use-itertools-product
def __test_current(self, dbname="db"): # sourcery skip: use-itertools-product
tdLog.printNoPrefix("==========current sql condition check , must return query ok==========")
tbname = ["ct1", "ct2", "ct4", "t1", "stb1"]
tbname = [f"{dbname}.ct1", f"{dbname}.ct2", f"{dbname}.ct4", f"{dbname}.t1", f"{dbname}.stb1"]
for tb in tbname:
self.__rtrim_check(tb)
tdLog.printNoPrefix(f"==========current sql condition check in {tb} over==========")
def __test_error(self):
def __test_error(self, dbname="db"):
tdLog.printNoPrefix("==========err sql condition check , must return error==========")
tbname = ["ct1", "ct2", "ct4", "t1", "stb1"]
tbname = [f"{dbname}.ct1", f"{dbname}.ct2", f"{dbname}.ct4", f"{dbname}.t1", f"{dbname}.stb1"]
for tb in tbname:
for errsql in self.__rtrim_err_check(tb):
......@@ -142,17 +142,15 @@ class TDTestCase:
self.__test_error()
def __create_tb(self):
tdSql.prepare()
def __create_tb(self, dbname="db"):
tdLog.printNoPrefix("==========step1:create table")
create_stb_sql = f'''create table stb1(
create_stb_sql = f'''create table {dbname}.stb1(
ts timestamp, {INT_COL} int, {BINT_COL} bigint, {SINT_COL} smallint, {TINT_COL} tinyint,
{FLOAT_COL} float, {DOUBLE_COL} double, {BOOL_COL} bool,
{BINARY_COL} binary(16), {NCHAR_COL} nchar(32), {TS_COL} timestamp
) tags (t1 int)
'''
create_ntb_sql = f'''create table t1(
create_ntb_sql = f'''create table {dbname}.t1(
ts timestamp, {INT_COL} int, {BINT_COL} bigint, {SINT_COL} smallint, {TINT_COL} tinyint,
{FLOAT_COL} float, {DOUBLE_COL} double, {BOOL_COL} bool,
{BINARY_COL} binary(16), {NCHAR_COL} nchar(32), {TS_COL} timestamp
......@@ -162,29 +160,29 @@ class TDTestCase:
tdSql.execute(create_ntb_sql)
for i in range(4):
tdSql.execute(f'create table ct{i+1} using stb1 tags ( {i+1} )')
tdSql.execute(f'create table {dbname}.ct{i+1} using {dbname}.stb1 tags ( {i+1} )')
def __insert_data(self, rows):
def __insert_data(self, rows, dbname="db"):
now_time = int(datetime.datetime.timestamp(datetime.datetime.now()) * 1000)
for i in range(rows):
tdSql.execute(
f"insert into ct1 values ( { now_time - i * 1000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )"
f"insert into {dbname}.ct1 values ( { now_time - i * 1000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )"
)
tdSql.execute(
f"insert into ct4 values ( { now_time - i * 7776000000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )"
f"insert into {dbname}.ct4 values ( { now_time - i * 7776000000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )"
)
tdSql.execute(
f"insert into ct2 values ( { now_time - i * 7776000000 }, {-i}, {-11111 * i}, {-111 * i % 32767 }, {-11 * i % 127}, {-1.11*i}, {-1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )"
f"insert into {dbname}.ct2 values ( { now_time - i * 7776000000 }, {-i}, {-11111 * i}, {-111 * i % 32767 }, {-11 * i % 127}, {-1.11*i}, {-1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )"
)
tdSql.execute(
f'''insert into ct1 values
f'''insert into {dbname}.ct1 values
( { now_time - rows * 5 }, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar_测试_0', { now_time + 8 } )
( { now_time + 10000 }, { rows }, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar_测试_9', { now_time + 9 } )
'''
)
tdSql.execute(
f'''insert into ct4 values
f'''insert into {dbname}.ct4 values
( { now_time - rows * 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time - rows * 3888000000 + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time + 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
......@@ -200,7 +198,7 @@ class TDTestCase:
)
tdSql.execute(
f'''insert into ct2 values
f'''insert into {dbname}.ct2 values
( { now_time - rows * 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time - rows * 3888000000 + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time + 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
......@@ -216,13 +214,13 @@ class TDTestCase:
)
for i in range(rows):
insert_data = f'''insert into t1 values
insert_data = f'''insert into {dbname}.t1 values
( { now_time - i * 3600000 }, {i}, {i * 11111}, { i % 32767 }, { i % 127}, { i * 1.11111 }, { i * 1000.1111 }, { i % 2},
"binary_{i}", "nchar_测试_{i}", { now_time - 1000 * i } )
'''
tdSql.execute(insert_data)
tdSql.execute(
f'''insert into t1 values
f'''insert into {dbname}.t1 values
( { now_time + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time - (( rows // 2 ) * 60 + 30) * 60000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time - rows * 3600000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
......@@ -251,8 +249,7 @@ class TDTestCase:
tdLog.printNoPrefix("==========step3:all check")
self.all_test()
tdDnodes.stop(1)
tdDnodes.start(1)
tdSql.execute("flush database db")
tdSql.execute("use db")
......
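The dnode stop/start pair is replaced by flush database, which forces in-memory rows to disk without restarting the process, so the same checks can be repeated against persisted data. A condensed sketch of the resulting pattern (the second round of checks is an assumption, since the hunk is cut off):

    # hedged sketch of the restart-free persistence check
    self.all_test()                       # first pass while data is still in memory
    tdSql.execute("flush database db")    # persist the write buffers to disk
    tdSql.execute("use db")
    self.all_test()                       # assumed second pass over the flushed data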
This diff has been collapsed.
......@@ -138,6 +138,14 @@ python3 ./test.py -f 2-query/percentile.py
python3 ./test.py -f 2-query/percentile.py -R
python3 ./test.py -f 2-query/pow.py
python3 ./test.py -f 2-query/pow.py -R
python3 ./test.py -f 2-query/query_cols_tags_and_or.py
python3 ./test.py -f 2-query/query_cols_tags_and_or.py -R
python3 ./test.py -f 2-query/round.py
python3 ./test.py -f 2-query/round.py -R
python3 ./test.py -f 2-query/rtrim.py
# python3 ./test.py -f 2-query/rtrim.py -R
python3 ./test.py -f 2-query/sample.py
python3 ./test.py -f 2-query/sample.py -R
python3 ./test.py -f 1-insert/update_data.py
......@@ -145,7 +153,6 @@ python3 ./test.py -f 1-insert/update_data.py
python3 ./test.py -f 1-insert/delete_data.py
python3 ./test.py -f 2-query/varchar.py
python3 ./test.py -f 2-query/rtrim.py
python3 ./test.py -f 2-query/upper.py
python3 ./test.py -f 2-query/lower.py
python3 ./test.py -f 2-query/join2.py
......@@ -165,12 +172,10 @@ python3 ./test.py -f 2-query/Timediff.py
python3 ./test.py -f 2-query/json_tag.py
python3 ./test.py -f 2-query/top.py
python3 ./test.py -f 2-query/round.py
python3 ./test.py -f 2-query/log.py
python3 ./test.py -f 2-query/sqrt.py
python3 ./test.py -f 2-query/sin.py
python3 ./test.py -f 2-query/tan.py
python3 ./test.py -f 2-query/query_cols_tags_and_or.py
# python3 ./test.py -f 2-query/nestedQuery.py
# TD-15983 subquery output duplicate name column.
# Please Xiangyang Guo modify the following script
......@@ -179,7 +184,6 @@ python3 ./test.py -f 2-query/query_cols_tags_and_or.py
python3 ./test.py -f 2-query/elapsed.py
python3 ./test.py -f 2-query/csum.py
#python3 ./test.py -f 2-query/mavg.py
python3 ./test.py -f 2-query/sample.py
python3 ./test.py -f 2-query/function_diff.py
python3 ./test.py -f 2-query/unique.py
python3 ./test.py -f 2-query/stateduration.py
......