未验证 提交 7e8823d5 编写于 作者: S sunpeng 提交者: GitHub

build: delete TaosConsumer and TaosTmq from taospy (#20076)

上级 8030188f
...@@ -18,7 +18,8 @@ from __future__ import annotations ...@@ -18,7 +18,8 @@ from __future__ import annotations
from typing import Any, Set, Tuple from typing import Any, Set, Tuple
from typing import Dict from typing import Dict
from typing import List from typing import List
from typing import Optional # Type hinting, ref: https://stackoverflow.com/questions/19202633/python-3-type-hinting-for-none from typing import \
Optional # Type hinting, ref: https://stackoverflow.com/questions/19202633/python-3-type-hinting-for-none
import textwrap import textwrap
import time import time
...@@ -39,7 +40,6 @@ import gc ...@@ -39,7 +40,6 @@ import gc
import taos import taos
from taos.tmq import * from taos.tmq import *
from .shared.types import TdColumns, TdTags from .shared.types import TdColumns, TdTags
# from crash_gen import ServiceManager, TdeInstance, TdeSubProcess # from crash_gen import ServiceManager, TdeInstance, TdeSubProcess
...@@ -69,6 +69,7 @@ gSvcMgr: Optional[ServiceManager] # TODO: refactor this hack, use dep injecti ...@@ -69,6 +69,7 @@ gSvcMgr: Optional[ServiceManager] # TODO: refactor this hack, use dep injecti
# logger: logging.Logger # logger: logging.Logger
gContainer: Container gContainer: Container
# def runThread(wt: WorkerThread): # def runThread(wt: WorkerThread):
# wt.run() # wt.run()
...@@ -138,7 +139,7 @@ class WorkerThread: ...@@ -138,7 +139,7 @@ class WorkerThread:
# clean up # clean up
if (Config.getConfig().per_thread_db_connection): # type: ignore if (Config.getConfig().per_thread_db_connection): # type: ignore
if self._dbConn.isOpen: #sometimes it is not open if self._dbConn.isOpen: # sometimes it is not open
self._dbConn.close() self._dbConn.close()
else: else:
Logging.warning("Cleaning up worker thread, dbConn already closed") Logging.warning("Cleaning up worker thread, dbConn already closed")
...@@ -163,7 +164,6 @@ class WorkerThread: ...@@ -163,7 +164,6 @@ class WorkerThread:
Logging.debug("[TRD] Thread Coordinator not running any more, worker thread now stopping...") Logging.debug("[TRD] Thread Coordinator not running any more, worker thread now stopping...")
break break
# Before we fetch the task and run it, let's ensure we properly "use" the database (not needed any more) # Before we fetch the task and run it, let's ensure we properly "use" the database (not needed any more)
try: try:
if (Config.getConfig().per_thread_db_connection): # most likely TRUE if (Config.getConfig().per_thread_db_connection): # most likely TRUE
...@@ -172,7 +172,8 @@ class WorkerThread: ...@@ -172,7 +172,8 @@ class WorkerThread:
# self.useDb() # might encounter exceptions. TODO: catch # self.useDb() # might encounter exceptions. TODO: catch
except taos.error.ProgrammingError as err: except taos.error.ProgrammingError as err:
errno = Helper.convertErrno(err.errno) errno = Helper.convertErrno(err.errno)
if errno in [0x383, 0x386, 0x00B, 0x014] : # invalid database, dropping, Unable to establish connection, Database not ready if errno in [0x383, 0x386, 0x00B,
0x014]: # invalid database, dropping, Unable to establish connection, Database not ready
# ignore # ignore
dummy = 0 dummy = 0
else: else:
...@@ -180,7 +181,7 @@ class WorkerThread: ...@@ -180,7 +181,7 @@ class WorkerThread:
raise raise
# Fetch a task from the Thread Coordinator # Fetch a task from the Thread Coordinator
Logging.debug( "[TRD] Worker thread [{}] about to fetch task".format(self._tid)) Logging.debug("[TRD] Worker thread [{}] about to fetch task".format(self._tid))
task = tc.fetchTask() task = tc.fetchTask()
# Execute such a task # Execute such a task
...@@ -239,7 +240,7 @@ class WorkerThread: ...@@ -239,7 +240,7 @@ class WorkerThread:
def getQueryResult(self): def getQueryResult(self):
return self.getDbConn().getQueryResult() return self.getDbConn().getQueryResult()
def getDbConn(self) -> DbConn : def getDbConn(self) -> DbConn:
if (Config.getConfig().per_thread_db_connection): if (Config.getConfig().per_thread_db_connection):
return self._dbConn return self._dbConn
else: else:
...@@ -251,6 +252,7 @@ class WorkerThread: ...@@ -251,6 +252,7 @@ class WorkerThread:
# else: # else:
# return self._tc.getDbState().getDbConn().query(sql) # return self._tc.getDbState().getDbConn().query(sql)
# The coordinator of all worker threads, mostly running in main thread # The coordinator of all worker threads, mostly running in main thread
...@@ -374,7 +376,8 @@ class ThreadCoordinator: ...@@ -374,7 +376,8 @@ class ThreadCoordinator:
# TODO: saw an error here once, let's print out stack info for err? # TODO: saw an error here once, let's print out stack info for err?
traceback.print_stack() # Stack frame to here. traceback.print_stack() # Stack frame to here.
Logging.info("Caused by:") Logging.info("Caused by:")
traceback.print_exception(*sys.exc_info()) # Ref: https://www.geeksforgeeks.org/how-to-print-exception-stack-trace-in-python/ traceback.print_exception(
*sys.exc_info()) # Ref: https://www.geeksforgeeks.org/how-to-print-exception-stack-trace-in-python/
transitionFailed = True transitionFailed = True
self._te = None # Not running any more self._te = None # Not running any more
self._execStats.registerFailure("State transition error: {}".format(err)) self._execStats.registerFailure("State transition error: {}".format(err))
...@@ -409,11 +412,10 @@ class ThreadCoordinator: ...@@ -409,11 +412,10 @@ class ThreadCoordinator:
# print("\n") # print("\n")
# print(h.heap()) # print(h.heap())
try: try:
self._syncAtBarrier() # For now just cross the barrier self._syncAtBarrier() # For now just cross the barrier
Progress.emit(Progress.END_THREAD_STEP) Progress.emit(Progress.END_THREAD_STEP)
if self._stepStartTime : if self._stepStartTime:
stepExecTime = time.time() - self._stepStartTime stepExecTime = time.time() - self._stepStartTime
Progress.emitStr('{:.3f}s/{}'.format(stepExecTime, DbConnNative.totalRequests)) Progress.emitStr('{:.3f}s/{}'.format(stepExecTime, DbConnNative.totalRequests))
DbConnNative.resetTotalRequests() # reset to zero DbConnNative.resetTotalRequests() # reset to zero
...@@ -461,7 +463,7 @@ class ThreadCoordinator: ...@@ -461,7 +463,7 @@ class ThreadCoordinator:
self._stepStartTime = time.time() self._stepStartTime = time.time()
self._releaseAllWorkerThreads(transitionFailed) self._releaseAllWorkerThreads(transitionFailed)
if hasAbortedTask or transitionFailed : # abnormal ending, workers waiting at "gate" if hasAbortedTask or transitionFailed: # abnormal ending, workers waiting at "gate"
Logging.debug("Abnormal ending of main thraed") Logging.debug("Abnormal ending of main thraed")
elif workerTimeout: elif workerTimeout:
Logging.debug("Abnormal ending of main thread, due to worker timeout") Logging.debug("Abnormal ending of main thread, due to worker timeout")
...@@ -472,7 +474,7 @@ class ThreadCoordinator: ...@@ -472,7 +474,7 @@ class ThreadCoordinator:
self._te = None # No more executor, time to end self._te = None # No more executor, time to end
Logging.debug("Main thread tapping all threads one last time...") Logging.debug("Main thread tapping all threads one last time...")
self.tapAllThreads() # Let the threads run one last time self.tapAllThreads() # Let the threads run one last time
#TODO: looks like we are not capturing the failures for the last step yet (i.e. calling registerFailure if neccessary) # TODO: looks like we are not capturing the failures for the last step yet (i.e. calling registerFailure if neccessary)
Logging.debug("\r\n\n--> Main thread ready to finish up...") Logging.debug("\r\n\n--> Main thread ready to finish up...")
Logging.debug("Main thread joining all threads") Logging.debug("Main thread joining all threads")
...@@ -492,7 +494,6 @@ class ThreadCoordinator: ...@@ -492,7 +494,6 @@ class ThreadCoordinator:
self._execStats = None self._execStats = None
self._runStatus = None self._runStatus = None
def printStats(self): def printStats(self):
self._execStats.printStats() self._execStats.printStats()
...@@ -529,13 +530,13 @@ class ThreadCoordinator: ...@@ -529,13 +530,13 @@ class ThreadCoordinator:
self._dbs.append(Database(0, dbc)) self._dbs.append(Database(0, dbc))
else: else:
baseDbNumber = int(datetime.datetime.now().timestamp( # Don't use Dice/random, as they are deterministic baseDbNumber = int(datetime.datetime.now().timestamp( # Don't use Dice/random, as they are deterministic
)*333) % 888 if Config.getConfig().dynamic_db_table_names else 0 ) * 333) % 888 if Config.getConfig().dynamic_db_table_names else 0
for i in range(Config.getConfig().max_dbs): for i in range(Config.getConfig().max_dbs):
self._dbs.append(Database(baseDbNumber + i, dbc)) self._dbs.append(Database(baseDbNumber + i, dbc))
def pickDatabase(self): def pickDatabase(self):
idxDb = 0 idxDb = 0
if Config.getConfig().max_dbs != 0 : if Config.getConfig().max_dbs != 0:
idxDb = Dice.throw(Config.getConfig().max_dbs) # 0 to N-1 idxDb = Dice.throw(Config.getConfig().max_dbs) # 0 to N-1
db = self._dbs[idxDb] # type: Database db = self._dbs[idxDb] # type: Database
return db return db
...@@ -549,7 +550,7 @@ class ThreadCoordinator: ...@@ -549,7 +550,7 @@ class ThreadCoordinator:
# pick a task type for current state # pick a task type for current state
db = self.pickDatabase() db = self.pickDatabase()
if Dice.throw(2)==1: if Dice.throw(2) == 1:
taskType = db.getStateMachine().pickTaskType() # dynamic name of class taskType = db.getStateMachine().pickTaskType() # dynamic name of class
else: else:
taskType = db.getStateMachine().balance_pickTaskType() # and an method can get balance task types taskType = db.getStateMachine().balance_pickTaskType() # and an method can get balance task types
...@@ -564,6 +565,7 @@ class ThreadCoordinator: ...@@ -564,6 +565,7 @@ class ThreadCoordinator:
with self._lock: with self._lock:
self._executedTasks.append(task) self._executedTasks.append(task)
class ThreadPool: class ThreadPool:
def __init__(self, numThreads, maxSteps): def __init__(self, numThreads, maxSteps):
self.numThreads = numThreads self.numThreads = numThreads
...@@ -587,6 +589,7 @@ class ThreadPool: ...@@ -587,6 +589,7 @@ class ThreadPool:
def cleanup(self): def cleanup(self):
self.threadList = [] # maybe clean up each? self.threadList = [] # maybe clean up each?
# A queue of continguous POSITIVE integers, used by DbManager to generate continuous numbers # A queue of continguous POSITIVE integers, used by DbManager to generate continuous numbers
# for new table names # for new table names
...@@ -729,7 +732,7 @@ class AnyState: ...@@ -729,7 +732,7 @@ class AnyState:
def canDropDb(self): def canDropDb(self):
# If user requests to run up to a number of DBs, # If user requests to run up to a number of DBs,
# we'd then not do drop_db operations any more # we'd then not do drop_db operations any more
if Config.getConfig().max_dbs > 0 or Config.getConfig().use_shadow_db : if Config.getConfig().max_dbs > 0 or Config.getConfig().use_shadow_db:
return False return False
return self._info[self.CAN_DROP_DB] return self._info[self.CAN_DROP_DB]
...@@ -801,7 +804,8 @@ class AnyState: ...@@ -801,7 +804,8 @@ class AnyState:
for task in tasks: for task in tasks:
if isinstance(task, cls): if isinstance(task, cls):
raise CrashGenError( raise CrashGenError(
"This task: {}, is not expected to be present, given the success/failure of others".format(cls.__name__)) "This task: {}, is not expected to be present, given the success/failure of others".format(
cls.__name__))
def assertNoSuccess(self, tasks, cls): def assertNoSuccess(self, tasks, cls):
for task in tasks: for task in tasks:
...@@ -886,7 +890,7 @@ class StateSuperTableOnly(AnyState): ...@@ -886,7 +890,7 @@ class StateSuperTableOnly(AnyState):
def verifyTasksToState(self, tasks, newState): def verifyTasksToState(self, tasks, newState):
if (self.hasSuccess(tasks, TaskDropSuperTable) if (self.hasSuccess(tasks, TaskDropSuperTable)
): # we are able to drop the table ): # we are able to drop the table
#self.assertAtMostOneSuccess(tasks, TaskDropSuperTable) # self.assertAtMostOneSuccess(tasks, TaskDropSuperTable)
# we must have had recreted it # we must have had recreted it
self.hasSuccess(tasks, TaskCreateSuperTable) self.hasSuccess(tasks, TaskCreateSuperTable)
...@@ -934,7 +938,7 @@ class StateHasData(AnyState): ...@@ -934,7 +938,7 @@ class StateHasData(AnyState):
): # only if we didn't create one ): # only if we didn't create one
# we shouldn't have dropped it # we shouldn't have dropped it
self.assertNoTask(tasks, TaskDropDb) self.assertNoTask(tasks, TaskDropDb)
if not( self.hasTask(tasks, TaskCreateSuperTable) if not (self.hasTask(tasks, TaskCreateSuperTable)
): # if we didn't create the table ): # if we didn't create the table
# we should not have a task that drops it # we should not have a task that drops it
self.assertNoTask(tasks, TaskDropSuperTable) self.assertNoTask(tasks, TaskDropSuperTable)
...@@ -999,9 +1003,9 @@ class StateMechine: ...@@ -999,9 +1003,9 @@ class StateMechine:
def _findCurrentState(self, dbc: DbConn): def _findCurrentState(self, dbc: DbConn):
ts = time.time() # we use this to debug how fast/slow it is to do the various queries to find the current DB state ts = time.time() # we use this to debug how fast/slow it is to do the various queries to find the current DB state
dbName =self._db.getName() dbName = self._db.getName()
if not dbc.existsDatabase(dbName): # dbc.hasDatabases(): # no database?! if not dbc.existsDatabase(dbName): # dbc.hasDatabases(): # no database?!
Logging.debug( "[STT] empty database found, between {} and {}".format(ts, time.time())) Logging.debug("[STT] empty database found, between {} and {}".format(ts, time.time()))
return StateEmpty() return StateEmpty()
# did not do this when openning connection, and this is NOT the worker # did not do this when openning connection, and this is NOT the worker
# thread, which does this on their own # thread, which does this on their own
...@@ -1016,7 +1020,6 @@ class StateMechine: ...@@ -1016,7 +1020,6 @@ class StateMechine:
sTable = self._db.getFixedSuperTable() sTable = self._db.getFixedSuperTable()
if sTable.hasRegTables(dbc): # no regular tables if sTable.hasRegTables(dbc): # no regular tables
# print("debug=====*\n"*100) # print("debug=====*\n"*100)
Logging.debug("[STT] SUPER_TABLE_ONLY found, between {} and {}".format(ts, time.time())) Logging.debug("[STT] SUPER_TABLE_ONLY found, between {} and {}".format(ts, time.time()))
...@@ -1096,22 +1099,24 @@ class StateMechine: ...@@ -1096,22 +1099,24 @@ class StateMechine:
weightsTypes = BasicTypes.copy() weightsTypes = BasicTypes.copy()
# this matrixs can balance the Frequency of TaskTypes # this matrixs can balance the Frequency of TaskTypes
balance_TaskType_matrixs = {'TaskDropDb': 5 , 'TaskDropTopics': 20 , 'TaskDropStreams':10 , 'TaskDropStreamTables':10 , balance_TaskType_matrixs = {'TaskDropDb': 5, 'TaskDropTopics': 20, 'TaskDropStreams': 10,
'TaskReadData':50 , 'TaskDropSuperTable':5 , 'TaskAlterTags':3 , 'TaskAddData':10, 'TaskDropStreamTables': 10,
'TaskDeleteData':10 , 'TaskCreateDb':10 , 'TaskCreateStream': 3, 'TaskCreateTopic' :3, 'TaskReadData': 50, 'TaskDropSuperTable': 5, 'TaskAlterTags': 3, 'TaskAddData': 10,
'TaskCreateConsumers':10, 'TaskCreateSuperTable': 10 } # TaskType : balance_matrixs of task 'TaskDeleteData': 10, 'TaskCreateDb': 10, 'TaskCreateStream': 3,
'TaskCreateTopic': 3,
'TaskCreateConsumers': 10,
'TaskCreateSuperTable': 10} # TaskType : balance_matrixs of task
for task , weights in balance_TaskType_matrixs.items(): for task, weights in balance_TaskType_matrixs.items():
for basicType in BasicTypes: for basicType in BasicTypes:
if basicType.__name__ == task: if basicType.__name__ == task:
for _ in range(weights): for _ in range(weights):
weightsTypes.append(basicType) weightsTypes.append(basicType)
task = random.sample(weightsTypes,1) task = random.sample(weightsTypes, 1)
return task[0] return task[0]
# ref: # ref:
# https://eli.thegreenplace.net/2010/01/22/weighted-random-generation-in-python/ # https://eli.thegreenplace.net/2010/01/22/weighted-random-generation-in-python/
def _weighted_choice_sub(self, weights) -> int: def _weighted_choice_sub(self, weights) -> int:
...@@ -1123,6 +1128,7 @@ class StateMechine: ...@@ -1123,6 +1128,7 @@ class StateMechine:
return i return i
raise CrashGenError("Unexpected no choice") raise CrashGenError("Unexpected no choice")
class Database: class Database:
''' We use this to represent an actual TDengine database inside a service instance, ''' We use this to represent an actual TDengine database inside a service instance,
possibly in a cluster environment. possibly in a cluster environment.
...@@ -1184,9 +1190,9 @@ class Database: ...@@ -1184,9 +1190,9 @@ class Database:
# start time will be auto generated , start at 10 years ago local time # start time will be auto generated , start at 10 years ago local time
local_time = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S.%f')[:-16] local_time = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S.%f')[:-16]
local_epoch_time = [int(i) for i in local_time.split("-")] local_epoch_time = [int(i) for i in local_time.split("-")]
#local_epoch_time will be such as : [2022, 7, 18] # local_epoch_time will be such as : [2022, 7, 18]
t1 = datetime.datetime(local_epoch_time[0]-5, local_epoch_time[1], local_epoch_time[2]) t1 = datetime.datetime(local_epoch_time[0] - 5, local_epoch_time[1], local_epoch_time[2])
t2 = datetime.datetime.now() t2 = datetime.datetime.now()
# maybe a very large number, takes 69 years to exceed Python int range # maybe a very large number, takes 69 years to exceed Python int range
elSec = int(t2.timestamp() - t1.timestamp()) elSec = int(t2.timestamp() - t1.timestamp())
...@@ -1194,7 +1200,8 @@ class Database: ...@@ -1194,7 +1200,8 @@ class Database:
500 # a number representing seconds within 10 years 500 # a number representing seconds within 10 years
# print("elSec = {}".format(elSec)) # print("elSec = {}".format(elSec))
t3 = datetime.datetime(local_epoch_time[0]-10, local_epoch_time[1], local_epoch_time[2]) # default "keep" is 10 years t3 = datetime.datetime(local_epoch_time[0] - 10, local_epoch_time[1],
local_epoch_time[2]) # default "keep" is 10 years
t4 = datetime.datetime.fromtimestamp( t4 = datetime.datetime.fromtimestamp(
t3.timestamp() + elSec2) # see explanation above t3.timestamp() + elSec2) # see explanation above
Logging.debug("Setting up TICKS to start from: {}".format(t4)) Logging.debug("Setting up TICKS to start from: {}".format(t4))
...@@ -1206,15 +1213,18 @@ class Database: ...@@ -1206,15 +1213,18 @@ class Database:
Fetch a timestamp tick, with some random factor, may not be unique. Fetch a timestamp tick, with some random factor, may not be unique.
''' '''
with cls._clsLock: # prevent duplicate tick with cls._clsLock: # prevent duplicate tick
if cls._lastLaggingTick is None or cls._lastTick is None : # not initialized if cls._lastLaggingTick is None or cls._lastTick is None: # not initialized
# 10k at 1/20 chance, should be enough to avoid overlaps # 10k at 1/20 chance, should be enough to avoid overlaps
tick = cls.setupLastTick() tick = cls.setupLastTick()
cls._lastTick = tick cls._lastTick = tick
cls._lastLaggingTick = tick + datetime.timedelta(0, -60*2) # lagging behind 2 minutes, should catch up fast cls._lastLaggingTick = tick + datetime.timedelta(0,
-60 * 2) # lagging behind 2 minutes, should catch up fast
# if : # should be quite a bit into the future # if : # should be quite a bit into the future
if Config.isSet('mix_oos_data') and Dice.throw(20) == 0: # if asked to do so, and 1 in 20 chance, return lagging tick if Config.isSet('mix_oos_data') and Dice.throw(
cls._lastLaggingTick += datetime.timedelta(0, 1) # pick the next sequence from the lagging tick sequence 20) == 0: # if asked to do so, and 1 in 20 chance, return lagging tick
cls._lastLaggingTick += datetime.timedelta(0,
1) # pick the next sequence from the lagging tick sequence
return cls._lastLaggingTick return cls._lastLaggingTick
else: # regular else: # regular
# add one second to it # add one second to it
...@@ -1334,8 +1344,6 @@ class Task(): ...@@ -1334,8 +1344,6 @@ class Task():
self._execStats = execStats self._execStats = execStats
self._db = db # A task is always associated/for a specific DB self._db = db # A task is always associated/for a specific DB
def isSuccess(self): def isSuccess(self):
return self._err is None return self._err is None
...@@ -1413,12 +1421,9 @@ class Task(): ...@@ -1413,12 +1421,9 @@ class Task():
0x396, # Database in creating status 0x396, # Database in creating status
0x386, # Database in droping status 0x386, # Database in droping status
0x03E1, # failed on tmq_subscribe ,topic not exist 0x03E1, # failed on tmq_subscribe ,topic not exist
0x03ed , # Topic must be dropped first, SQL: drop database db_0 0x03ed, # Topic must be dropped first, SQL: drop database db_0
0x0203 , # Invalid value 0x0203, # Invalid value
0x03f0 , # Stream already exist , topic already exists 0x03f0, # Stream already exist , topic already exists
1000 # REST catch-all error 1000 # REST catch-all error
]: ]:
...@@ -1430,7 +1435,7 @@ class Task(): ...@@ -1430,7 +1435,7 @@ class Task():
moreErrnos = [int(v, 0) for v in Config.getConfig().ignore_errors.split(',')] moreErrnos = [int(v, 0) for v in Config.getConfig().ignore_errors.split(',')]
if errno in moreErrnos: if errno in moreErrnos:
return True return True
elif errno == 0x200 : # invalid SQL, we need to div in a bit more elif errno == 0x200: # invalid SQL, we need to div in a bit more
if msg.find("invalid column name") != -1: if msg.find("invalid column name") != -1:
return True return True
elif msg.find("tags number not matched") != -1: # mismatched tags after modification elif msg.find("tags number not matched") != -1: # mismatched tags after modification
...@@ -1443,7 +1448,6 @@ class Task(): ...@@ -1443,7 +1448,6 @@ class Task():
return False # Not an acceptable error return False # Not an acceptable error
def execute(self, wt: WorkerThread): def execute(self, wt: WorkerThread):
wt.verifyThreadSelf() wt.verifyThreadSelf()
self._workerThread = wt # type: ignore self._workerThread = wt # type: ignore
...@@ -1485,7 +1489,8 @@ class Task(): ...@@ -1485,7 +1489,8 @@ class Task():
# raise # so that we see full stack # raise # so that we see full stack
traceback.print_exc() traceback.print_exc()
print( print(
"\n\n----------------------------\nProgram ABORTED Due to Unexpected TAOS Error: \n\n{}\n".format(errMsg) + "\n\n----------------------------\nProgram ABORTED Due to Unexpected TAOS Error: \n\n{}\n".format(
errMsg) +
"----------------------------\n") "----------------------------\n")
# sys.exit(-1) # sys.exit(-1)
self._err = err self._err = err
...@@ -1592,7 +1597,7 @@ class ExecutionStats: ...@@ -1592,7 +1597,7 @@ class ExecutionStats:
if klassName not in self._errors: if klassName not in self._errors:
self._errors[klassName] = {} self._errors[klassName] = {}
errors = self._errors[klassName] errors = self._errors[klassName]
errors[eno] = errors[eno]+1 if eno in errors else 1 errors[eno] = errors[eno] + 1 if eno in errors else 1
def beginTaskType(self, klassName): def beginTaskType(self, klassName):
with self._lock: with self._lock:
...@@ -1615,7 +1620,7 @@ class ExecutionStats: ...@@ -1615,7 +1620,7 @@ class ExecutionStats:
Logging.info( Logging.info(
"----------------------------------------------------------------------") "----------------------------------------------------------------------")
Logging.info( Logging.info(
"| Crash_Gen test {}, with the following stats:". format( "| Crash_Gen test {}, with the following stats:".format(
"FAILED (reason: {})".format( "FAILED (reason: {})".format(
self._failureReason) if self._failed else "SUCCEEDED")) self._failureReason) if self._failed else "SUCCEEDED"))
Logging.info("| Task Execution Times (success/total):") Logging.info("| Task Execution Times (success/total):")
...@@ -1648,7 +1653,7 @@ class ExecutionStats: ...@@ -1648,7 +1653,7 @@ class ExecutionStats:
Logging.info("| Active DB Native Connections (now): {}".format(DbConnNative.totalConnections)) Logging.info("| Active DB Native Connections (now): {}".format(DbConnNative.totalConnections))
Logging.info("| Longest native query time: {:.3f} seconds, started: {}". Logging.info("| Longest native query time: {:.3f} seconds, started: {}".
format(MyTDSql.longestQueryTime, format(MyTDSql.longestQueryTime,
time.strftime("%x %X", time.localtime(MyTDSql.lqStartTime))) ) time.strftime("%x %X", time.localtime(MyTDSql.lqStartTime))))
Logging.info("| Longest native query: {}".format(MyTDSql.longestQuery)) Logging.info("| Longest native query: {}".format(MyTDSql.longestQuery))
Logging.info( Logging.info(
"----------------------------------------------------------------------") "----------------------------------------------------------------------")
...@@ -1687,7 +1692,7 @@ class StateTransitionTask(Task): ...@@ -1687,7 +1692,7 @@ class StateTransitionTask(Task):
@classmethod @classmethod
def getRegTableName(cls, i): def getRegTableName(cls, i):
if ( StateTransitionTask._baseTableNumber is None): # Set it one time if (StateTransitionTask._baseTableNumber is None): # Set it one time
StateTransitionTask._baseTableNumber = Dice.throw( StateTransitionTask._baseTableNumber = Dice.throw(
999) if Config.getConfig().dynamic_db_table_names else 0 999) if Config.getConfig().dynamic_db_table_names else 0
return "reg_table_{}".format(StateTransitionTask._baseTableNumber + i) return "reg_table_{}".format(StateTransitionTask._baseTableNumber + i)
...@@ -1714,13 +1719,18 @@ class TaskCreateDb(StateTransitionTask): ...@@ -1714,13 +1719,18 @@ class TaskCreateDb(StateTransitionTask):
numReplica = Config.getConfig().num_replicas # fixed, always numReplica = Config.getConfig().num_replicas # fixed, always
repStr = "replica {}".format(numReplica) repStr = "replica {}".format(numReplica)
updatePostfix = "" if Config.getConfig().verify_data else "" # allow update only when "verify data" is active , 3.0 version default is update 1 updatePostfix = "" if Config.getConfig().verify_data else "" # allow update only when "verify data" is active , 3.0 version default is update 1
vg_nums = random.randint(1,8) vg_nums = random.randint(1, 8)
cache_model = Dice.choice(['none' , 'last_row' , 'last_value' , 'both']) cache_model = Dice.choice(['none', 'last_row', 'last_value', 'both'])
buffer = random.randint(3,128) buffer = random.randint(3, 128)
dbName = self._db.getName() dbName = self._db.getName()
self.execWtSql(wt, "create database {} {} {} vgroups {} cachemodel '{}' buffer {} ".format(dbName, repStr, updatePostfix, vg_nums, cache_model,buffer ) ) self.execWtSql(wt, "create database {} {} {} vgroups {} cachemodel '{}' buffer {} ".format(dbName, repStr,
updatePostfix,
vg_nums,
cache_model,
buffer))
if dbName == "db_0" and Config.getConfig().use_shadow_db: if dbName == "db_0" and Config.getConfig().use_shadow_db:
self.execWtSql(wt, "create database {} {} {} ".format("db_s", repStr, updatePostfix ) ) self.execWtSql(wt, "create database {} {} {} ".format("db_s", repStr, updatePostfix))
class TaskDropDb(StateTransitionTask): class TaskDropDb(StateTransitionTask):
@classmethod @classmethod
...@@ -1734,7 +1744,8 @@ class TaskDropDb(StateTransitionTask): ...@@ -1734,7 +1744,8 @@ class TaskDropDb(StateTransitionTask):
def _executeInternal(self, te: TaskExecutor, wt: WorkerThread): def _executeInternal(self, te: TaskExecutor, wt: WorkerThread):
try: try:
self.queryWtSql(wt, "drop database {}".format(self._db.getName())) # drop database maybe failed ,because topic exists self.queryWtSql(wt, "drop database {}".format(
self._db.getName())) # drop database maybe failed ,because topic exists
except taos.error.ProgrammingError as err: except taos.error.ProgrammingError as err:
errno = Helper.convertErrno(err.errno) errno = Helper.convertErrno(err.errno)
if errno in [0x0203]: # drop maybe failed if errno in [0x0203]: # drop maybe failed
...@@ -1756,9 +1767,9 @@ class TaskCreateStream(StateTransitionTask): ...@@ -1756,9 +1767,9 @@ class TaskCreateStream(StateTransitionTask):
def _executeInternal(self, te: TaskExecutor, wt: WorkerThread): def _executeInternal(self, te: TaskExecutor, wt: WorkerThread):
dbname = self._db.getName() dbname = self._db.getName()
sub_stream_name = dbname+ '_sub_stream' sub_stream_name = dbname + '_sub_stream'
sub_stream_tb_name = 'stream_tb_sub' sub_stream_tb_name = 'stream_tb_sub'
super_stream_name = dbname+ '_super_stream' super_stream_name = dbname + '_super_stream'
super_stream_tb_name = 'stream_tb_super' super_stream_tb_name = 'stream_tb_super'
if not self._db.exists(wt.getDbConn()): if not self._db.exists(wt.getDbConn()):
Logging.debug("Skipping task, no DB yet") Logging.debug("Skipping task, no DB yet")
...@@ -1766,10 +1777,11 @@ class TaskCreateStream(StateTransitionTask): ...@@ -1766,10 +1777,11 @@ class TaskCreateStream(StateTransitionTask):
sTable = self._db.getFixedSuperTable() # type: TdSuperTable sTable = self._db.getFixedSuperTable() # type: TdSuperTable
# wt.execSql("use db") # should always be in place # wt.execSql("use db") # should always be in place
stbname =sTable.getName() stbname = sTable.getName()
sub_tables = sTable.getRegTables(wt.getDbConn()) sub_tables = sTable.getRegTables(wt.getDbConn())
aggExpr = Dice.choice([ aggExpr = Dice.choice([
'count(*)', 'avg(speed)', 'sum(speed)', 'stddev(speed)','min(speed)', 'max(speed)', 'first(speed)', 'last(speed)', 'count(*)', 'avg(speed)', 'sum(speed)', 'stddev(speed)', 'min(speed)', 'max(speed)', 'first(speed)',
'last(speed)',
'apercentile(speed, 10)', 'last_row(*)', 'twa(speed)']) 'apercentile(speed, 10)', 'last_row(*)', 'twa(speed)'])
stream_sql = '' # set default value stream_sql = '' # set default value
...@@ -1777,11 +1789,11 @@ class TaskCreateStream(StateTransitionTask): ...@@ -1777,11 +1789,11 @@ class TaskCreateStream(StateTransitionTask):
if sub_tables: if sub_tables:
sub_tbname = sub_tables[0] sub_tbname = sub_tables[0]
# create stream with query above sub_table # create stream with query above sub_table
stream_sql = 'create stream {} into {}.{} as select {}, avg(speed) FROM {}.{} PARTITION BY tbname INTERVAL(5s) SLIDING(3s) '.\ stream_sql = 'create stream {} into {}.{} as select {}, avg(speed) FROM {}.{} PARTITION BY tbname INTERVAL(5s) SLIDING(3s) '. \
format(sub_stream_name,dbname,sub_stream_tb_name ,aggExpr,dbname,sub_tbname) format(sub_stream_name, dbname, sub_stream_tb_name, aggExpr, dbname, sub_tbname)
else: else:
stream_sql = 'create stream {} into {}.{} as select {}, avg(speed) FROM {}.{} PARTITION BY tbname INTERVAL(5s) SLIDING(3s) '.\ stream_sql = 'create stream {} into {}.{} as select {}, avg(speed) FROM {}.{} PARTITION BY tbname INTERVAL(5s) SLIDING(3s) '. \
format(super_stream_name,dbname,super_stream_tb_name,aggExpr, dbname,stbname) format(super_stream_name, dbname, super_stream_tb_name, aggExpr, dbname, stbname)
self.execWtSql(wt, stream_sql) self.execWtSql(wt, stream_sql)
Logging.debug("[OPS] stream is creating at {}".format(time.time())) Logging.debug("[OPS] stream is creating at {}".format(time.time()))
...@@ -1799,10 +1811,10 @@ class TaskCreateTopic(StateTransitionTask): ...@@ -1799,10 +1811,10 @@ class TaskCreateTopic(StateTransitionTask):
def _executeInternal(self, te: TaskExecutor, wt: WorkerThread): def _executeInternal(self, te: TaskExecutor, wt: WorkerThread):
dbname = self._db.getName() dbname = self._db.getName()
sub_topic_name = dbname+ '_sub_topic' sub_topic_name = dbname + '_sub_topic'
super_topic_name = dbname+ '_super_topic' super_topic_name = dbname + '_super_topic'
stable_topic = dbname+ '_stable_topic' stable_topic = dbname + '_stable_topic'
db_topic = 'database_' + dbname+ '_topics' db_topic = 'database_' + dbname + '_topics'
if not self._db.exists(wt.getDbConn()): if not self._db.exists(wt.getDbConn()):
Logging.debug("Skipping task, no DB yet") Logging.debug("Skipping task, no DB yet")
return return
...@@ -1811,27 +1823,33 @@ class TaskCreateTopic(StateTransitionTask): ...@@ -1811,27 +1823,33 @@ class TaskCreateTopic(StateTransitionTask):
# wt.execSql("use db") # should always be in place # wt.execSql("use db") # should always be in place
# create topic if not exists topic_ctb_column as select ts, c1, c2, c3 from stb1; # create topic if not exists topic_ctb_column as select ts, c1, c2, c3 from stb1;
stbname =sTable.getName() stbname = sTable.getName()
sub_tables = sTable.getRegTables(wt.getDbConn()) sub_tables = sTable.getRegTables(wt.getDbConn())
scalarExpr = Dice.choice([ '*','speed','color','abs(speed)','acos(speed)','asin(speed)','atan(speed)','ceil(speed)','cos(speed)','cos(speed)', scalarExpr = Dice.choice(
'floor(speed)','log(speed,2)','pow(speed,2)','round(speed)','sin(speed)','sqrt(speed)','char_length(color)','concat(color,color)', ['*', 'speed', 'color', 'abs(speed)', 'acos(speed)', 'asin(speed)', 'atan(speed)', 'ceil(speed)',
'concat_ws(" ", color,color," ")','length(color)', 'lower(color)', 'ltrim(color)','substr(color , 2)','upper(color)','cast(speed as double)', 'cos(speed)', 'cos(speed)',
'floor(speed)', 'log(speed,2)', 'pow(speed,2)', 'round(speed)', 'sin(speed)', 'sqrt(speed)',
'char_length(color)', 'concat(color,color)',
'concat_ws(" ", color,color," ")', 'length(color)', 'lower(color)', 'ltrim(color)', 'substr(color , 2)',
'upper(color)', 'cast(speed as double)',
'cast(ts as bigint)']) 'cast(ts as bigint)'])
topic_sql = '' # set default value topic_sql = '' # set default value
if Dice.throw(3)==0: # create topic : source data from sub query if Dice.throw(3) == 0: # create topic : source data from sub query
if sub_tables: # if not empty if sub_tables: # if not empty
sub_tbname = sub_tables[0] sub_tbname = sub_tables[0]
# create topic : source data from sub query of sub stable # create topic : source data from sub query of sub stable
topic_sql = 'create topic {} as select {} FROM {}.{} ; '.format(sub_topic_name,scalarExpr,dbname,sub_tbname) topic_sql = 'create topic {} as select {} FROM {}.{} ; '.format(sub_topic_name, scalarExpr, dbname,
sub_tbname)
else: # create topic : source data from sub query of stable else: # create topic : source data from sub query of stable
topic_sql = 'create topic {} as select {} FROM {}.{} '.format(super_topic_name,scalarExpr, dbname,stbname) topic_sql = 'create topic {} as select {} FROM {}.{} '.format(super_topic_name, scalarExpr, dbname,
elif Dice.throw(3)==1: # create topic : source data from super table stbname)
topic_sql = 'create topic {} AS STABLE {}.{} '.format(stable_topic,dbname,stbname) elif Dice.throw(3) == 1: # create topic : source data from super table
topic_sql = 'create topic {} AS STABLE {}.{} '.format(stable_topic, dbname, stbname)
elif Dice.throw(3)==2: # create topic : source data from whole database elif Dice.throw(3) == 2: # create topic : source data from whole database
topic_sql = 'create topic {} AS DATABASE {} '.format(db_topic,dbname) topic_sql = 'create topic {} AS DATABASE {} '.format(db_topic, dbname)
else: else:
pass pass
...@@ -1840,6 +1858,7 @@ class TaskCreateTopic(StateTransitionTask): ...@@ -1840,6 +1858,7 @@ class TaskCreateTopic(StateTransitionTask):
self.execWtSql(wt, topic_sql) self.execWtSql(wt, topic_sql)
Logging.debug("[OPS] db topic is creating at {}".format(time.time())) Logging.debug("[OPS] db topic is creating at {}".format(time.time()))
class TaskDropTopics(StateTransitionTask): class TaskDropTopics(StateTransitionTask):
@classmethod @classmethod
...@@ -1853,7 +1872,6 @@ class TaskDropTopics(StateTransitionTask): ...@@ -1853,7 +1872,6 @@ class TaskDropTopics(StateTransitionTask):
def _executeInternal(self, te: TaskExecutor, wt: WorkerThread): def _executeInternal(self, te: TaskExecutor, wt: WorkerThread):
dbname = self._db.getName() dbname = self._db.getName()
if not self._db.exists(wt.getDbConn()): if not self._db.exists(wt.getDbConn()):
Logging.debug("Skipping task, no DB yet") Logging.debug("Skipping task, no DB yet")
return return
...@@ -1862,8 +1880,9 @@ class TaskDropTopics(StateTransitionTask): ...@@ -1862,8 +1880,9 @@ class TaskDropTopics(StateTransitionTask):
# wt.execSql("use db") # should always be in place # wt.execSql("use db") # should always be in place
tblName = sTable.getName() tblName = sTable.getName()
if sTable.hasTopics(wt.getDbConn()): if sTable.hasTopics(wt.getDbConn()):
sTable.dropTopics(wt.getDbConn(),dbname,None) # drop topics of database sTable.dropTopics(wt.getDbConn(), dbname, None) # drop topics of database
sTable.dropTopics(wt.getDbConn(),dbname,tblName) # drop topics of stable sTable.dropTopics(wt.getDbConn(), dbname, tblName) # drop topics of stable
class TaskDropStreams(StateTransitionTask): class TaskDropStreams(StateTransitionTask):
...@@ -1878,7 +1897,6 @@ class TaskDropStreams(StateTransitionTask): ...@@ -1878,7 +1897,6 @@ class TaskDropStreams(StateTransitionTask):
def _executeInternal(self, te: TaskExecutor, wt: WorkerThread): def _executeInternal(self, te: TaskExecutor, wt: WorkerThread):
# dbname = self._db.getName() # dbname = self._db.getName()
if not self._db.exists(wt.getDbConn()): if not self._db.exists(wt.getDbConn()):
Logging.debug("Skipping task, no DB yet") Logging.debug("Skipping task, no DB yet")
return return
...@@ -1889,6 +1907,7 @@ class TaskDropStreams(StateTransitionTask): ...@@ -1889,6 +1907,7 @@ class TaskDropStreams(StateTransitionTask):
if sTable.hasStreams(wt.getDbConn()): if sTable.hasStreams(wt.getDbConn()):
sTable.dropStreams(wt.getDbConn()) # drop stream of database sTable.dropStreams(wt.getDbConn()) # drop stream of database
class TaskDropStreamTables(StateTransitionTask): class TaskDropStreamTables(StateTransitionTask):
@classmethod @classmethod
...@@ -1902,7 +1921,6 @@ class TaskDropStreamTables(StateTransitionTask): ...@@ -1902,7 +1921,6 @@ class TaskDropStreamTables(StateTransitionTask):
def _executeInternal(self, te: TaskExecutor, wt: WorkerThread): def _executeInternal(self, te: TaskExecutor, wt: WorkerThread):
# dbname = self._db.getName() # dbname = self._db.getName()
if not self._db.exists(wt.getDbConn()): if not self._db.exists(wt.getDbConn()):
Logging.debug("Skipping task, no DB yet") Logging.debug("Skipping task, no DB yet")
return return
...@@ -1913,6 +1931,7 @@ class TaskDropStreamTables(StateTransitionTask): ...@@ -1913,6 +1931,7 @@ class TaskDropStreamTables(StateTransitionTask):
if sTable.hasStreamTables(wt.getDbConn()): if sTable.hasStreamTables(wt.getDbConn()):
sTable.dropStreamTables(wt.getDbConn()) # drop stream tables sTable.dropStreamTables(wt.getDbConn()) # drop stream tables
class TaskCreateConsumers(StateTransitionTask): class TaskCreateConsumers(StateTransitionTask):
@classmethod @classmethod
...@@ -1930,7 +1949,7 @@ class TaskCreateConsumers(StateTransitionTask): ...@@ -1930,7 +1949,7 @@ class TaskCreateConsumers(StateTransitionTask):
sTable = self._db.getFixedSuperTable() # type: TdSuperTable sTable = self._db.getFixedSuperTable() # type: TdSuperTable
# wt.execSql("use db") # should always be in place # wt.execSql("use db") # should always be in place
if sTable.hasTopics(wt.getDbConn()): if sTable.hasTopics(wt.getDbConn()):
sTable.createConsumer(wt.getDbConn(),random.randint(1,10)) sTable.createConsumer(wt.getDbConn(), random.randint(1, 10))
pass pass
else: else:
print(" restful not support tmq consumers") print(" restful not support tmq consumers")
...@@ -1974,10 +1993,9 @@ class TdSuperTable: ...@@ -1974,10 +1993,9 @@ class TdSuperTable:
def getName(self):
    """Return the super table's name as stored at construction time."""
    stName = self._stName
    return stName
def drop(self, dbc, skipCheck=False):
def drop(self, dbc, skipCheck = False):
dbName = self._dbName dbName = self._dbName
if self.exists(dbc) : # if myself exists if self.exists(dbc): # if myself exists
fullTableName = dbName + '.' + self._stName fullTableName = dbName + '.' + self._stName
dbc.execute("DROP TABLE {}".format(fullTableName)) dbc.execute("DROP TABLE {}".format(fullTableName))
else: else:
...@@ -1989,7 +2007,7 @@ class TdSuperTable: ...@@ -1989,7 +2007,7 @@ class TdSuperTable:
return dbc.existsSuperTable(self._stName) return dbc.existsSuperTable(self._stName)
# TODO: odd semantic, create() method is usually static? # TODO: odd semantic, create() method is usually static?
def create(self, dbc, cols: TdColumns, tags: TdTags, dropIfExists = False): def create(self, dbc, cols: TdColumns, tags: TdTags, dropIfExists=False):
'''Creating a super table''' '''Creating a super table'''
dbName = self._dbName dbName = self._dbName
...@@ -2006,44 +2024,35 @@ class TdSuperTable: ...@@ -2006,44 +2024,35 @@ class TdSuperTable:
# Now let's create # Now let's create
sql = "CREATE TABLE {} ({})".format( sql = "CREATE TABLE {} ({})".format(
fullTableName, fullTableName,
",".join(['%s %s'%(k,v.value) for (k,v) in cols.items()])) ",".join(['%s %s' % (k, v.value) for (k, v) in cols.items()]))
if tags : if tags:
sql += " TAGS ({})".format( sql += " TAGS ({})".format(
",".join(['%s %s'%(k,v.value) for (k,v) in tags.items()]) ",".join(['%s %s' % (k, v.value) for (k, v) in tags.items()])
) )
else: else:
sql += " TAGS (dummy int) " sql += " TAGS (dummy int) "
dbc.execute(sql) dbc.execute(sql)
def createConsumer(self, dbc,Consumer_nums): def createConsumer(self, dbc, Consumer_nums):
def generateConsumer(current_topic_list): def generateConsumer(current_topic_list):
conf = TaosTmqConf() consumer = Consumer({"group.id": "tg2", "td.connect.user": "root", "td.connect.pass": "taosdata"})
conf.set("group.id", "tg2") topic_list = []
conf.set("td.connect.user", "root")
conf.set("td.connect.pass", "taosdata")
# conf.set("enable.auto.commit", "true")
# def tmq_commit_cb_print(tmq, resp, offset, param=None):
# print(f"commit: {resp}, tmq: {tmq}, offset: {offset}, param: {param}")
# conf.set_auto_commit_cb(tmq_commit_cb_print, None)
consumer = conf.new_consumer()
topic_list = TaosTmqList()
for topic in current_topic_list: for topic in current_topic_list:
topic_list.append(topic) topic_list.append(topic)
try:
consumer.subscribe(topic_list) consumer.subscribe(topic_list)
except TmqError as e :
pass
# consumer with random work life # consumer with random work life
time_start = time.time() time_start = time.time()
while 1: while 1:
res = consumer.poll(1000) res = consumer.poll(1)
if time.time() - time_start >random.randint(5,50) : consumer.commit(res)
if time.time() - time_start > random.randint(5, 50):
break break
try: try:
consumer.unsubscribe() consumer.unsubscribe()
except TmqError as e : except TmqError as e:
pass pass
return return
...@@ -2067,14 +2076,16 @@ class TdSuperTable: ...@@ -2067,14 +2076,16 @@ class TdSuperTable:
def getRegTables(self, dbc: DbConn):
    """Return the names of the regular (child) tables under this super table.

    Issues a DISTINCT TBNAME query against the super table; on query failure
    the converted errno is logged at debug level and the error is re-raised.
    """
    dbName = self._dbName
    sql = "select distinct TBNAME from {}.{}".format(dbName, self._stName)
    try:
        dbc.query(sql)  # TODO: analyze result set later
    except taos.error.ProgrammingError as err:
        errno2 = Helper.convertErrno(err.errno)
        Logging.debug("[=] Failed to get tables from super table: errno=0x{:X}, msg: {}".format(errno2, err))
        raise
    # First column of each result row is a table name.
    return [row[0] for row in dbc.getQueryResult()]
def hasRegTables(self, dbc: DbConn): def hasRegTables(self, dbc: DbConn):
...@@ -2084,28 +2095,28 @@ class TdSuperTable: ...@@ -2084,28 +2095,28 @@ class TdSuperTable:
else: else:
return False return False
def hasStreamTables(self, dbc: DbConn):
    """True if this database already holds stream result super tables (stream_tb*)."""
    sql = "show {}.stables like 'stream_tb%'".format(self._dbName)
    rowCount = dbc.query(sql)
    return rowCount > 0
def hasStreams(self, dbc: DbConn):
    """True if any stream currently exists on the server."""
    return 0 < dbc.query("show streams")
def hasTopics(self, dbc: DbConn):
    """True if any topic currently exists on the server."""
    return 0 < dbc.query("show topics")
def dropTopics(self,dbc: DbConn , dbname=None,stb_name=None): def dropTopics(self, dbc: DbConn, dbname=None, stb_name=None):
dbc.query("show topics ") dbc.query("show topics ")
topics = dbc.getQueryResult() topics = dbc.getQueryResult()
if dbname !=None and stb_name == None : if dbname != None and stb_name == None:
for topic in topics: for topic in topics:
if dbname in topic[0] and topic[0].startswith("database"): if dbname in topic[0] and topic[0].startswith("database"):
try: try:
dbc.execute('drop topic {}'.format(topic[0])) dbc.execute('drop topic {}'.format(topic[0]))
Logging.debug("[OPS] topic {} is droping at {}".format(topic,time.time())) Logging.debug("[OPS] topic {} is droping at {}".format(topic, time.time()))
except taos.error.ProgrammingError as err: except taos.error.ProgrammingError as err:
errno = Helper.convertErrno(err.errno) errno = Helper.convertErrno(err.errno)
if errno in [0x03EB]: # Topic subscribed cannot be dropped if errno in [0x03EB]: # Topic subscribed cannot be dropped
...@@ -2117,17 +2128,17 @@ class TdSuperTable: ...@@ -2117,17 +2128,17 @@ class TdSuperTable:
pass pass
return True return True
elif dbname !=None and stb_name!= None: elif dbname != None and stb_name != None:
for topic in topics: for topic in topics:
if topic[0].startswith(self._dbName) and topic[0].endswith('topic'): if topic[0].startswith(self._dbName) and topic[0].endswith('topic'):
dbc.execute('drop topic {}'.format(topic[0])) dbc.execute('drop topic {}'.format(topic[0]))
Logging.debug("[OPS] topic {} is droping at {}".format(topic,time.time())) Logging.debug("[OPS] topic {} is droping at {}".format(topic, time.time()))
return True return True
else: else:
return True return True
pass pass
def dropStreams(self,dbc:DbConn): def dropStreams(self, dbc: DbConn):
dbc.query("show streams ") dbc.query("show streams ")
Streams = dbc.getQueryResult() Streams = dbc.getQueryResult()
for Stream in Streams: for Stream in Streams:
...@@ -2143,7 +2154,7 @@ class TdSuperTable: ...@@ -2143,7 +2154,7 @@ class TdSuperTable:
for StreamTable in StreamTables: for StreamTable in StreamTables:
if self.dropStreams(dbc): if self.dropStreams(dbc):
dbc.execute('drop table {}.{}'.format(self._dbName,StreamTable[0])) dbc.execute('drop table {}.{}'.format(self._dbName, StreamTable[0]))
return not dbc.query("show {}.stables like 'stream_tb%'".format(self._dbName)) return not dbc.query("show {}.stables like 'stream_tb%'".format(self._dbName))
...@@ -2155,7 +2166,7 @@ class TdSuperTable: ...@@ -2155,7 +2166,7 @@ class TdSuperTable:
''' '''
dbName = self._dbName dbName = self._dbName
sql = "select tbname from {}.{} where tbname in ('{}')".format(dbName, self._stName, regTableName) sql = "select tbname from {}.{} where tbname in ('{}')".format(dbName, self._stName, regTableName)
if dbc.query(sql) >= 1 : # reg table exists already if dbc.query(sql) >= 1: # reg table exists already
return return
# acquire a lock first, so as to be able to *verify*. More details in TD-1471 # acquire a lock first, so as to be able to *verify*. More details in TD-1471
...@@ -2179,14 +2190,14 @@ class TdSuperTable: ...@@ -2179,14 +2190,14 @@ class TdSuperTable:
task.unlockTable(fullTableName) # no matter what task.unlockTable(fullTableName) # no matter what
# Logging.info("Table unlocked after creation: {}".format(fullTableName)) # Logging.info("Table unlocked after creation: {}".format(fullTableName))
def _getTagStrForSql(self, dbc) : def _getTagStrForSql(self, dbc):
tags = self._getTags(dbc) tags = self._getTags(dbc)
tagStrs = [] tagStrs = []
for tagName in tags: for tagName in tags:
tagType = tags[tagName] tagType = tags[tagName]
if tagType == 'BINARY': if tagType == 'BINARY':
tagStrs.append("'Beijing-Shanghai-LosAngeles'") tagStrs.append("'Beijing-Shanghai-LosAngeles'")
elif tagType== 'VARCHAR': elif tagType == 'VARCHAR':
tagStrs.append("'London-Paris-Berlin'") tagStrs.append("'London-Paris-Berlin'")
elif tagType == 'FLOAT': elif tagType == 'FLOAT':
tagStrs.append('9.9') tagStrs.append('9.9')
...@@ -2200,7 +2211,7 @@ class TdSuperTable: ...@@ -2200,7 +2211,7 @@ class TdSuperTable:
dbc.query("DESCRIBE {}.{}".format(self._dbName, self._stName)) dbc.query("DESCRIBE {}.{}".format(self._dbName, self._stName))
stCols = dbc.getQueryResult() stCols = dbc.getQueryResult()
# print(stCols) # print(stCols)
ret = {row[0]:row[1] for row in stCols if row[3]=='TAG'} # name:type ret = {row[0]: row[1] for row in stCols if row[3] == 'TAG'} # name:type
# print("Tags retrieved: {}".format(ret)) # print("Tags retrieved: {}".format(ret))
return ret return ret
...@@ -2317,11 +2328,10 @@ class TdSuperTable: ...@@ -2317,11 +2328,10 @@ class TdSuperTable:
]) # TODO: add more from 'top' ]) # TODO: add more from 'top'
# if aggExpr not in ['stddev(speed)']: # STDDEV not valid for super tables?! (Done in TD-1049) # if aggExpr not in ['stddev(speed)']: # STDDEV not valid for super tables?! (Done in TD-1049)
sql = "select {} from {}.{}".format(aggExpr, self._dbName, self.getName()) sql = "select {} from {}.{}".format(aggExpr, self._dbName, self.getName())
if Dice.throw(3) == 0: # 1 in X chance if Dice.throw(3) == 0: # 1 in X chance
partion_expr = Dice.choice(['color','tbname']) partion_expr = Dice.choice(['color', 'tbname'])
sql = sql + ' partition BY ' + partion_expr + ' order by ' + partion_expr sql = sql + ' partition BY ' + partion_expr + ' order by ' + partion_expr
Progress.emit(Progress.QUERY_GROUP_BY) Progress.emit(Progress.QUERY_GROUP_BY)
# Logging.info("Executing GROUP-BY query: " + sql) # Logging.info("Executing GROUP-BY query: " + sql)
...@@ -2329,6 +2339,7 @@ class TdSuperTable: ...@@ -2329,6 +2339,7 @@ class TdSuperTable:
return ret return ret
class TaskReadData(StateTransitionTask): class TaskReadData(StateTransitionTask):
@classmethod @classmethod
def getEndState(cls): def getEndState(cls):
...@@ -2345,7 +2356,7 @@ class TaskReadData(StateTransitionTask): ...@@ -2345,7 +2356,7 @@ class TaskReadData(StateTransitionTask):
def _reconnectIfNeeded(self, wt): def _reconnectIfNeeded(self, wt):
# 1 in 20 chance, simulate a broken connection, only if service stable (not restarting) # 1 in 20 chance, simulate a broken connection, only if service stable (not restarting)
if random.randrange(20)==0: # and self._canRestartService(): # TODO: break connection in all situations if random.randrange(20) == 0: # and self._canRestartService(): # TODO: break connection in all situations
# Logging.info("Attempting to reconnect to server") # TODO: change to DEBUG # Logging.info("Attempting to reconnect to server") # TODO: change to DEBUG
Progress.emit(Progress.SERVICE_RECONNECT_START) Progress.emit(Progress.SERVICE_RECONNECT_START)
try: try:
...@@ -2368,7 +2379,6 @@ class TaskReadData(StateTransitionTask): ...@@ -2368,7 +2379,6 @@ class TaskReadData(StateTransitionTask):
# by now, causing error below to be incorrectly handled due to timing issue # by now, causing error below to be incorrectly handled due to timing issue
return # TODO: fix server restart status race condtion return # TODO: fix server restart status race condtion
def _executeInternal(self, te: TaskExecutor, wt: WorkerThread): def _executeInternal(self, te: TaskExecutor, wt: WorkerThread):
self._reconnectIfNeeded(wt) self._reconnectIfNeeded(wt)
...@@ -2386,6 +2396,7 @@ class TaskReadData(StateTransitionTask): ...@@ -2386,6 +2396,7 @@ class TaskReadData(StateTransitionTask):
Logging.debug("[=] Read Failure: errno=0x{:X}, msg: {}, SQL: {}".format(errno2, err, dbc.getLastSql())) Logging.debug("[=] Read Failure: errno=0x{:X}, msg: {}, SQL: {}".format(errno2, err, dbc.getLastSql()))
raise raise
class SqlQuery: class SqlQuery:
@classmethod @classmethod
def buildRandom(cls, db: Database): def buildRandom(cls, db: Database):
...@@ -2393,12 +2404,13 @@ class SqlQuery: ...@@ -2393,12 +2404,13 @@ class SqlQuery:
dbName = db.getName() dbName = db.getName()
def __init__(self, sql:str = None): def __init__(self, sql: str = None):
self._sql = sql self._sql = sql
def getSql(self):
    """Return the wrapped SQL string (may be None if never set)."""
    return self._sql
class TaskDropSuperTable(StateTransitionTask): class TaskDropSuperTable(StateTransitionTask):
@classmethod @classmethod
def getEndState(cls): def getEndState(cls):
...@@ -2430,7 +2442,6 @@ class TaskDropSuperTable(StateTransitionTask): ...@@ -2430,7 +2442,6 @@ class TaskDropSuperTable(StateTransitionTask):
Logging.debug("[DB] Acceptable error when dropping a table") Logging.debug("[DB] Acceptable error when dropping a table")
continue # try to delete next regular table continue # try to delete next regular table
if (not tickOutput): if (not tickOutput):
tickOutput = True # Print only one time tickOutput = True # Print only one time
if isSuccess: if isSuccess:
...@@ -2443,8 +2454,6 @@ class TaskDropSuperTable(StateTransitionTask): ...@@ -2443,8 +2454,6 @@ class TaskDropSuperTable(StateTransitionTask):
self.execWtSql(wt, "drop table {}.{}".format(self._db.getName(), tblName)) self.execWtSql(wt, "drop table {}.{}".format(self._db.getName(), tblName))
class TaskAlterTags(StateTransitionTask): class TaskAlterTags(StateTransitionTask):
@classmethod @classmethod
def getEndState(cls): def getEndState(cls):
...@@ -2472,6 +2481,7 @@ class TaskAlterTags(StateTransitionTask): ...@@ -2472,6 +2481,7 @@ class TaskAlterTags(StateTransitionTask):
sTable.changeTag(dbc, "extraTag", "newTag") sTable.changeTag(dbc, "extraTag", "newTag")
# sql = "alter table db.{} change tag extraTag newTag".format(tblName) # sql = "alter table db.{} change tag extraTag newTag".format(tblName)
class TaskRestartService(StateTransitionTask): class TaskRestartService(StateTransitionTask):
_isRunning = False _isRunning = False
_classLock = threading.Lock() _classLock = threading.Lock()
...@@ -2487,6 +2497,7 @@ class TaskRestartService(StateTransitionTask): ...@@ -2487,6 +2497,7 @@ class TaskRestartService(StateTransitionTask):
return False # don't run this otherwise return False # don't run this otherwise
CHANCE_TO_RESTART_SERVICE = 200 CHANCE_TO_RESTART_SERVICE = 200
def _executeInternal(self, te: TaskExecutor, wt: WorkerThread): def _executeInternal(self, te: TaskExecutor, wt: WorkerThread):
if not Config.getConfig().auto_start_service: # only execute when we are in -a mode if not Config.getConfig().auto_start_service: # only execute when we are in -a mode
print("_a", end="", flush=True) print("_a", end="", flush=True)
...@@ -2500,11 +2511,13 @@ class TaskRestartService(StateTransitionTask): ...@@ -2500,11 +2511,13 @@ class TaskRestartService(StateTransitionTask):
if Dice.throw(self.CHANCE_TO_RESTART_SERVICE) == 0: # 1 in N chance if Dice.throw(self.CHANCE_TO_RESTART_SERVICE) == 0: # 1 in N chance
dbc = wt.getDbConn() dbc = wt.getDbConn()
dbc.execute("select * from information_schema.ins_databases") # simple delay, align timing with other workers dbc.execute(
"select * from information_schema.ins_databases") # simple delay, align timing with other workers
gSvcMgr.restart() gSvcMgr.restart()
self._isRunning = False self._isRunning = False
class TaskAddData(StateTransitionTask): class TaskAddData(StateTransitionTask):
# Track which table is being actively worked on # Track which table is being actively worked on
activeTable: Set[int] = set() activeTable: Set[int] = set()
...@@ -2532,7 +2545,7 @@ class TaskAddData(StateTransitionTask): ...@@ -2532,7 +2545,7 @@ class TaskAddData(StateTransitionTask):
def canBeginFrom(cls, state: AnyState): def canBeginFrom(cls, state: AnyState):
return state.canAddData() return state.canAddData()
def _lockTableIfNeeded(self, fullTableName, extraMsg = ''): def _lockTableIfNeeded(self, fullTableName, extraMsg=''):
if Config.getConfig().verify_data: if Config.getConfig().verify_data:
# Logging.info("Locking table: {}".format(fullTableName)) # Logging.info("Locking table: {}".format(fullTableName))
self.lockTable(fullTableName) self.lockTable(fullTableName)
...@@ -2571,8 +2584,6 @@ class TaskAddData(StateTransitionTask): ...@@ -2571,8 +2584,6 @@ class TaskAddData(StateTransitionTask):
# Logging.info("Data added in batch: {}".format(sql)) # Logging.info("Data added in batch: {}".format(sql))
self._unlockTableIfNeeded(fullTableName) self._unlockTableIfNeeded(fullTableName)
def _addData(self, db: Database, dbc, regTableName, te: TaskExecutor): # implied: NOT in batches def _addData(self, db: Database, dbc, regTableName, te: TaskExecutor): # implied: NOT in batches
numRecords = self.LARGE_NUMBER_OF_RECORDS if Config.getConfig().larger_data else self.SMALL_NUMBER_OF_RECORDS numRecords = self.LARGE_NUMBER_OF_RECORDS if Config.getConfig().larger_data else self.SMALL_NUMBER_OF_RECORDS
...@@ -2590,7 +2601,8 @@ class TaskAddData(StateTransitionTask): ...@@ -2590,7 +2601,8 @@ class TaskAddData(StateTransitionTask):
# TODO: too ugly trying to lock the table reliably, refactor... # TODO: too ugly trying to lock the table reliably, refactor...
fullTableName = db.getName() + '.' + regTableName fullTableName = db.getName() + '.' + regTableName
self._lockTableIfNeeded(fullTableName) # so that we are verify read-back. TODO: deal with exceptions before unlock self._lockTableIfNeeded(
fullTableName) # so that we are verify read-back. TODO: deal with exceptions before unlock
try: try:
sql = "INSERT INTO {} VALUES ('{}', {}, '{}');".format( # removed: tags ('{}', {}) sql = "INSERT INTO {} VALUES ('{}', {}, '{}');".format( # removed: tags ('{}', {})
...@@ -2604,7 +2616,8 @@ class TaskAddData(StateTransitionTask): ...@@ -2604,7 +2616,8 @@ class TaskAddData(StateTransitionTask):
intWrote = intToWrite intWrote = intToWrite
# Quick hack, attach an update statement here. TODO: create an "update" task # Quick hack, attach an update statement here. TODO: create an "update" task
if (not Config.getConfig().use_shadow_db) and Dice.throw(5) == 0: # 1 in N chance, plus not using shaddow DB if (not Config.getConfig().use_shadow_db) and Dice.throw(
5) == 0: # 1 in N chance, plus not using shaddow DB
intToUpdate = db.getNextInt() # Updated, but should not succeed intToUpdate = db.getNextInt() # Updated, but should not succeed
nextColor = db.getNextColor() nextColor = db.getNextColor()
sql = "INSERt INTO {} VALUES ('{}', {}, '{}');".format( # "INSERt" means "update" here sql = "INSERt INTO {} VALUES ('{}', {}, '{}');".format( # "INSERt" means "update" here
...@@ -2624,7 +2637,7 @@ class TaskAddData(StateTransitionTask): ...@@ -2624,7 +2637,7 @@ class TaskAddData(StateTransitionTask):
try: try:
readBack = dbc.queryScalar("SELECT speed from {}.{} WHERE ts='{}'". readBack = dbc.queryScalar("SELECT speed from {}.{} WHERE ts='{}'".
format(db.getName(), regTableName, nextTick)) format(db.getName(), regTableName, nextTick))
if readBack != intWrote : if readBack != intWrote:
raise taos.error.ProgrammingError( raise taos.error.ProgrammingError(
"Failed to read back same data, wrote: {}, read: {}" "Failed to read back same data, wrote: {}, read: {}"
.format(intWrote, readBack), 0x999) .format(intWrote, readBack), 0x999)
...@@ -2635,7 +2648,7 @@ class TaskAddData(StateTransitionTask): ...@@ -2635,7 +2648,7 @@ class TaskAddData(StateTransitionTask):
"Failed to read back same data for tick: {}, wrote: {}, read: EMPTY" "Failed to read back same data for tick: {}, wrote: {}, read: EMPTY"
.format(nextTick, intWrote), .format(nextTick, intWrote),
errno) errno)
elif errno == CrashGenError.INVALID_MULTIPLE_RESULT : # multiple results elif errno == CrashGenError.INVALID_MULTIPLE_RESULT: # multiple results
raise taos.error.ProgrammingError( raise taos.error.ProgrammingError(
"Failed to read back same data for tick: {}, wrote: {}, read: MULTIPLE RESULTS" "Failed to read back same data for tick: {}, wrote: {}, read: MULTIPLE RESULTS"
.format(nextTick, intWrote), .format(nextTick, intWrote),
...@@ -2668,7 +2681,7 @@ class TaskAddData(StateTransitionTask): ...@@ -2668,7 +2681,7 @@ class TaskAddData(StateTransitionTask):
dbc = wt.getDbConn() dbc = wt.getDbConn()
numTables = self.LARGE_NUMBER_OF_TABLES if Config.getConfig().larger_data else self.SMALL_NUMBER_OF_TABLES numTables = self.LARGE_NUMBER_OF_TABLES if Config.getConfig().larger_data else self.SMALL_NUMBER_OF_TABLES
numRecords = self.LARGE_NUMBER_OF_RECORDS if Config.getConfig().larger_data else self.SMALL_NUMBER_OF_RECORDS numRecords = self.LARGE_NUMBER_OF_RECORDS if Config.getConfig().larger_data else self.SMALL_NUMBER_OF_RECORDS
tblSeq = list(range(numTables )) tblSeq = list(range(numTables))
random.shuffle(tblSeq) # now we have random sequence random.shuffle(tblSeq) # now we have random sequence
for i in tblSeq: for i in tblSeq:
if (i in self.activeTable): # wow already active if (i in self.activeTable): # wow already active
...@@ -2692,6 +2705,7 @@ class TaskAddData(StateTransitionTask): ...@@ -2692,6 +2705,7 @@ class TaskAddData(StateTransitionTask):
self.activeTable.discard(i) # not raising an error, unlike remove self.activeTable.discard(i) # not raising an error, unlike remove
class TaskDeleteData(StateTransitionTask): class TaskDeleteData(StateTransitionTask):
# Track which table is being actively worked on # Track which table is being actively worked on
activeTable: Set[int] = set() activeTable: Set[int] = set()
...@@ -2719,7 +2733,7 @@ class TaskDeleteData(StateTransitionTask): ...@@ -2719,7 +2733,7 @@ class TaskDeleteData(StateTransitionTask):
def canBeginFrom(cls, state: AnyState): def canBeginFrom(cls, state: AnyState):
return state.canDeleteData() return state.canDeleteData()
def _lockTableIfNeeded(self, fullTableName, extraMsg = ''): def _lockTableIfNeeded(self, fullTableName, extraMsg=''):
if Config.getConfig().verify_data: if Config.getConfig().verify_data:
# Logging.info("Locking table: {}".format(fullTableName)) # Logging.info("Locking table: {}".format(fullTableName))
self.lockTable(fullTableName) self.lockTable(fullTableName)
...@@ -2740,7 +2754,7 @@ class TaskDeleteData(StateTransitionTask): ...@@ -2740,7 +2754,7 @@ class TaskDeleteData(StateTransitionTask):
def _deleteData(self, db: Database, dbc, regTableName, te: TaskExecutor): # implied: NOT in batches def _deleteData(self, db: Database, dbc, regTableName, te: TaskExecutor): # implied: NOT in batches
numRecords = self.LARGE_NUMBER_OF_RECORDS if Config.getConfig().larger_data else self.SMALL_NUMBER_OF_RECORDS numRecords = self.LARGE_NUMBER_OF_RECORDS if Config.getConfig().larger_data else self.SMALL_NUMBER_OF_RECORDS
del_Records = int(numRecords/5) del_Records = int(numRecords / 5)
if Dice.throw(2) == 0: if Dice.throw(2) == 0:
for j in range(del_Records): # number of records per table for j in range(del_Records): # number of records per table
intToWrite = db.getNextInt() intToWrite = db.getNextInt()
...@@ -2756,7 +2770,8 @@ class TaskDeleteData(StateTransitionTask): ...@@ -2756,7 +2770,8 @@ class TaskDeleteData(StateTransitionTask):
# TODO: too ugly trying to lock the table reliably, refactor... # TODO: too ugly trying to lock the table reliably, refactor...
fullTableName = db.getName() + '.' + regTableName fullTableName = db.getName() + '.' + regTableName
self._lockTableIfNeeded(fullTableName) # so that we are verify read-back. TODO: deal with exceptions before unlock self._lockTableIfNeeded(
fullTableName) # so that we are verify read-back. TODO: deal with exceptions before unlock
try: try:
sql = "delete from {} where ts = '{}' ;".format( # removed: tags ('{}', {}) sql = "delete from {} where ts = '{}' ;".format( # removed: tags ('{}', {})
...@@ -2772,7 +2787,8 @@ class TaskDeleteData(StateTransitionTask): ...@@ -2772,7 +2787,8 @@ class TaskDeleteData(StateTransitionTask):
intWrote = intToWrite intWrote = intToWrite
# Quick hack, attach an update statement here. TODO: create an "update" task # Quick hack, attach an update statement here. TODO: create an "update" task
if (not Config.getConfig().use_shadow_db) and Dice.throw(5) == 0: # 1 in N chance, plus not using shaddow DB if (not Config.getConfig().use_shadow_db) and Dice.throw(
5) == 0: # 1 in N chance, plus not using shaddow DB
intToUpdate = db.getNextInt() # Updated, but should not succeed intToUpdate = db.getNextInt() # Updated, but should not succeed
# nextColor = db.getNextColor() # nextColor = db.getNextColor()
sql = "delete from {} where ts = '{}' ;".format( # "INSERt" means "update" here sql = "delete from {} where ts = '{}' ;".format( # "INSERt" means "update" here
...@@ -2793,17 +2809,17 @@ class TaskDeleteData(StateTransitionTask): ...@@ -2793,17 +2809,17 @@ class TaskDeleteData(StateTransitionTask):
dbc.query("SELECT * from {}.{} WHERE ts='{}'". dbc.query("SELECT * from {}.{} WHERE ts='{}'".
format(db.getName(), regTableName, nextTick)) format(db.getName(), regTableName, nextTick))
result = dbc.getQueryResult() result = dbc.getQueryResult()
if len(result)==0: if len(result) == 0:
# means data has been delete # means data has been delete
print("D1",end="") # DF means delete failed print("D1", end="") # DF means delete failed
else: else:
print("DF",end="") # DF means delete failed print("DF", end="") # DF means delete failed
except taos.error.ProgrammingError as err: except taos.error.ProgrammingError as err:
errno = Helper.convertErrno(err.errno) errno = Helper.convertErrno(err.errno)
# if errno == CrashGenError.INVALID_EMPTY_RESULT: # empty result # if errno == CrashGenError.INVALID_EMPTY_RESULT: # empty result
# print("D1",end="") # D1 means delete data success and only 1 record # print("D1",end="") # D1 means delete data success and only 1 record
if errno in [0x218, 0x362,0x2662]: # table doesn't exist if errno in [0x218, 0x362, 0x2662]: # table doesn't exist
# do nothing # do nothing
pass pass
else: else:
...@@ -2827,7 +2843,8 @@ class TaskDeleteData(StateTransitionTask): ...@@ -2827,7 +2843,8 @@ class TaskDeleteData(StateTransitionTask):
# TODO: too ugly trying to lock the table reliably, refactor... # TODO: too ugly trying to lock the table reliably, refactor...
fullTableName = db.getName() + '.' + regTableName fullTableName = db.getName() + '.' + regTableName
self._lockTableIfNeeded(fullTableName) # so that we are verify read-back. TODO: deal with exceptions before unlock self._lockTableIfNeeded(
fullTableName) # so that we are verify read-back. TODO: deal with exceptions before unlock
try: try:
sql = "delete from {} ;".format( # removed: tags ('{}', {}) sql = "delete from {} ;".format( # removed: tags ('{}', {})
...@@ -2837,7 +2854,8 @@ class TaskDeleteData(StateTransitionTask): ...@@ -2837,7 +2854,8 @@ class TaskDeleteData(StateTransitionTask):
# Logging.info("Data added: {}".format(sql)) # Logging.info("Data added: {}".format(sql))
# Quick hack, attach an update statement here. TODO: create an "update" task # Quick hack, attach an update statement here. TODO: create an "update" task
if (not Config.getConfig().use_shadow_db) and Dice.throw(5) == 0: # 1 in N chance, plus not using shaddow DB if (not Config.getConfig().use_shadow_db) and Dice.throw(
5) == 0: # 1 in N chance, plus not using shaddow DB
sql = "delete from {} ;".format( # "INSERt" means "update" here sql = "delete from {} ;".format( # "INSERt" means "update" here
fullTableName) fullTableName)
dbc.execute(sql) dbc.execute(sql)
...@@ -2852,17 +2870,17 @@ class TaskDeleteData(StateTransitionTask): ...@@ -2852,17 +2870,17 @@ class TaskDeleteData(StateTransitionTask):
dbc.query("SELECT * from {}.{} WHERE ts='{}'". dbc.query("SELECT * from {}.{} WHERE ts='{}'".
format(db.getName(), regTableName, nextTick)) format(db.getName(), regTableName, nextTick))
result = dbc.getQueryResult() result = dbc.getQueryResult()
if len(result)==0: if len(result) == 0:
# means data has been delete # means data has been delete
print("DA",end="") print("DA", end="")
else: else:
print("DF",end="") # DF means delete failed print("DF", end="") # DF means delete failed
except taos.error.ProgrammingError as err: except taos.error.ProgrammingError as err:
errno = Helper.convertErrno(err.errno) errno = Helper.convertErrno(err.errno)
# if errno == CrashGenError.INVALID_EMPTY_RESULT: # empty result # if errno == CrashGenError.INVALID_EMPTY_RESULT: # empty result
# print("Da",end="") # Da means delete data success and for all datas # print("Da",end="") # Da means delete data success and for all datas
if errno in [0x218, 0x362,0x2662]: # table doesn't exist if errno in [0x218, 0x362, 0x2662]: # table doesn't exist
# do nothing # do nothing
pass pass
else: else:
...@@ -2885,7 +2903,7 @@ class TaskDeleteData(StateTransitionTask): ...@@ -2885,7 +2903,7 @@ class TaskDeleteData(StateTransitionTask):
dbc = wt.getDbConn() dbc = wt.getDbConn()
numTables = self.LARGE_NUMBER_OF_TABLES if Config.getConfig().larger_data else self.SMALL_NUMBER_OF_TABLES numTables = self.LARGE_NUMBER_OF_TABLES if Config.getConfig().larger_data else self.SMALL_NUMBER_OF_TABLES
numRecords = self.LARGE_NUMBER_OF_RECORDS if Config.getConfig().larger_data else self.SMALL_NUMBER_OF_RECORDS numRecords = self.LARGE_NUMBER_OF_RECORDS if Config.getConfig().larger_data else self.SMALL_NUMBER_OF_RECORDS
tblSeq = list(range(numTables )) tblSeq = list(range(numTables))
random.shuffle(tblSeq) # now we have random sequence random.shuffle(tblSeq) # now we have random sequence
for i in tblSeq: for i in tblSeq:
if (i in self.activeTable): # wow already active if (i in self.activeTable): # wow already active
...@@ -2912,18 +2930,18 @@ class ThreadStacks: # stack info for all threads ...@@ -2912,18 +2930,18 @@ class ThreadStacks: # stack info for all threads
self._allStacks = {} self._allStacks = {}
allFrames = sys._current_frames() # All current stack frames, keyed with "ident" allFrames = sys._current_frames() # All current stack frames, keyed with "ident"
for th in threading.enumerate(): # For each thread for th in threading.enumerate(): # For each thread
stack = traceback.extract_stack(allFrames[th.ident]) #type: ignore # Get stack for a thread stack = traceback.extract_stack(allFrames[th.ident]) # type: ignore # Get stack for a thread
shortTid = th.native_id % 10000 #type: ignore shortTid = th.native_id % 10000 # type: ignore
self._allStacks[shortTid] = stack # Was using th.native_id self._allStacks[shortTid] = stack # Was using th.native_id
    def record_current_time(self, current_time):
        # Timestamp (seconds since epoch) captured just before dumping stacks;
        # print() uses it to report how long ago each thread's last SQL
        # statement was committed and to flag a likely deadlock.
        self.current_time = current_time
def print(self, filteredEndName = None, filterInternal = False): def print(self, filteredEndName=None, filterInternal=False):
for shortTid, stack in self._allStacks.items(): # for each thread, stack frames top to bottom for shortTid, stack in self._allStacks.items(): # for each thread, stack frames top to bottom
lastFrame = stack[-1] lastFrame = stack[-1]
if filteredEndName: # we need to filter out stacks that match this name if filteredEndName: # we need to filter out stacks that match this name
if lastFrame.name == filteredEndName : # end did not match if lastFrame.name == filteredEndName: # end did not match
continue continue
if filterInternal: if filterInternal:
if lastFrame.name in ['wait', 'invoke_excepthook', if lastFrame.name in ['wait', 'invoke_excepthook',
...@@ -2937,7 +2955,9 @@ class ThreadStacks: # stack info for all threads ...@@ -2937,7 +2955,9 @@ class ThreadStacks: # stack info for all threads
lastSqlForThread = DbConn.fetchSqlForThread(shortTid) lastSqlForThread = DbConn.fetchSqlForThread(shortTid)
last_sql_commit_time = DbConn.get_save_sql_time(shortTid) last_sql_commit_time = DbConn.get_save_sql_time(shortTid)
# time_cost = DbConn.get_time_cost() # time_cost = DbConn.get_time_cost()
print("Last SQL statement attempted from thread {} ({:.4f} sec ago) is: {}".format(shortTid, self.current_time-last_sql_commit_time ,lastSqlForThread)) print("Last SQL statement attempted from thread {} ({:.4f} sec ago) is: {}".format(shortTid,
self.current_time - last_sql_commit_time,
lastSqlForThread))
stackFrame = 0 stackFrame = 0
for frame in stack: # was using: reversed(stack) for frame in stack: # was using: reversed(stack)
# print(frame) # print(frame)
...@@ -2946,9 +2966,10 @@ class ThreadStacks: # stack info for all threads ...@@ -2946,9 +2966,10 @@ class ThreadStacks: # stack info for all threads
print(" {}".format(frame.line)) print(" {}".format(frame.line))
stackFrame += 1 stackFrame += 1
print("-----> End of Thread Info ----->\n") print("-----> End of Thread Info ----->\n")
if self.current_time-last_sql_commit_time >100: # dead lock occured if self.current_time - last_sql_commit_time > 100: # dead lock occured
print("maybe dead locked of thread {} ".format(shortTid)) print("maybe dead locked of thread {} ".format(shortTid))
class ClientManager: class ClientManager:
def __init__(self): def __init__(self):
Logging.info("Starting service manager") Logging.info("Starting service manager")
...@@ -3062,7 +3083,6 @@ class ClientManager: ...@@ -3062,7 +3083,6 @@ class ClientManager:
svcMgr.stopTaosServices() svcMgr.stopTaosServices()
svcMgr = None svcMgr = None
# Release global variables # Release global variables
# gConfig = None # gConfig = None
Config.clearConfig() Config.clearConfig()
...@@ -3093,6 +3113,7 @@ class ClientManager: ...@@ -3093,6 +3113,7 @@ class ClientManager:
# self.tc.getDbManager().cleanUp() # clean up first, so we can show ZERO db connections # self.tc.getDbManager().cleanUp() # clean up first, so we can show ZERO db connections
self.tc.printStats() self.tc.printStats()
class MainExec: class MainExec:
def __init__(self): def __init__(self):
self._clientMgr = None self._clientMgr = None
...@@ -3131,7 +3152,8 @@ class MainExec: ...@@ -3131,7 +3152,8 @@ class MainExec:
    def runService(self):
        """Run the TDengine service manager to completion (blocking).

        Creates a ServiceManager sized by the configured number of dnodes,
        publishes it through the module-level ``gSvcMgr`` (acknowledged hack,
        see TODO), runs it until it reaches an end state, then clears both
        the global and the instance reference.
        """
        global gSvcMgr
        gSvcMgr = self._svcMgr = ServiceManager(
            Config.getConfig().num_dnodes)  # save it in a global variable TODO: hack alert
        gSvcMgr.run()  # run to some end state
        gSvcMgr = self._svcMgr = None  # drop both references once services stop
...@@ -3259,7 +3281,6 @@ class MainExec: ...@@ -3259,7 +3281,6 @@ class MainExec:
return parser return parser
def init(self): # TODO: refactor def init(self): # TODO: refactor
global gContainer global gContainer
gContainer = Container() # micky-mouse DI gContainer = Container() # micky-mouse DI
...@@ -3271,7 +3292,7 @@ class MainExec: ...@@ -3271,7 +3292,7 @@ class MainExec:
Config.init(parser) Config.init(parser)
# Sanity check for arguments # Sanity check for arguments
if Config.getConfig().use_shadow_db and Config.getConfig().max_dbs>1 : if Config.getConfig().use_shadow_db and Config.getConfig().max_dbs > 1:
raise CrashGenError("Cannot combine use-shadow-db with max-dbs of more than 1") raise CrashGenError("Cannot combine use-shadow-db with max-dbs of more than 1")
Logging.clsInit(Config.getConfig().debug) Logging.clsInit(Config.getConfig().debug)
...@@ -3306,7 +3327,7 @@ class Container(): ...@@ -3306,7 +3327,7 @@ class Container():
return self._cargo[name] # just a simple lookup return self._cargo[name] # just a simple lookup
def __setattr__(self, name, value): def __setattr__(self, name, value):
if name == '_cargo' : # reserved vars if name == '_cargo': # reserved vars
super().__setattr__(name, value) super().__setattr__(name, value)
return return
self._verifyValidProperty(name) self._verifyValidProperty(name)
......
...@@ -12,12 +12,13 @@ ...@@ -12,12 +12,13 @@
# -*- coding: utf-8 -*- # -*- coding: utf-8 -*-
import taos import taos
from util.log import * from taos.tmq import *
from util.cases import * from util.cases import *
from util.sql import *
from util.common import * from util.common import *
from util.log import *
from util.sql import *
from util.sqlset import * from util.sqlset import *
from taos.tmq import *
class TDTestCase: class TDTestCase:
def init(self, conn, logSql, replicaVar=1): def init(self, conn, logSql, replicaVar=1):
...@@ -29,7 +30,7 @@ class TDTestCase: ...@@ -29,7 +30,7 @@ class TDTestCase:
self.binary_length = 20 # the length of binary for column_dict self.binary_length = 20 # the length of binary for column_dict
self.nchar_length = 20 # the length of nchar for column_dict self.nchar_length = 20 # the length of nchar for column_dict
self.column_dict = { self.column_dict = {
'ts' : 'timestamp', 'ts': 'timestamp',
'col1': 'tinyint', 'col1': 'tinyint',
'col2': 'smallint', 'col2': 'smallint',
'col3': 'int', 'col3': 'int',
...@@ -45,7 +46,7 @@ class TDTestCase: ...@@ -45,7 +46,7 @@ class TDTestCase:
'col13': f'nchar({self.nchar_length})' 'col13': f'nchar({self.nchar_length})'
} }
self.tag_dict = { self.tag_dict = {
'ts_tag' : 'timestamp', 'ts_tag': 'timestamp',
't1': 'tinyint', 't1': 'tinyint',
't2': 'smallint', 't2': 'smallint',
't3': 'int', 't3': 'int',
...@@ -67,25 +68,28 @@ class TDTestCase: ...@@ -67,25 +68,28 @@ class TDTestCase:
f'now,1,2,3,4,5,6,7,8,9.9,10.1,true,"abcd","涛思数据"' f'now,1,2,3,4,5,6,7,8,9.9,10.1,true,"abcd","涛思数据"'
] ]
self.tbnum = 1 self.tbnum = 1
def prepare_data(self): def prepare_data(self):
tdSql.execute(self.setsql.set_create_stable_sql(self.stbname,self.column_dict,self.tag_dict)) tdSql.execute(self.setsql.set_create_stable_sql(self.stbname, self.column_dict, self.tag_dict))
for i in range(self.tbnum): for i in range(self.tbnum):
tdSql.execute(f'create table {self.stbname}_{i} using {self.stbname} tags({self.tag_list[i]})') tdSql.execute(f'create table {self.stbname}_{i} using {self.stbname} tags({self.tag_list[i]})')
for j in self.values_list: for j in self.values_list:
tdSql.execute(f'insert into {self.stbname}_{i} values({j})') tdSql.execute(f'insert into {self.stbname}_{i} values({j})')
def create_user(self): def create_user(self):
for user_name in ['jiacy1_all','jiacy1_read','jiacy1_write','jiacy1_none','jiacy0_all','jiacy0_read','jiacy0_write','jiacy0_none']: for user_name in ['jiacy1_all', 'jiacy1_read', 'jiacy1_write', 'jiacy1_none', 'jiacy0_all', 'jiacy0_read',
'jiacy0_write', 'jiacy0_none']:
if 'jiacy1' in user_name.lower(): if 'jiacy1' in user_name.lower():
tdSql.execute(f'create user {user_name} pass "123" sysinfo 1') tdSql.execute(f'create user {user_name} pass "123" sysinfo 1')
elif 'jiacy0' in user_name.lower(): elif 'jiacy0' in user_name.lower():
tdSql.execute(f'create user {user_name} pass "123" sysinfo 0') tdSql.execute(f'create user {user_name} pass "123" sysinfo 0')
for user_name in ['jiacy1_all','jiacy1_read','jiacy0_all','jiacy0_read']: for user_name in ['jiacy1_all', 'jiacy1_read', 'jiacy0_all', 'jiacy0_read']:
tdSql.execute(f'grant read on db to {user_name}') tdSql.execute(f'grant read on db to {user_name}')
for user_name in ['jiacy1_all','jiacy1_write','jiacy0_all','jiacy0_write']: for user_name in ['jiacy1_all', 'jiacy1_write', 'jiacy0_all', 'jiacy0_write']:
tdSql.execute(f'grant write on db to {user_name}') tdSql.execute(f'grant write on db to {user_name}')
def user_privilege_check(self): def user_privilege_check(self):
jiacy1_read_conn = taos.connect(user='jiacy1_read',password='123') jiacy1_read_conn = taos.connect(user='jiacy1_read', password='123')
sql = "create table ntb (ts timestamp,c0 int)" sql = "create table ntb (ts timestamp,c0 int)"
expectErrNotOccured = True expectErrNotOccured = True
try: try:
...@@ -94,32 +98,34 @@ class TDTestCase: ...@@ -94,32 +98,34 @@ class TDTestCase:
expectErrNotOccured = False expectErrNotOccured = False
if expectErrNotOccured: if expectErrNotOccured:
caller = inspect.getframeinfo(inspect.stack()[1][0]) caller = inspect.getframeinfo(inspect.stack()[1][0])
tdLog.exit(f"{caller.filename}({caller.lineno}) failed: sql:{sql}, expect error not occured" ) tdLog.exit(f"{caller.filename}({caller.lineno}) failed: sql:{sql}, expect error not occured")
else: else:
self.queryRows = 0 self.queryRows = 0
self.queryCols = 0 self.queryCols = 0
self.queryResult = None self.queryResult = None
tdLog.info(f"sql:{sql}, expect error occured") tdLog.info(f"sql:{sql}, expect error occured")
pass pass
def drop_topic(self): def drop_topic(self):
jiacy1_all_conn = taos.connect(user='jiacy1_all',password='123') jiacy1_all_conn = taos.connect(user='jiacy1_all', password='123')
jiacy1_read_conn = taos.connect(user='jiacy1_read',password='123') jiacy1_read_conn = taos.connect(user='jiacy1_read', password='123')
jiacy1_write_conn = taos.connect(user='jiacy1_write',password='123') jiacy1_write_conn = taos.connect(user='jiacy1_write', password='123')
jiacy1_none_conn = taos.connect(user='jiacy1_none',password='123') jiacy1_none_conn = taos.connect(user='jiacy1_none', password='123')
jiacy0_all_conn = taos.connect(user='jiacy0_all',password='123') jiacy0_all_conn = taos.connect(user='jiacy0_all', password='123')
jiacy0_read_conn = taos.connect(user='jiacy0_read',password='123') jiacy0_read_conn = taos.connect(user='jiacy0_read', password='123')
jiacy0_write_conn = taos.connect(user='jiacy0_write',password='123') jiacy0_write_conn = taos.connect(user='jiacy0_write', password='123')
jiacy0_none_conn = taos.connect(user='jiacy0_none',password='123') jiacy0_none_conn = taos.connect(user='jiacy0_none', password='123')
tdSql.execute('create topic root_db as select * from db.stb') tdSql.execute('create topic root_db as select * from db.stb')
for user in [jiacy1_all_conn,jiacy1_read_conn,jiacy0_all_conn,jiacy0_read_conn]: for user in [jiacy1_all_conn, jiacy1_read_conn, jiacy0_all_conn, jiacy0_read_conn]:
user.execute(f'create topic db_jiacy as select * from db.stb') user.execute(f'create topic db_jiacy as select * from db.stb')
user.execute('drop topic db_jiacy') user.execute('drop topic db_jiacy')
for user in [jiacy1_write_conn,jiacy1_none_conn,jiacy0_write_conn,jiacy0_none_conn,jiacy1_all_conn,jiacy1_read_conn,jiacy0_all_conn,jiacy0_read_conn]: for user in [jiacy1_write_conn, jiacy1_none_conn, jiacy0_write_conn, jiacy0_none_conn, jiacy1_all_conn,
jiacy1_read_conn, jiacy0_all_conn, jiacy0_read_conn]:
sql_list = [] sql_list = []
if user in [jiacy1_all_conn,jiacy1_read_conn,jiacy0_all_conn,jiacy0_read_conn]: if user in [jiacy1_all_conn, jiacy1_read_conn, jiacy0_all_conn, jiacy0_read_conn]:
sql_list = ['drop topic root_db'] sql_list = ['drop topic root_db']
elif user in [jiacy1_write_conn,jiacy1_none_conn,jiacy0_write_conn,jiacy0_none_conn]: elif user in [jiacy1_write_conn, jiacy1_none_conn, jiacy0_write_conn, jiacy0_none_conn]:
sql_list = ['drop topic root_db','create topic db_jiacy as select * from db.stb'] sql_list = ['drop topic root_db', 'create topic db_jiacy as select * from db.stb']
for sql in sql_list: for sql in sql_list:
expectErrNotOccured = True expectErrNotOccured = True
try: try:
...@@ -128,33 +134,26 @@ class TDTestCase: ...@@ -128,33 +134,26 @@ class TDTestCase:
expectErrNotOccured = False expectErrNotOccured = False
if expectErrNotOccured: if expectErrNotOccured:
caller = inspect.getframeinfo(inspect.stack()[1][0]) caller = inspect.getframeinfo(inspect.stack()[1][0])
tdLog.exit(f"{caller.filename}({caller.lineno}) failed: sql:{sql}, expect error not occured" ) tdLog.exit(f"{caller.filename}({caller.lineno}) failed: sql:{sql}, expect error not occured")
else: else:
self.queryRows = 0 self.queryRows = 0
self.queryCols = 0 self.queryCols = 0
self.queryResult = None self.queryResult = None
tdLog.info(f"sql:{sql}, expect error occured") tdLog.info(f"sql:{sql}, expect error occured")
def tmq_commit_cb_print(tmq, resp, param=None): def tmq_commit_cb_print(tmq, resp, param=None):
print(f"commit: {resp}, tmq: {tmq}, param: {param}") print(f"commit: {resp}, tmq: {tmq}, param: {param}")
def subscribe_topic(self): def subscribe_topic(self):
print("create topic") print("create topic")
tdSql.execute('create topic db_topic as select * from db.stb') tdSql.execute('create topic db_topic as select * from db.stb')
tdSql.execute('grant subscribe on db_topic to jiacy1_all') tdSql.execute('grant subscribe on db_topic to jiacy1_all')
print("build consumer") print("build consumer")
conf = TaosTmqConf() tmq = Consumer({"group.id": "tg2", "td.connect.user": "jiacy1_all", "td.connect.pass": "123",
conf.set("group.id", "tg2") "enable.auto.commit": "true"})
conf.set("td.connect.user", "jiacy1_all")
conf.set("td.connect.pass", "123")
conf.set("enable.auto.commit", "true")
conf.set_auto_commit_cb(self.tmq_commit_cb_print, None)
tmq = conf.new_consumer()
print("build topic list") print("build topic list")
topic_list = TaosTmqList() tmq.subscribe(["db_topic"])
topic_list.append("db_topic")
print("basic consume loop") print("basic consume loop")
tmq.subscribe(topic_list)
sub_list = tmq.subscription()
print("subscribed topics: ", sub_list)
c = 0 c = 0
l = 0 l = 0
for i in range(10): for i in range(10):
...@@ -163,19 +162,22 @@ class TDTestCase: ...@@ -163,19 +162,22 @@ class TDTestCase:
res = tmq.poll(10) res = tmq.poll(10)
print(f"loop {l}") print(f"loop {l}")
l += 1 l += 1
if res: if not res:
print(f"received empty message at loop {l} (committed {c})")
continue
if res.error():
print(f"consumer error at loop {l} (committed {c}) {res.error()}")
continue
c += 1 c += 1
topic = res.get_topic_name() topic = res.topic()
vg = res.get_vgroup_id() db = res.database()
db = res.get_db_name() print(f"topic: {topic}\ndb: {db}")
print(f"topic: {topic}\nvgroup id: {vg}\ndb: {db}")
for row in res: for row in res:
print(row) print(row.fetchall())
print("* committed") print("* committed")
tmq.commit(res) tmq.commit(res)
else:
print(f"received empty message at loop {l} (committed {c})")
pass
def run(self): def run(self):
tdSql.prepare() tdSql.prepare()
...@@ -184,9 +186,11 @@ class TDTestCase: ...@@ -184,9 +186,11 @@ class TDTestCase:
self.drop_topic() self.drop_topic()
self.user_privilege_check() self.user_privilege_check()
self.subscribe_topic() self.subscribe_topic()
def stop(self): def stop(self):
tdSql.close() tdSql.close()
tdLog.success("%s successfully executed" % __file__) tdLog.success("%s successfully executed" % __file__)
# Register this test case with the framework for both Windows and Linux runs.
tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册