diff --git a/src/client/src/tscParseInsert.c b/src/client/src/tscParseInsert.c
index 89b024eb17b60cc49caec70deb1600f05f834a7b..cd9853df03bab1a953ee8fd79e66a52e84ba157c 100644
--- a/src/client/src/tscParseInsert.c
+++ b/src/client/src/tscParseInsert.c
@@ -774,6 +774,10 @@ static int32_t tscCheckIfCreateTable(char **sqlstr, SSqlObj *pSql, char** boundC
     index = 0;
     sToken = tStrGetToken(sql, &index, false);
 
+    if (sToken.type == TK_ILLEGAL) {
+      return tscSQLSyntaxErrMsg(pCmd->payload, "unrecognized token", sToken.z);
+    }
+
     if (sToken.type == TK_RP) {
       break;
     }
diff --git a/src/client/src/tscSQLParser.c b/src/client/src/tscSQLParser.c
index 0036c5d0c1b92e7ba929390929ac0dfc49a76737..aecdecf6b66f77c6ee7403f1ed872c393c6f9086 100644
--- a/src/client/src/tscSQLParser.c
+++ b/src/client/src/tscSQLParser.c
@@ -395,11 +395,18 @@ int32_t tscValidateSqlInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) {
       const char* msg2 = "name too long";
 
       SCreateDbInfo* pCreateDB = &(pInfo->pMiscInfo->dbOpt);
-      if (tscValidateName(&pCreateDB->dbname) != TSDB_CODE_SUCCESS) {
+      if (pCreateDB->dbname.n >= TSDB_DB_NAME_LEN) {
+        return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2);
+      }
+
+      char buf[TSDB_DB_NAME_LEN] = {0};
+      SStrToken token = taosTokenDup(&pCreateDB->dbname, buf, tListLen(buf));
+
+      if (tscValidateName(&token) != TSDB_CODE_SUCCESS) {
         return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg1);
       }
 
-      int32_t ret = tNameSetDbName(&pTableMetaInfo->name, getAccountId(pSql), &(pCreateDB->dbname));
+      int32_t ret = tNameSetDbName(&pTableMetaInfo->name, getAccountId(pSql), &token);
       if (ret != TSDB_CODE_SUCCESS) {
         return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2);
       }
diff --git a/src/client/src/tscServer.c b/src/client/src/tscServer.c
index b1d8f2f8130d13420ef5424e70afa20430937123..e05309bbafaa7cbd138bef69e53d01e7c98b8ef4 100644
--- a/src/client/src/tscServer.c
+++ b/src/client/src/tscServer.c
@@ -2469,8 +2469,8 @@ int32_t getMultiTableMetaFromMnode(SSqlObj *pSql, SArray* pNameList, SArray* pVg
   pNew->fp = fp;
   pNew->param = (void *)pSql->self;
 
-  tscDebug("0x%"PRIx64" metaRid from %" PRId64 " to %" PRId64 , pSql->self, pSql->metaRid, pNew->self);
-
+  tscDebug("0x%"PRIx64" metaRid from 0x%" PRIx64 " to 0x%" PRIx64 , pSql->self, pSql->metaRid, pNew->self);
+
   pSql->metaRid = pNew->self;
   int32_t code = tscBuildAndSendRequest(pNew, NULL);
   if (code == TSDB_CODE_SUCCESS) {
diff --git a/src/client/src/tscSubquery.c b/src/client/src/tscSubquery.c
index 4b1b3e9080fe9078380540792cb2c8835acc33dc..2f32e775d66e3c3e1cf45b1437f3fa82278346a6 100644
--- a/src/client/src/tscSubquery.c
+++ b/src/client/src/tscSubquery.c
@@ -1469,6 +1469,8 @@ static void joinRetrieveFinalResCallback(void* param, TAOS_RES* tres, int numOfR
 
     SSqlRes* pRes1 = &pParentSql->pSubs[i]->res;
 
+    pParentSql->res.precision = pRes1->precision;
+
     if (pRes1->row > 0 && pRes1->numOfRows > 0) {
       tscDebug("0x%"PRIx64" sub:0x%"PRIx64" index:%d numOfRows:%d total:%"PRId64 " (not retrieve)", pParentSql->self,
                pParentSql->pSubs[i]->self, i, pRes1->numOfRows, pRes1->numOfTotal);
diff --git a/src/common/src/tglobal.c b/src/common/src/tglobal.c
index d6bbc288adf92674b308e8d423d1b19cd58113c0..a1266bb2cb3d793088513a388568c74c94e9f54e 100644
--- a/src/common/src/tglobal.c
+++ b/src/common/src/tglobal.c
@@ -46,7 +46,7 @@ char tsEmail[TSDB_FQDN_LEN] = {0};
 int32_t tsDnodeId = 0;
 
 // common
-int32_t tsRpcTimer = 1000;
+int32_t tsRpcTimer = 300;
 int32_t tsRpcMaxTime = 600;  // seconds;
 int32_t tsRpcForceTcp = 0;  //disable this, means query, show command use udp protocol as default
 int32_t tsMaxShellConns = 50000;
diff --git a/src/connector/go b/src/connector/go
index 050667e5b4d0eafa5387e4283e713559b421203f..7a26c432f8b4203e42344ff3290b9b9b01b983d5 160000
--- a/src/connector/go
+++ b/src/connector/go
@@ -1 +1 @@
-Subproject commit 050667e5b4d0eafa5387e4283e713559b421203f
+Subproject commit 7a26c432f8b4203e42344ff3290b9b9b01b983d5
diff --git a/src/connector/python/taos/__init__.py b/src/connector/python/taos/__init__.py
index 973263573808232e4e71dc0158585624a8e7d2ab..52c6db311ecc4c2f944372ae3334fdc58cb6e779 100644
--- a/src/connector/python/taos/__init__.py
+++ b/src/connector/python/taos/__init__.py
@@ -2,6 +2,10 @@
 from .connection import TDengineConnection
 from .cursor import TDengineCursor
 
+# For some reason, the following is needed for VS Code (through PyLance) to
+# recognize that "error" is a valid module of the "taos" package.
+from .error import ProgrammingError
+
 # Globals
 threadsafety = 0
 paramstyle = 'pyformat'
diff --git a/src/inc/taoserror.h b/src/inc/taoserror.h
index 09f632ea32077b3e9bd3fcddcff418f280192c3f..ab15e851e76e1c7ad29a81a7cd1874a9e89d82ed 100644
--- a/src/inc/taoserror.h
+++ b/src/inc/taoserror.h
@@ -215,11 +215,11 @@ int32_t* taosGetErrno();
 #define TSDB_CODE_VND_IS_FLOWCTRL            TAOS_DEF_ERROR_CODE(0, 0x050C)  //"Database memory is full for waiting commit")
 #define TSDB_CODE_VND_IS_DROPPING            TAOS_DEF_ERROR_CODE(0, 0x050D)  //"Database is dropping")
 #define TSDB_CODE_VND_IS_BALANCING           TAOS_DEF_ERROR_CODE(0, 0x050E)  //"Database is balancing")
+#define TSDB_CODE_VND_IS_CLOSING             TAOS_DEF_ERROR_CODE(0, 0x0510)  //"Database is closing")
 #define TSDB_CODE_VND_NOT_SYNCED             TAOS_DEF_ERROR_CODE(0, 0x0511)  //"Database suspended")
 #define TSDB_CODE_VND_NO_WRITE_AUTH          TAOS_DEF_ERROR_CODE(0, 0x0512)  //"Database write operation denied")
 #define TSDB_CODE_VND_IS_SYNCING             TAOS_DEF_ERROR_CODE(0, 0x0513)  //"Database is syncing")
 #define TSDB_CODE_VND_INVALID_TSDB_STATE     TAOS_DEF_ERROR_CODE(0, 0x0514)  //"Invalid tsdb state")
-#define TSDB_CODE_VND_IS_CLOSING             TAOS_DEF_ERROR_CODE(0, 0x0515)  //"Database is closing")
 
 // tsdb
 #define TSDB_CODE_TDB_INVALID_TABLE_ID       TAOS_DEF_ERROR_CODE(0, 0x0600)  //"Invalid table ID")
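Note (not part of the patch): the `from .error import ProgrammingError` re-export above makes `taos.error` resolvable for tooling and user code. A minimal, hedged usage sketch in Python — it assumes a locally reachable TDengine server, and the statement/table name are hypothetical:

```python
import taos
from taos.error import ProgrammingError

try:
    conn = taos.connect(host="localhost")      # assumes a locally running TDengine server
    cursor = conn.cursor()
    cursor.execute("INSERT INTO no_such_table VALUES (now, 1)")  # hypothetical statement
except ProgrammingError as err:
    print("statement rejected:", err)
```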
diff --git a/src/mnode/src/mnodeSdb.c b/src/mnode/src/mnodeSdb.c
index ac3e5d86ecfd57b3b06f49b39bcb991ff06423d5..e9acd5b9bc69f0354f28009254c57c004fbb7995 100644
--- a/src/mnode/src/mnodeSdb.c
+++ b/src/mnode/src/mnodeSdb.c
@@ -656,8 +656,6 @@ static int32_t sdbProcessWrite(void *wparam, void *hparam, int32_t qtype, void *
     dnodeReportStep("mnode-sdb", stepDesc, 0);
   }
 
-  if (qtype == TAOS_QTYPE_QUERY) return sdbPerformDeleteAction(pHead, pTable);
-
   pthread_mutex_lock(&tsSdbMgmt.mutex);
 
   if (pHead->version == 0) {
@@ -721,13 +719,11 @@ static int32_t sdbProcessWrite(void *wparam, void *hparam, int32_t qtype, void *
   if (action == SDB_ACTION_INSERT) {
     return sdbPerformInsertAction(pHead, pTable);
   } else if (action == SDB_ACTION_DELETE) {
-    //if (qtype == TAOS_QTYPE_FWD) {
-    //  Drop database/stable may take a long time and cause a timeout, so we confirm first then reput it into queue
-    //  sdbWriteFwdToQueue(1, hparam, TAOS_QTYPE_QUERY, unused);
-    //  return TSDB_CODE_SUCCESS;
-    //} else {
-    return sdbPerformDeleteAction(pHead, pTable);
-    //}
+    if (qtype == TAOS_QTYPE_FWD) {
+      // Drop database/stable may take a long time and cause a timeout, so we confirm first
+      syncConfirmForward(tsSdbMgmt.sync, pHead->version, TSDB_CODE_SUCCESS, false);
+    }
+    return sdbPerformDeleteAction(pHead, pTable);
   } else if (action == SDB_ACTION_UPDATE) {
     return sdbPerformUpdateAction(pHead, pTable);
   } else {
@@ -1140,7 +1136,10 @@ static void *sdbWorkerFp(void *pWorker) {
       sdbConfirmForward(1, pRow, pRow->code);
     } else {
       if (qtype == TAOS_QTYPE_FWD) {
-        syncConfirmForward(tsSdbMgmt.sync, pRow->pHead.version, pRow->code, false);
+        int32_t action = pRow->pHead.msgType % 10;
+        if (action != SDB_ACTION_DELETE) {
+          syncConfirmForward(tsSdbMgmt.sync, pRow->pHead.version, pRow->code, false);
+        }
       }
       sdbFreeFromQueue(pRow);
     }
diff --git a/src/tsdb/src/tsdbMeta.c b/src/tsdb/src/tsdbMeta.c
index 324a7c79c5b7dbfa69bbdf240301c3c710f90b59..817469819700d6529389fca860b76feea3f1d5a4 100644
--- a/src/tsdb/src/tsdbMeta.c
+++ b/src/tsdb/src/tsdbMeta.c
@@ -68,7 +68,7 @@ int tsdbCreateTable(STsdbRepo *repo, STableCfg *pCfg) {
                TABLE_CHAR_NAME(pMeta->tables[tid]), TABLE_TID(pMeta->tables[tid]), TABLE_UID(pMeta->tables[tid]));
       return 0;
     } else {
-      tsdbError("vgId:%d table %s at tid %d uid %" PRIu64
+      tsdbInfo("vgId:%d table %s at tid %d uid %" PRIu64
                 " exists, replace it with new table, this can be not reasonable",
                 REPO_ID(pRepo), TABLE_CHAR_NAME(pMeta->tables[tid]), TABLE_TID(pMeta->tables[tid]),
                 TABLE_UID(pMeta->tables[tid]));
@@ -1055,10 +1055,7 @@ static int tsdbRemoveTableFromIndex(STsdbMeta *pMeta, STable *pTable) {
   STable *pSTable = pTable->pSuper;
   ASSERT(pSTable != NULL);
 
-  STSchema *pSchema = tsdbGetTableTagSchema(pTable);
-  STColumn *pCol = schemaColAt(pSchema, DEFAULT_TAG_INDEX_COLUMN);
-
-  char * key = tdGetKVRowValOfCol(pTable->tagVal, pCol->colId);
+  char* key = getTagIndexKey(pTable);
 
   SArray *res = tSkipListGet(pSTable->pIndex, key);
 
   size_t size = taosArrayGetSize(res);
diff --git a/src/tsdb/src/tsdbRead.c b/src/tsdb/src/tsdbRead.c
index fd2b40318454aca0b23c92ee89b60176f465d839..1545d44395d6baca6f33c850e35796c96af8d52f 100644
--- a/src/tsdb/src/tsdbRead.c
+++ b/src/tsdb/src/tsdbRead.c
@@ -368,40 +368,39 @@ static STsdbQueryHandle* tsdbQueryTablesImpl(STsdbRepo* tsdb, STsdbQueryCond* pC
     goto out_of_memory;
   }
 
-  assert(pCond != NULL && pCond->numOfCols > 0 && pMemRef != NULL);
+  assert(pCond != NULL && pMemRef != NULL);
 
   if (ASCENDING_TRAVERSE(pCond->order)) {
     assert(pQueryHandle->window.skey <= pQueryHandle->window.ekey);
   } else {
     assert(pQueryHandle->window.skey >= pQueryHandle->window.ekey);
   }
+  if (pCond->numOfCols > 0) {
+    // allocate buffer in order to load data blocks from file
+    pQueryHandle->statis = calloc(pCond->numOfCols, sizeof(SDataStatis));
+    if (pQueryHandle->statis == NULL) {
+      goto out_of_memory;
+    }
 
-  // allocate buffer in order to load data blocks from file
-  pQueryHandle->statis = calloc(pCond->numOfCols, sizeof(SDataStatis));
-  if (pQueryHandle->statis == NULL) {
-    goto out_of_memory;
-  }
-
-  pQueryHandle->pColumns = taosArrayInit(pCond->numOfCols, sizeof(SColumnInfoData));  // todo: use list instead of array?
-  if (pQueryHandle->pColumns == NULL) {
-    goto out_of_memory;
-  }
+    pQueryHandle->pColumns =
+        taosArrayInit(pCond->numOfCols, sizeof(SColumnInfoData));  // todo: use list instead of array?
+    if (pQueryHandle->pColumns == NULL) {
+      goto out_of_memory;
+    }
 
-  for (int32_t i = 0; i < pCond->numOfCols; ++i) {
-    SColumnInfoData colInfo = {{0}, 0};
+    for (int32_t i = 0; i < pCond->numOfCols; ++i) {
+      SColumnInfoData colInfo = {{0}, 0};
 
-    colInfo.info = pCond->colList[i];
-    colInfo.pData = calloc(1, EXTRA_BYTES + pQueryHandle->outputCapacity * pCond->colList[i].bytes);
-    if (colInfo.pData == NULL) {
-      goto out_of_memory;
+      colInfo.info = pCond->colList[i];
+      colInfo.pData = calloc(1, EXTRA_BYTES + pQueryHandle->outputCapacity * pCond->colList[i].bytes);
+      if (colInfo.pData == NULL) {
+        goto out_of_memory;
+      }
+      taosArrayPush(pQueryHandle->pColumns, &colInfo);
+      pQueryHandle->statis[i].colId = colInfo.info.colId;
     }
-    taosArrayPush(pQueryHandle->pColumns, &colInfo);
-    pQueryHandle->statis[i].colId = colInfo.info.colId;
-  }
 
-  if (pCond->numOfCols > 0) {
     pQueryHandle->defaultLoadColumn = getDefaultLoadColumns(pQueryHandle, true);
   }
-
   STsdbMeta* pMeta = tsdbGetMeta(tsdb);
   assert(pMeta != NULL);
diff --git a/src/util/inc/ttoken.h b/src/util/inc/ttoken.h
index 3bf030a9ebf96a27f16b27eb6d55505212274ba6..f62329183f82a57bcf65d136aaec65d8babd5f71 100644
--- a/src/util/inc/ttoken.h
+++ b/src/util/inc/ttoken.h
@@ -183,6 +183,7 @@ void taosCleanupKeywordsTable();
 
 SStrToken tscReplaceStrToken(char **str, SStrToken *token, const char* newToken);
 
+SStrToken taosTokenDup(SStrToken* pToken, char* buf, int32_t len);
 
 #ifdef __cplusplus
 }
diff --git a/src/util/src/terror.c b/src/util/src/terror.c
index 887b231ec6161ee99e5c64330595345a01ab34c0..382f872486485d93420d7f180dc11414f570132c 100644
--- a/src/util/src/terror.c
+++ b/src/util/src/terror.c
@@ -224,6 +224,7 @@ TAOS_DEFINE_ERROR(TSDB_CODE_VND_IS_FULL,          "Database memory is fu
 TAOS_DEFINE_ERROR(TSDB_CODE_VND_IS_FLOWCTRL,      "Database memory is full for waiting commit")
 TAOS_DEFINE_ERROR(TSDB_CODE_VND_IS_DROPPING,      "Database is dropping")
 TAOS_DEFINE_ERROR(TSDB_CODE_VND_IS_BALANCING,     "Database is balancing")
+TAOS_DEFINE_ERROR(TSDB_CODE_VND_IS_CLOSING,       "Database is closing")
 TAOS_DEFINE_ERROR(TSDB_CODE_VND_NOT_SYNCED,       "Database suspended")
 TAOS_DEFINE_ERROR(TSDB_CODE_VND_NO_WRITE_AUTH,    "Database write operation denied")
 TAOS_DEFINE_ERROR(TSDB_CODE_VND_IS_SYNCING,       "Database is syncing")
diff --git a/src/util/src/ttokenizer.c b/src/util/src/ttokenizer.c
index 138a79511c36eaab4087bf51007717944656b268..270913d7cbf6186bc132857f27231f88b24fe3ee 100644
--- a/src/util/src/ttokenizer.c
+++ b/src/util/src/ttokenizer.c
@@ -673,3 +673,15 @@ void taosCleanupKeywordsTable() {
     taosHashCleanup(m);
   }
 }
+
+SStrToken taosTokenDup(SStrToken* pToken, char* buf, int32_t len) {
+  assert(pToken != NULL && buf != NULL);
+  SStrToken token = *pToken;
+  token.z = buf;
+
+  assert(len > token.n);
+  strncpy(token.z, pToken->z, pToken->n);
+  token.z[token.n] = 0;
+
+  return token;
+}
diff --git a/src/vnode/src/vnodeMgmt.c b/src/vnode/src/vnodeMgmt.c
index 32f95321383981924c5b6496bd4302edca19da5e..7e6022fc872c3a2221514169ab00874011dc3cb9 100644
--- a/src/vnode/src/vnodeMgmt.c
+++ b/src/vnode/src/vnodeMgmt.c
@@ -91,18 +91,18 @@ static void vnodeIncRef(void *ptNode) {
 }
 
 void *vnodeAcquire(int32_t vgId) {
-  SVnodeObj **ppVnode = NULL;
+  SVnodeObj *pVnode = NULL;
 
   if (tsVnodesHash != NULL) {
-    ppVnode = taosHashGetClone(tsVnodesHash, &vgId, sizeof(int32_t), vnodeIncRef, NULL, sizeof(void *));
+    taosHashGetClone(tsVnodesHash, &vgId, sizeof(int32_t), vnodeIncRef, &pVnode, sizeof(void *));
   }
 
-  if (ppVnode == NULL || *ppVnode == NULL) {
+  if (pVnode == NULL) {
     terrno = TSDB_CODE_VND_INVALID_VGROUP_ID;
     vDebug("vgId:%d, not exist", vgId);
     return NULL;
   }
 
-  return *ppVnode;
+  return pVnode;
 }
 
 void vnodeRelease(void *vparam) {
diff --git a/src/vnode/src/vnodeWrite.c b/src/vnode/src/vnodeWrite.c
index 16089c8e91bdab66af664a9b6c8b7fc0a5dabf04..555eda6d13eeb1dbbb83fbd89ee2672966aa8539 100644
--- a/src/vnode/src/vnodeWrite.c
+++ b/src/vnode/src/vnodeWrite.c
@@ -303,6 +303,17 @@ static int32_t vnodeWriteToWQueueImp(SVWriteMsg *pWrite) {
 }
 
 int32_t vnodeWriteToWQueue(void *vparam, void *wparam, int32_t qtype, void *rparam) {
+  SVnodeObj *pVnode = vparam;
+  if (qtype == TAOS_QTYPE_RPC) {
+    if (!vnodeInReadyStatus(pVnode)) {
+      return TSDB_CODE_APP_NOT_READY;  // it may be in deleting or closing state
+    }
+
+    if (pVnode->role != TAOS_SYNC_ROLE_MASTER) {
+      return TSDB_CODE_APP_NOT_READY;
+    }
+  }
+
   SVWriteMsg *pWrite = vnodeBuildVWriteMsg(vparam, wparam, qtype, rparam);
   if (pWrite == NULL) {
     assert(terrno != 0);
diff --git a/tests/pytest/crash_gen/crash_gen_main.py b/tests/pytest/crash_gen/crash_gen_main.py
index 644aa799164acd4b44224ebd9ed30e39e722371c..b743eee2ef50c49da050cc78c77d0038acf0e507 100755
--- a/tests/pytest/crash_gen/crash_gen_main.py
+++ b/tests/pytest/crash_gen/crash_gen_main.py
@@ -37,6 +37,7 @@ import requests
 import gc
 import taos
 
+
 from .shared.types import TdColumns, TdTags
 
 # from crash_gen import ServiceManager, TdeInstance, TdeSubProcess
@@ -160,6 +161,7 @@ class WorkerThread:
                 Logging.debug("[TRD] Thread Coordinator not running any more, worker thread now stopping...")
                 break
 
+            # Before we fetch the task and run it, let's ensure we properly "use" the database (not needed any more)
             try:
                 if (Config.getConfig().per_thread_db_connection):  # most likely TRUE
@@ -1362,9 +1364,12 @@ class Task():
                 Progress.emit(Progress.ACCEPTABLE_ERROR)
                 self._err = err
             else: # not an acceptable error
-                errMsg = "[=] Unexpected Taos library exception ({}): errno=0x{:X}, msg: {}, SQL: {}".format(
+                shortTid = threading.get_ident() % 10000
+                errMsg = "[=] Unexpected Taos library exception ({}): errno=0x{:X}, thread={}, msg: {}, SQL: {}".format(
                     self.__class__.__name__,
-                    errno2, err, wt.getDbConn().getLastSql())
+                    errno2,
+                    shortTid,
+                    err, wt.getDbConn().getLastSql())
                 self.logDebug(errMsg)
                 if Config.getConfig().debug:
                     # raise # so that we see full stack
@@ -1411,21 +1416,31 @@ class Task():
 
     def lockTable(self, ftName): # full table name
         # print(" <<" + ftName + '_', end="", flush=True)
-        with Task._lock:
-            if not ftName in Task._tableLocks:
+        with Task._lock: # SHORT lock! so we only protect lock creation
+            if not ftName in Task._tableLocks: # Create new lock and add to list, if needed
                 Task._tableLocks[ftName] = threading.Lock()
-        Task._tableLocks[ftName].acquire()
+
+        # No lock protection, anybody can do this any time
+        lock = Task._tableLocks[ftName]
+        # Logging.info("Acquiring lock: {}, {}".format(ftName, lock))
+        lock.acquire()
+        # Logging.info("Acquiring lock successful: {}".format(lock))
 
     def unlockTable(self, ftName):
         # print('_' + ftName + ">> ", end="", flush=True)
-        with Task._lock:
+        with Task._lock:
             if not ftName in self._tableLocks:
                 raise RuntimeError("Corrupt state, no such lock")
             lock = Task._tableLocks[ftName]
             if not lock.locked():
                 raise RuntimeError("Corrupte state, already unlocked")
-            lock.release()
+
+            # Important note, we want to protect unlocking under the task level
+            # locking, because we don't want the lock to be deleted (maybe in the future)
+            # while we unlock it
+            # Logging.info("Releasing lock: {}".format(lock))
+            lock.release()
+            # Logging.info("Releasing lock successful: {}".format(lock))
 
 
 class ExecutionStats:
@@ -1696,6 +1711,11 @@ class TdSuperTable:
         return dbc.query("SELECT * FROM {}.{}".format(self._dbName, self._stName)) > 0
 
     def ensureRegTable(self, task: Optional[Task], dbc: DbConn, regTableName: str):
+        '''
+        Make sure a regular table exists for this super table, creating it if necessary.
+        If there is an associated "Task" that wants to do this, "lock" this table so that
+        others don't access it while we create it.
+        '''
         dbName = self._dbName
         sql = "select tbname from {}.{} where tbname in ('{}')".format(dbName, self._stName, regTableName)
         if dbc.query(sql) >= 1 : # reg table exists already
@@ -1703,18 +1723,24 @@ class TdSuperTable:
         # acquire a lock first, so as to be able to *verify*. More details in TD-1471
         fullTableName = dbName + '.' + regTableName
-        if task is not None:  # TODO: what happens if we don't lock the table
-            task.lockTable(fullTableName)
+        if task is not None:  # Sometimes this operation is requested on behalf of a "task"
+            # Logging.info("Locking table for creation: {}".format(fullTableName))
+            task.lockTable(fullTableName) # in which case we'll lock this table to ensure serialized access
+            # Logging.info("Table locked for creation".format(fullTableName))
         Progress.emit(Progress.CREATE_TABLE_ATTEMPT) # ATTEMPT to create a new table
         # print("(" + fullTableName[-3:] + ")", end="", flush=True)
         try:
             sql = "CREATE TABLE {} USING {}.{} tags ({})".format(
                 fullTableName, dbName, self._stName, self._getTagStrForSql(dbc)
             )
+            # Logging.info("Creating regular table with SQL: {}".format(sql))
            dbc.execute(sql)
+            # Logging.info("Regular table created: {}".format(sql))
         finally:
             if task is not None:
+                # Logging.info("Unlocking table after creation: {}".format(fullTableName))
                 task.unlockTable(fullTableName) # no matter what
+                # Logging.info("Table unlocked after creation: {}".format(fullTableName))
 
     def _getTagStrForSql(self, dbc) :
         tags = self._getTags(dbc)
@@ -2011,9 +2037,30 @@ class TaskAddData(StateTransitionTask):
     def canBeginFrom(cls, state: AnyState):
         return state.canAddData()
 
+    def _lockTableIfNeeded(self, fullTableName, extraMsg = ''):
+        if Config.getConfig().verify_data:
+            # Logging.info("Locking table: {}".format(fullTableName))
+            self.lockTable(fullTableName)
+            # Logging.info("Table locked {}: {}".format(extraMsg, fullTableName))
+            # print("_w" + str(nextInt % 100), end="", flush=True) # Trace what was written
+        else:
+            # Logging.info("Skipping locking table")
+            pass
+
+    def _unlockTableIfNeeded(self, fullTableName):
+        if Config.getConfig().verify_data:
+            # Logging.info("Unlocking table: {}".format(fullTableName))
+            self.unlockTable(fullTableName)
+            # Logging.info("Table unlocked: {}".format(fullTableName))
+        else:
+            pass
+            # Logging.info("Skipping unlocking table")
+
     def _addDataInBatch(self, db, dbc, regTableName, te: TaskExecutor):
         numRecords = self.LARGE_NUMBER_OF_RECORDS if Config.getConfig().larger_data else self.SMALL_NUMBER_OF_RECORDS
+
+        fullTableName = db.getName() + '.' + regTableName
+        self._lockTableIfNeeded(fullTableName, 'batch')
 
         sql = "INSERT INTO {} VALUES ".format(fullTableName)
         for j in range(numRecords):  # number of records per table
@@ -2021,51 +2068,60 @@ class TaskAddData(StateTransitionTask):
             nextInt = db.getNextInt()
             nextTick = db.getNextTick()
             nextColor = db.getNextColor()
             sql += "('{}', {}, '{}');".format(nextTick, nextInt, nextColor)
-        dbc.execute(sql)
+
+        # Logging.info("Adding data in batch: {}".format(sql))
+        try:
+            dbc.execute(sql)
+        finally:
+            # Logging.info("Data added in batch: {}".format(sql))
+            self._unlockTableIfNeeded(fullTableName)
+
+
     def _addData(self, db: Database, dbc, regTableName, te: TaskExecutor): # implied: NOT in batches
         numRecords = self.LARGE_NUMBER_OF_RECORDS if Config.getConfig().larger_data else self.SMALL_NUMBER_OF_RECORDS
 
         for j in range(numRecords): # number of records per table
-            nextInt = db.getNextInt()
+            intToWrite = db.getNextInt()
             nextTick = db.getNextTick()
             nextColor = db.getNextColor()
             if Config.getConfig().record_ops:
                 self.prepToRecordOps()
                 if self.fAddLogReady is None: raise CrashGenError("Unexpected empty fAddLogReady")
-                self.fAddLogReady.write("Ready to write {} to {}\n".format(nextInt, regTableName))
+                self.fAddLogReady.write("Ready to write {} to {}\n".format(intToWrite, regTableName))
                 self.fAddLogReady.flush()
                 os.fsync(self.fAddLogReady.fileno())
 
             # TODO: too ugly trying to lock the table reliably, refactor...
             fullTableName = db.getName() + '.' + regTableName
-            if Config.getConfig().verify_data:
-                self.lockTable(fullTableName)
-                # print("_w" + str(nextInt % 100), end="", flush=True) # Trace what was written
-
+            self._lockTableIfNeeded(fullTableName) # so that we can verify read-back. TODO: deal with exceptions before unlock
+
             try:
                 sql = "INSERT INTO {} VALUES ('{}', {}, '{}');".format( # removed: tags ('{}', {})
                     fullTableName,
                     # ds.getFixedSuperTableName(),
                     # ds.getNextBinary(), ds.getNextFloat(),
-                    nextTick, nextInt, nextColor)
+                    nextTick, intToWrite, nextColor)
+                # Logging.info("Adding data: {}".format(sql))
                 dbc.execute(sql)
+                # Logging.info("Data added: {}".format(sql))
+                intWrote = intToWrite
 
                 # Quick hack, attach an update statement here. TODO: create an "update" task
                 if (not Config.getConfig().use_shadow_db) and Dice.throw(5) == 0: # 1 in N chance, plus not using shaddow DB
-                    nextInt = db.getNextInt()
+                    intToUpdate = db.getNextInt() # Updated, but should not succeed
                     nextColor = db.getNextColor()
                     sql = "INSERt INTO {} VALUES ('{}', {}, '{}');".format( # "INSERt" means "update" here
                         fullTableName,
-                        nextTick, nextInt, nextColor)
+                        nextTick, intToUpdate, nextColor)
                     # sql = "UPDATE {} set speed={}, color='{}' WHERE ts='{}'".format(
                     #     fullTableName, db.getNextInt(), db.getNextColor(), nextTick)
                     dbc.execute(sql)
+                    intWrote = intToUpdate # We updated, seems TDengine non-cluster accepts this.
 
             except: # Any exception at all
-                if Config.getConfig().verify_data:
-                    self.unlockTable(fullTableName)
+                self._unlockTableIfNeeded(fullTableName)
                 raise
 
             # Now read it back and verify, we might encounter an error if table is dropped
@@ -2073,33 +2129,41 @@ class TaskAddData(StateTransitionTask):
                 try:
                     readBack = dbc.queryScalar("SELECT speed from {}.{} WHERE ts='{}'".
                         format(db.getName(), regTableName, nextTick))
-                    if readBack != nextInt :
+                    if readBack != intWrote :
                         raise taos.error.ProgrammingError(
                             "Failed to read back same data, wrote: {}, read: {}"
-                            .format(nextInt, readBack), 0x999)
+                            .format(intWrote, readBack), 0x999)
                 except taos.error.ProgrammingError as err:
                     errno = Helper.convertErrno(err.errno)
-                    if errno in [CrashGenError.INVALID_EMPTY_RESULT, CrashGenError.INVALID_MULTIPLE_RESULT] : # not a single result
+                    if errno == CrashGenError.INVALID_EMPTY_RESULT: # empty result
                         raise taos.error.ProgrammingError(
-                            "Failed to read back same data for tick: {}, wrote: {}, read: {}"
-                            .format(nextTick, nextInt, "Empty Result" if errno == CrashGenError.INVALID_EMPTY_RESULT else "Multiple Result"),
+                            "Failed to read back same data for tick: {}, wrote: {}, read: EMPTY"
+                            .format(nextTick, intWrote),
+                            errno)
+                    elif errno == CrashGenError.INVALID_MULTIPLE_RESULT : # multiple results
+                        raise taos.error.ProgrammingError(
+                            "Failed to read back same data for tick: {}, wrote: {}, read: MULTIPLE RESULTS"
+                            .format(nextTick, intWrote),
                             errno)
                     elif errno in [0x218, 0x362]: # table doesn't exist
                         # do nothing
-                        dummy = 0
+                        pass
                     else:
                         # Re-throw otherwise
                         raise
                 finally:
-                    self.unlockTable(fullTableName) # Unlock the table no matter what
+                    self._unlockTableIfNeeded(fullTableName) # Quite ugly, refactor lock/unlock
+                    # Done with read-back verification, unlock the table now
+            else:
+                self._unlockTableIfNeeded(fullTableName)
 
             # Successfully wrote the data into the DB, let's record it somehow
-            te.recordDataMark(nextInt)
+            te.recordDataMark(intWrote)
 
             if Config.getConfig().record_ops:
                 if self.fAddLogDone is None: raise CrashGenError("Unexpected empty fAddLogDone")
-                self.fAddLogDone.write("Wrote {} to {}\n".format(nextInt, regTableName))
+                self.fAddLogDone.write("Wrote {} to {}\n".format(intWrote, regTableName))
                 self.fAddLogDone.flush()
                 os.fsync(self.fAddLogDone.fileno())
@@ -2137,15 +2201,16 @@ class ThreadStacks: # stack info for all threads
     def __init__(self):
         self._allStacks = {}
-        allFrames = sys._current_frames()
-        for th in threading.enumerate():
+        allFrames = sys._current_frames() # All current stack frames
+        for th in threading.enumerate(): # For each thread
             if th.ident is None:
                 continue
-            stack = traceback.extract_stack(allFrames[th.ident])
-            self._allStacks[th.native_id] = stack
+            stack = traceback.extract_stack(allFrames[th.ident]) # Get stack for a thread
+            shortTid = th.ident % 10000
+            self._allStacks[shortTid] = stack # Was using th.native_id
 
     def print(self, filteredEndName = None, filterInternal = False):
-        for thNid, stack in self._allStacks.items(): # for each thread, stack frames top to bottom
+        for tIdent, stack in self._allStacks.items(): # for each thread, stack frames top to bottom
             lastFrame = stack[-1]
             if filteredEndName: # we need to filter out stacks that match this name
                 if lastFrame.name == filteredEndName : # end did not match
                     continue
@@ -2157,7 +2222,7 @@ class ThreadStacks: # stack info for all threads
                         '__init__']: # the thread that extracted the stack
                         continue # ignore
             # Now print
-            print("\n<----- Thread Info for LWP/ID: {} (most recent call last) <-----".format(thNid))
+            print("\n<----- Thread Info for LWP/ID: {} (most recent call last) <-----".format(tIdent))
             stackFrame = 0
             for frame in stack: # was using: reversed(stack)
                 # print(frame)
@@ -2376,7 +2441,7 @@ class MainExec:
             action='store',
             default=0,
             type=int,
-            help='Maximum number of DBs to keep, set to disable dropping DB. (default: 0)')
+            help='Number of DBs to use, set to disable dropping DB. (default: 0)')
         parser.add_argument(
             '-c',
             '--connector-type',
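Note (not part of the patch): the lockTable()/unlockTable() changes above keep a registry of per-table locks, and only the creation/lookup of those locks happens under a short-lived global lock; the potentially blocking acquire() happens outside it. A standalone Python sketch of that pattern — the names here are illustrative, not the crash_gen API:

```python
import threading

_registry_lock = threading.Lock()   # SHORT lock: only guards the registry itself
_table_locks = {}                   # full table name -> threading.Lock

def lock_table(full_table_name):
    with _registry_lock:             # create the per-table lock if it does not exist yet
        lock = _table_locks.setdefault(full_table_name, threading.Lock())
    lock.acquire()                   # may block, but without holding the registry lock

def unlock_table(full_table_name):
    with _registry_lock:             # look the lock up under the registry lock
        lock = _table_locks[full_table_name]   # a KeyError here would mean corrupt state
    lock.release()
```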
diff --git a/tests/pytest/crash_gen/service_manager.py b/tests/pytest/crash_gen/service_manager.py
index 1cd65c1dde4e9c2027a82730cf4fc12290048bbe..c6685ec4691aa6ddcc7b12f45c96cba4432ef327 100644
--- a/tests/pytest/crash_gen/service_manager.py
+++ b/tests/pytest/crash_gen/service_manager.py
@@ -179,7 +179,7 @@ quorum 2
     def getServiceCmdLine(self): # to start the instance
         if Config.getConfig().track_memory_leaks:
             Logging.info("Invoking VALGRIND on service...")
-            return ['exec /usr/bin/valgrind', '--leak-check=yes', self.getExecFile(), '-c', self.getCfgDir()]
+            return ['exec valgrind', '--leak-check=yes', self.getExecFile(), '-c', self.getCfgDir()]
         else:
             # TODO: move "exec -c" into Popen(), we can both "use shell" and NOT fork so ask to lose kill control
             return ["exec " + self.getExecFile(), '-c', self.getCfgDir()] # used in subproce.Popen()
@@ -310,7 +310,7 @@ class TdeSubProcess:
         # print("Starting TDengine with env: ", myEnv.items())
         print("Starting TDengine: {}".format(cmdLine))
 
-        return Popen(
+        ret = Popen(
             ' '.join(cmdLine), # ' '.join(cmdLine) if useShell else cmdLine,
             shell=True, # Always use shell, since we need to pass ENV vars
             stdout=PIPE,
@@ -318,6 +318,10 @@ class TdeSubProcess:
             stderr=PIPE,
             close_fds=ON_POSIX,
             env=myEnv
             ) # had text=True, which interferred with reading EOF
+        time.sleep(0.01) # very brief wait, then let's check if sub process started successfully.
+        if ret.poll():
+            raise CrashGenError("Sub process failed to start with command line: {}".format(cmdLine))
+        return ret
 
     STOP_SIGNAL = signal.SIGINT # signal.SIGKILL/SIGINT # What signal to use (in kill) to stop a taosd process?
     SIG_KILL_RETCODE = 137 # ref: https://stackoverflow.com/questions/43268156/process-finished-with-exit-code-137-in-pycharm
@@ -614,7 +618,7 @@ class ServiceManager:
 
         # Find if there's already a taosd service, and then kill it
         for proc in psutil.process_iter():
-            if proc.name() == 'taosd':
+            if proc.name() == 'taosd' or proc.name() == 'memcheck-amd64-': # Regular or under Valgrind
                 Logging.info("Killing an existing TAOSD process in 2 seconds... press CTRL-C to interrupt")
                 time.sleep(2.0)
                 proc.kill()
diff --git a/tests/pytest/crash_gen/shared/misc.py b/tests/pytest/crash_gen/shared/misc.py
index 90ad802ff1c130439cdc5f5587ecd0607e3e116b..78923bcc29ebb52df1c1a44d5e24ea5159962486 100644
--- a/tests/pytest/crash_gen/shared/misc.py
+++ b/tests/pytest/crash_gen/shared/misc.py
@@ -35,7 +35,8 @@ class LoggingFilter(logging.Filter):
 
 class MyLoggingAdapter(logging.LoggerAdapter):
     def process(self, msg, kwargs):
-        return "[{:04d}] {}".format(threading.get_ident() % 10000, msg), kwargs
+        shortTid = threading.get_ident() % 10000
+        return "[{:04d}] {}".format(shortTid, msg), kwargs
         # return '[%s] %s' % (self.extra['connid'], msg), kwargs
diff --git a/tests/pytest/fulltest.sh b/tests/pytest/fulltest.sh
index be2cfee04b2e2405804fad8d24b77759b6f79b13..951a96e3eee6c54461492ea778d2f601df8824bf 100755
--- a/tests/pytest/fulltest.sh
+++ b/tests/pytest/fulltest.sh
@@ -31,7 +31,7 @@ python3 ./test.py -f table/column_name.py
 python3 ./test.py -f table/column_num.py
 python3 ./test.py -f table/db_table.py
 python3 ./test.py -f table/create_sensitive.py
-#python3 ./test.py -f table/tablename-boundary.py
+python3 ./test.py -f table/tablename-boundary.py
 python3 ./test.py -f table/max_table_length.py
 python3 ./test.py -f table/alter_column.py
 python3 ./test.py -f table/boundary.py
@@ -334,5 +334,5 @@ python3 ./test.py -f tag_lite/alter_tag.py
 
 python3 test.py -f tools/taosdemoAllTest/taosdemoTestInsertWithJson.py
 python3 test.py -f tools/taosdemoAllTest/taosdemoTestQueryWithJson.py
-
+python3 ./test.py -f tag_lite/drop_auto_create.py
 #======================p4-end===============
diff --git a/tests/pytest/import_merge/importCSV.py b/tests/pytest/import_merge/importCSV.py
index b4441949a1c8e83f15e07d76ceb49de0dc418afe..24ebffd48530e20a9a4f0cc13d4784e997ba4a75 100644
--- a/tests/pytest/import_merge/importCSV.py
+++ b/tests/pytest/import_merge/importCSV.py
@@ -82,6 +82,8 @@ class TDTestCase:
         tdSql.execute("import into tbx file \'%s\'"%(self.csvfile))
         tdSql.query('select * from tbx')
         tdSql.checkRows(self.rows)
+        #TD-4447 import the same csv twice
+        tdSql.execute("import into tbx file \'%s\'"%(self.csvfile))
 
     def stop(self):
         self.destroyCSVFile()
diff --git a/tests/pytest/insert/nchar.py b/tests/pytest/insert/nchar.py
index 3319aa3c565b76d4d7fb9c33b04549e090c8062b..5ad52b96a1555b3ccd622fd4bf88c7a0b26051b5 100644
--- a/tests/pytest/insert/nchar.py
+++ b/tests/pytest/insert/nchar.py
@@ -36,6 +36,10 @@ class TDTestCase:
         tdSql.checkData(1, 1, '涛思数据')
 
         tdSql.error("insert into tb values (now, 'taosdata001')")
+
+        tdSql.error("insert into tb(now, 😀)")
+        tdSql.query("select * from tb")
+        tdSql.checkRows(2)
 
     def stop(self):
         tdSql.close()
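Note (not part of the patch): the TdeSubProcess change in service_manager.py above starts the child process and then checks on it almost immediately so that an obviously broken command line fails fast instead of hanging the test run. A minimal standalone sketch of the same start-and-verify idea — the command line and function name are placeholders, not the real taosd invocation:

```python
import time
from subprocess import Popen, PIPE

def start_checked(cmd_line):
    proc = Popen(cmd_line, shell=True, stdout=PIPE, stderr=PIPE)
    time.sleep(0.01)                  # very brief wait before checking on the child
    if proc.poll() is not None:       # poll() returns the exit code once the child has exited
        raise RuntimeError("sub process failed to start: {}".format(cmd_line))
    return proc

# usage sketch: proc = start_checked("exec sleep 60")
```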
diff --git a/tests/pytest/table/tablename-boundary.py b/tests/pytest/table/tablename-boundary.py
index 0755e7535577b32c52539ab46259b53096455808..dc22c3343b403a93587b7f626061caa62fbf30d1 100644
--- a/tests/pytest/table/tablename-boundary.py
+++ b/tests/pytest/table/tablename-boundary.py
@@ -14,6 +14,13 @@ class TDTestCase:
         tdLog.debug("start to execute %s" % __file__)
         tdSql.init(conn.cursor(), logSql)
 
+        self.ts = 1622100000000
+
+    def get_random_string(self, length):
+        letters = string.ascii_lowercase
+        result_str = ''.join(random.choice(letters) for i in range(length))
+        return result_str
+
     def run(self):
         tdSql.prepare()
 
@@ -24,19 +31,62 @@ class TDTestCase:
                 shell=True)) - 1
         tdLog.info("table name max length is %d" % tableNameMaxLen)
         chars = string.ascii_uppercase + string.ascii_lowercase
-        tb_name = ''.join(random.choices(chars, k=tableNameMaxLen))
+        tb_name = ''.join(random.choices(chars, k=tableNameMaxLen + 1))
         tdLog.info('tb_name length %d' % len(tb_name))
         tdLog.info('create table %s (ts timestamp, value int)' % tb_name)
-        tdSql.error(
-            'create table %s (ts timestamp, speed binary(4089))' %
-            tb_name)
+        tdSql.error('create table %s (ts timestamp, speed binary(4089))' % tb_name)
 
-        tb_name = ''.join(random.choices(chars, k=191))
+        tb_name = ''.join(random.choices(chars, k=tableNameMaxLen))
         tdLog.info('tb_name length %d' % len(tb_name))
         tdLog.info('create table %s (ts timestamp, value int)' % tb_name)
         tdSql.execute(
             'create table %s (ts timestamp, speed binary(4089))' % tb_name)
+
+        db_name = self.get_random_string(33)
+        tdSql.error("create database %s" % db_name)
+
+        db_name = self.get_random_string(32)
+        tdSql.execute("create database %s" % db_name)
+        tdSql.execute("use %s" % db_name)
+
+        tb_name = self.get_random_string(193)
+        tdSql.error("create table %s(ts timestamp, val int)" % tb_name)
+
+        tb_name = self.get_random_string(192)
+        tdSql.execute("create table %s.%s(ts timestamp, val int)" % (db_name, tb_name))
+        tdSql.query("show %s.tables" % db_name)
+        tdSql.checkRows(1)
+        tdSql.checkData(0, 0, tb_name)
+
+        tdSql.execute("insert into %s.%s values(now, 1)" % (db_name, tb_name))
+        tdSql.query("select * from %s.%s" %(db_name, tb_name))
+        tdSql.checkRows(1)
+
+        db_name = self.get_random_string(32)
+        tdSql.execute("create database %s update 1" % db_name)
+
+        stb_name = self.get_random_string(192)
+        tdSql.execute("create table %s.%s(ts timestamp, val int) tags(id int)" % (db_name, stb_name))
+        tb_name1 = self.get_random_string(192)
+        tdSql.execute("insert into %s.%s using %s.%s tags(1) values(%d, 1)(%d, 2)(%d, 3)" % (db_name, tb_name1, db_name, stb_name, self.ts, self.ts + 1, self.ts + 2))
+        tb_name2 = self.get_random_string(192)
+        tdSql.execute("insert into %s.%s using %s.%s tags(2) values(%d, 1)(%d, 2)(%d, 3)" % (db_name, tb_name2, db_name, stb_name, self.ts, self.ts + 1, self.ts + 2))
+
+        tdSql.query("show %s.tables" % db_name)
+        tdSql.checkRows(2)
+
+        tdSql.query("select * from %s.%s" % (db_name, stb_name))
+        tdSql.checkRows(6)
+
+        tdSql.execute("insert into %s.%s using %s.%s tags(1) values(%d, null)" % (db_name, tb_name1, db_name, stb_name, self.ts))
+
+        tdSql.query("select * from %s.%s" % (db_name, stb_name))
+        tdSql.checkRows(6)
+
+
+
+
     def stop(self):
         tdSql.close()
diff --git a/tests/pytest/tag_lite/drop_auto_create.py b/tests/pytest/tag_lite/drop_auto_create.py
new file mode 100644
index 0000000000000000000000000000000000000000..f89b41008b167884f54cacde3eda11cbc81c0040
--- /dev/null
+++ b/tests/pytest/tag_lite/drop_auto_create.py
@@ -0,0 +1,47 @@
+###################################################################
+#           Copyright (c) 2016 by TAOS Technologies, Inc.
+#                     All rights reserved.
+#
+#  This file is proprietary and confidential to TAOS Technologies.
+#  No part of this file may be reproduced, stored, transmitted,
+#  disclosed or used in any form or by any means other than as
+#  expressly provided by the written permission from Jianhui Tao
+#
+###################################################################
+
+# -*- coding: utf-8 -*-
+
+import sys
+from util.log import *
+from util.cases import *
+from util.sql import *
+
+
+class TDTestCase:
+    def init(self, conn, logSql):
+        tdLog.debug("start to execute %s" % __file__)
+        tdSql.init(conn.cursor(), logSql)
+
+    def run(self):
+        tdSql.prepare()
+
+        tdSql.execute('create table m1(ts timestamp, k int) tags(a binary(12), b int, c double);')
+        tdSql.execute('insert into tm0 using m1(b,c) tags(1, 99) values(now, 1);')
+        tdSql.execute('insert into tm1 using m1(b,c) tags(2, 100) values(now, 2);')
+        tdLog.info("2 rows inserted")
+        tdSql.query('select * from m1;')
+        tdSql.checkRows(2)
+        tdSql.query('select *,tbname from m1;')
+        tdSql.execute("drop table tm0; ")
+        tdSql.query('select * from m1')
+        tdSql.checkRows(1)
+
+
+
+    def stop(self):
+        tdSql.close()
+        tdLog.success("%s successfully executed" % __file__)
+
+
+tdCases.addWindows(__file__, TDTestCase())
+tdCases.addLinux(__file__, TDTestCase())
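Note (not part of the patch): several crash_gen changes above (MyLoggingAdapter, the ThreadStacks keying, the Task error message) adopt a short thread id, `threading.get_ident() % 10000`, so log lines and stack dumps can be correlated by the same four-digit id. A self-contained sketch of that convention — the logger name is arbitrary:

```python
import logging
import threading

class ShortTidAdapter(logging.LoggerAdapter):
    def process(self, msg, kwargs):
        short_tid = threading.get_ident() % 10000   # same 4-digit id used in stack dumps
        return "[{:04d}] {}".format(short_tid, msg), kwargs

logging.basicConfig(level=logging.INFO)
log = ShortTidAdapter(logging.getLogger("short_tid_demo"), {})
log.info("worker started")   # the emitted record carries a prefix like [0421]
```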