Commit 27500248 authored by: Hongze Cheng

Merge branch 'develop' into feature/2.0tsdb

......@@ -56,7 +56,7 @@ static int32_t getToStringLength(const char *pData, int32_t length, int32_t type
} break;
case TSDB_DATA_TYPE_TIMESTAMP:
case TSDB_DATA_TYPE_BIGINT:
len = sprintf(buf, "%" PRId64 "", *(int64_t *)pData);
len = sprintf(buf, "%" PRId64, *(int64_t *)pData);
break;
case TSDB_DATA_TYPE_BOOL:
len = MAX_BOOL_TYPE_LENGTH;
......
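For context, PRId64 itself expands to a string literal and adjacent string literals are concatenated at compile time, so the trailing empty "" in the old format strings was redundant; this commit drops it throughout. A minimal sketch, with an arbitrary value:

#include <inttypes.h>
#include <stdio.h>

int main(void) {
  int64_t v = 27500248;
  char buf[32];
  /* "%" PRId64 already concatenates into a single format string, no extra "" needed */
  int len = sprintf(buf, "%" PRId64, v);
  printf("%s (%d chars)\n", buf, len);
  return 0;
}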
......@@ -779,7 +779,6 @@ static int32_t tscCheckIfCreateTable(char **sqlstr, SSqlObj *pSql) {
STagData *pTag = (STagData *)pCmd->payload;
memset(pTag, 0, sizeof(STagData));
pCmd->payloadLen = sizeof(STagData);
/*
* the source super table is moved to the secondary position of the pTableMetaInfo list
......@@ -928,6 +927,14 @@ static int32_t tscCheckIfCreateTable(char **sqlstr, SSqlObj *pSql) {
}
}
// 3. calculate the actual data size of STagData
pCmd->payloadLen = sizeof(pTag->name) + sizeof(pTag->dataLen);
for (int32_t t = 0; t < numOfTags; ++t) {
pTag->dataLen += pTagSchema[t].bytes;
pCmd->payloadLen += pTagSchema[t].bytes;
}
pTag->dataLen = htonl(pTag->dataLen);
if (tscValidateName(&tableToken) != TSDB_CODE_SUCCESS) {
return tscInvalidSQLErrMsg(pCmd->payload, "invalid table name", *sqlstr);
}
......
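This hunk computes the real payload size of a variable-length tag block: the fixed name and dataLen fields plus the sum of the tag column widths, with dataLen converted to network byte order before it goes on the wire. A minimal sketch of that arithmetic, assuming a simplified schema type (SSchemaLite and tagPayloadLen are illustrative, not part of the commit):

#include <arpa/inet.h>   /* htonl */
#include <stdint.h>

typedef struct { int32_t bytes; } SSchemaLite;   /* stand-in for SSchema */

/* Total payload bytes for name + dataLen field + all tag values;
   also produces dataLen in network byte order. */
int32_t tagPayloadLen(const SSchemaLite *tags, int32_t numOfTags,
                      int32_t nameSize, int32_t *dataLenNet) {
  int32_t dataLen = 0;
  int32_t payloadLen = nameSize + (int32_t)sizeof(int32_t);
  for (int32_t t = 0; t < numOfTags; ++t) {
    dataLen    += tags[t].bytes;
    payloadLen += tags[t].bytes;
  }
  *dataLenNet = (int32_t)htonl((uint32_t)dataLen);
  return payloadLen;
}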
......@@ -4416,6 +4416,7 @@ int32_t setAlterTableInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) {
TSDB_CODE_SUCCESS) {
return invalidSqlErrMsg(pQueryInfo->msg, msg13);
}
pAlterSQL->tagData.dataLen = pTagsSchema->bytes;
// validate the length of binary
if ((pTagsSchema->type == TSDB_DATA_TYPE_BINARY || pTagsSchema->type == TSDB_DATA_TYPE_NCHAR) &&
......@@ -4539,11 +4540,13 @@ int32_t validateDNodeConfig(tDCLSQL* pOptions) {
return TSDB_CODE_INVALID_SQL;
}
const SDNodeDynConfOption DNODE_DYNAMIC_CFG_OPTIONS[14] = {
{"resetLog", 8}, {"resetQueryCache", 15}, {"dDebugFlag", 10}, {"rpcDebugFlag", 12},
{"tmrDebugFlag", 12}, {"cDebugFlag", 10}, {"uDebugFlag", 10}, {"mDebugFlag", 10},
{"sdbDebugFlag", 12}, {"httpDebugFlag", 13}, {"monitorDebugFlag", 16}, {"qDebugflag", 10},
{"debugFlag", 9}, {"monitor", 7}};
const int DNODE_DYNAMIC_CFG_OPTIONS_SIZE = 17;
const SDNodeDynConfOption DNODE_DYNAMIC_CFG_OPTIONS[] = {
{"resetLog", 8}, {"resetQueryCache", 15}, {"debugFlag", 9}, {"mDebugFlag", 10},
{"dDebugFlag", 10}, {"sdbDebugFlag", 12}, {"vDebugFlag", 10}, {"cDebugFlag", 10},
{"httpDebugFlag", 13}, {"monitorDebugFlag", 16}, {"rpcDebugFlag", 12}, {"uDebugFlag", 10},
{"tmrDebugFlag", 12}, {"qDebugflag", 10}, {"sDebugflag", 10}, {"tsdbDebugFlag", 13},
{"monitor", 7}};
SSQLToken* pOptionToken = &pOptions->a[1];
......@@ -4555,8 +4558,8 @@ int32_t validateDNodeConfig(tDCLSQL* pOptions) {
return TSDB_CODE_SUCCESS;
}
}
} else if ((strncasecmp(DNODE_DYNAMIC_CFG_OPTIONS[13].name, pOptionToken->z, pOptionToken->n) == 0) &&
(DNODE_DYNAMIC_CFG_OPTIONS[13].len == pOptionToken->n)) {
} else if ((strncasecmp(DNODE_DYNAMIC_CFG_OPTIONS[DNODE_DYNAMIC_CFG_OPTIONS_SIZE - 1].name, pOptionToken->z, pOptionToken->n) == 0) &&
(DNODE_DYNAMIC_CFG_OPTIONS[DNODE_DYNAMIC_CFG_OPTIONS_SIZE - 1].len == pOptionToken->n)) {
SSQLToken* pValToken = &pOptions->a[2];
int32_t val = strtol(pValToken->z, NULL, 10);
if (val != 0 && val != 1) {
......@@ -4572,7 +4575,7 @@ int32_t validateDNodeConfig(tDCLSQL* pOptions) {
return TSDB_CODE_INVALID_SQL;
}
for (int32_t i = 2; i < tListLen(DNODE_DYNAMIC_CFG_OPTIONS) - 1; ++i) {
for (int32_t i = 2; i < DNODE_DYNAMIC_CFG_OPTIONS_SIZE - 1; ++i) {
const SDNodeDynConfOption* pOption = &DNODE_DYNAMIC_CFG_OPTIONS[i];
if ((strncasecmp(pOption->name, pOptionToken->z, pOptionToken->n) == 0) && (pOption->len == pOptionToken->n)) {
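The option table is now sized via DNODE_DYNAMIC_CFG_OPTIONS_SIZE and the "monitor" entry is addressed as the last element instead of a hard-coded index 13, so adding options can no longer silently break the lookup. A minimal sketch of the case-insensitive, length-checked lookup this code performs (the abbreviated table and findDynCfgOption are illustrative):

#include <strings.h>   /* strncasecmp */

typedef struct { const char *name; int len; } SDynCfgOpt;

static const SDynCfgOpt OPTS[] = {
  {"resetLog", 8}, {"debugFlag", 9}, {"monitor", 7},   /* abbreviated list */
};
static const int OPTS_SIZE = sizeof(OPTS) / sizeof(OPTS[0]);

/* Return the matching option index, or -1 if the token matches nothing. */
int findDynCfgOption(const char *tok, int tokLen) {
  for (int i = 0; i < OPTS_SIZE; ++i) {
    if (OPTS[i].len == tokLen && strncasecmp(OPTS[i].name, tok, tokLen) == 0) return i;
  }
  return -1;
}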
......@@ -5550,11 +5553,11 @@ int32_t doCheckForCreateFromStable(SSqlObj* pSql, SSqlInfo* pInfo) {
// too long tag values will return invalid sql, not be truncated automatically
SSchema* pTagSchema = tscGetTableTagSchema(pStableMeterMetaInfo->pTableMeta);
char* tagVal = pCreateTable->usingInfo.tagdata.data;
STagData* pTag = &pCreateTable->usingInfo.tagdata;
char* tagVal = pTag->data;
int32_t ret = TSDB_CODE_SUCCESS;
for (int32_t i = 0; i < pList->nExpr; ++i) {
if (pTagSchema[i].type == TSDB_DATA_TYPE_BINARY || pTagSchema[i].type == TSDB_DATA_TYPE_NCHAR) {
// validate the length of binary
if (pList->a[i].pVar.nLen + VARSTR_HEADER_SIZE > pTagSchema[i].bytes) {
......@@ -5593,6 +5596,7 @@ int32_t doCheckForCreateFromStable(SSqlObj* pSql, SSqlInfo* pInfo) {
return ret;
}
pTag->dataLen = tagVal - pTag->data;
return TSDB_CODE_SUCCESS;
}
......
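doCheckForCreateFromStable now records how many tag bytes were actually written by taking the distance the write cursor advanced from the start of pTag->data. A tiny sketch of that pointer-arithmetic idiom (writeTagValues and its fixed int64 payload are hypothetical):

#include <stdint.h>
#include <string.h>

/* Serialize a few values into dst and return how many bytes were written. */
int32_t writeTagValues(char *dst, const int64_t *vals, int n) {
  char *p = dst;
  for (int i = 0; i < n; ++i) {
    memcpy(p, &vals[i], sizeof(int64_t));
    p += sizeof(int64_t);
  }
  return (int32_t)(p - dst);   /* same idea as pTag->dataLen = tagVal - pTag->data */
}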
......@@ -1213,8 +1213,13 @@ int tscBuildCreateTableMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
int8_t type = pInfo->pCreateTableInfo->type;
if (type == TSQL_CREATE_TABLE_FROM_STABLE) { // create by using super table, tags value
memcpy(pMsg, &pInfo->pCreateTableInfo->usingInfo.tagdata, sizeof(STagData));
pMsg += sizeof(STagData);
STagData* pTag = &pInfo->pCreateTableInfo->usingInfo.tagdata;
*(int32_t*)pMsg = htonl(pTag->dataLen);
pMsg += sizeof(int32_t);
memcpy(pMsg, pTag->name, sizeof(pTag->name));
pMsg += sizeof(pTag->name);
memcpy(pMsg, pTag->data, pTag->dataLen);
pMsg += pTag->dataLen;
} else { // create (super) table
pSchema = (SSchema *)pCreateTableMsg->schema;
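Instead of copying the whole fixed-size STagData into the message, tscBuildCreateTableMsg now writes only the pieces that matter: a 4-byte length in network order, the table name, and dataLen bytes of tag data. A minimal serialization sketch under those assumptions (STagDataLite, its array sizes, and serializeTagData are hypothetical stand-ins):

#include <arpa/inet.h>
#include <stdint.h>
#include <string.h>

enum { NAME_SIZE = 64, DATA_SIZE = 1024 };   /* placeholder sizes for the sketch */

typedef struct {
  int32_t dataLen;           /* host order inside the client */
  char    name[NAME_SIZE];
  char    data[DATA_SIZE];
} STagDataLite;

/* Returns the number of bytes written into pMsg: length + name + used data only. */
size_t serializeTagData(char *pMsg, const STagDataLite *pTag) {
  char *p = pMsg;
  int32_t lenNet = (int32_t)htonl((uint32_t)pTag->dataLen);
  memcpy(p, &lenNet, sizeof(lenNet));            p += sizeof(lenNet);
  memcpy(p, pTag->name, sizeof(pTag->name));     p += sizeof(pTag->name);
  memcpy(p, pTag->data, (size_t)pTag->dataLen);  p += pTag->dataLen;
  return (size_t)(p - pMsg);
}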
......@@ -1281,9 +1286,7 @@ int tscBuildAlterTableMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
strcpy(pAlterTableMsg->tableId, pTableMetaInfo->name);
pAlterTableMsg->type = htons(pAlterInfo->type);
pAlterTableMsg->numOfCols = tscNumOfFields(pQueryInfo);
memcpy(pAlterTableMsg->tagVal, pAlterInfo->tagData.data, TSDB_MAX_TAGS_LEN);
pAlterTableMsg->numOfCols = htons(tscNumOfFields(pQueryInfo));
SSchema *pSchema = pAlterTableMsg->schema;
for (int i = 0; i < pAlterTableMsg->numOfCols; ++i) {
TAOS_FIELD *pField = tscFieldInfoGetField(&pQueryInfo->fieldsInfo, i);
......@@ -1295,6 +1298,9 @@ int tscBuildAlterTableMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
}
pMsg = (char *)pSchema;
pAlterTableMsg->tagValLen = htonl(pAlterInfo->tagData.dataLen);
memcpy(pMsg, pAlterInfo->tagData.data, pAlterInfo->tagData.dataLen);
pMsg += pAlterInfo->tagData.dataLen;
msgLen = pMsg - (char*)pAlterTableMsg;
pCmd->payloadLen = msgLen;
......
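On the ALTER TABLE path the tag value no longer occupies a fixed TSDB_MAX_TAGS_LEN slot in the message; the sender appends it after the schema array and records its length in tagValLen (network order), and the receiver recovers it with pointer arithmetic, as in mgmtProcessAlterTableMsg later in this commit. A compact sketch of the receiver side, assuming simplified message and schema structs (SAlterMsgLite and SSchemaLite are illustrative):

#include <arpa/inet.h>
#include <stdint.h>

typedef struct { char name[64]; uint8_t type; int16_t bytes; } SSchemaLite;

typedef struct {
  int16_t     numOfCols;   /* network order on the wire */
  int32_t     tagValLen;   /* network order on the wire */
  SSchemaLite schema[];    /* tag value bytes follow the schema array */
} SAlterMsgLite;

/* Convert the counters to host order and locate the variable-length tag value. */
const char *alterMsgTagVal(SAlterMsgLite *pAlter) {
  pAlter->numOfCols = (int16_t)ntohs((uint16_t)pAlter->numOfCols);
  pAlter->tagValLen = (int32_t)ntohl((uint32_t)pAlter->tagValLen);
  return (const char *)(pAlter->schema + pAlter->numOfCols);
}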
......@@ -165,7 +165,7 @@ static void tscSetTimestampForRes(SSqlStream *pStream, SSqlObj *pSql) {
if (timestamp != actualTimestamp) {
// reset the timestamp of each agg point by using start time of each interval
*((int64_t *)pRes->data) = actualTimestamp;
tscWarn("%p stream:%p, timestamp of points is:%" PRId64 ", reset to %" PRId64 "", pSql, pStream, timestamp, actualTimestamp);
tscWarn("%p stream:%p, timestamp of points is:%" PRId64 ", reset to %" PRId64, pSql, pStream, timestamp, actualTimestamp);
}
}
......@@ -287,10 +287,10 @@ static void tscSetRetryTimer(SSqlStream *pStream, SSqlObj *pSql, int64_t timer)
return;
}
tscTrace("%p stream:%p, next start at %" PRId64 ", in %" PRId64 "ms. delay:%" PRId64 "ms qrange %" PRId64 "-%" PRId64 "", pStream->pSql, pStream,
tscTrace("%p stream:%p, next start at %" PRId64 ", in %" PRId64 "ms. delay:%" PRId64 "ms qrange %" PRId64 "-%" PRId64, pStream->pSql, pStream,
now + timer, timer, delay, pStream->stime, etime);
} else {
tscTrace("%p stream:%p, next start at %" PRId64 ", in %" PRId64 "ms. delay:%" PRId64 "ms qrange %" PRId64 "-%" PRId64 "", pStream->pSql, pStream,
tscTrace("%p stream:%p, next start at %" PRId64 ", in %" PRId64 "ms. delay:%" PRId64 "ms qrange %" PRId64 "-%" PRId64, pStream->pSql, pStream,
pStream->stime, timer, delay, pStream->stime - pStream->interval, pStream->stime - 1);
}
......@@ -380,7 +380,7 @@ static void tscSetSlidingWindowInfo(SSqlObj *pSql, SSqlStream *pStream) {
SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0);
if (pQueryInfo->intervalTime < minIntervalTime) {
tscWarn("%p stream:%p, original sample interval:%ld too small, reset to:%" PRId64 "", pSql, pStream,
tscWarn("%p stream:%p, original sample interval:%ld too small, reset to:%" PRId64, pSql, pStream,
pQueryInfo->intervalTime, minIntervalTime);
pQueryInfo->intervalTime = minIntervalTime;
}
......@@ -397,14 +397,14 @@ static void tscSetSlidingWindowInfo(SSqlObj *pSql, SSqlStream *pStream) {
if (pQueryInfo->slidingTime == -1) {
pQueryInfo->slidingTime = pQueryInfo->intervalTime;
} else if (pQueryInfo->slidingTime < minSlidingTime) {
tscWarn("%p stream:%p, original sliding value:%" PRId64 " too small, reset to:%" PRId64 "", pSql, pStream,
tscWarn("%p stream:%p, original sliding value:%" PRId64 " too small, reset to:%" PRId64, pSql, pStream,
pQueryInfo->slidingTime, minSlidingTime);
pQueryInfo->slidingTime = minSlidingTime;
}
if (pQueryInfo->slidingTime > pQueryInfo->intervalTime) {
tscWarn("%p stream:%p, sliding value:%" PRId64 " can not be larger than interval range, reset to:%" PRId64 "", pSql, pStream,
tscWarn("%p stream:%p, sliding value:%" PRId64 " can not be larger than interval range, reset to:%" PRId64, pSql, pStream,
pQueryInfo->slidingTime, pQueryInfo->intervalTime);
pQueryInfo->slidingTime = pQueryInfo->intervalTime;
......@@ -433,11 +433,11 @@ static int64_t tscGetStreamStartTimestamp(SSqlObj *pSql, SSqlStream *pStream, in
} else { // timewindow based aggregation stream
if (stime == 0) { // no data in meter till now
stime = ((int64_t)taosGetTimestamp(pStream->precision) / pStream->interval) * pStream->interval;
tscWarn("%p stream:%p, last timestamp:0, reset to:%" PRId64 "", pSql, pStream, stime);
tscWarn("%p stream:%p, last timestamp:0, reset to:%" PRId64, pSql, pStream, stime);
} else {
int64_t newStime = (stime / pStream->interval) * pStream->interval;
if (newStime != stime) {
tscWarn("%p stream:%p, last timestamp:%" PRId64 ", reset to:%" PRId64 "", pSql, pStream, stime, newStime);
tscWarn("%p stream:%p, last timestamp:%" PRId64 ", reset to:%" PRId64, pSql, pStream, stime, newStime);
stime = newStime;
}
}
......
......@@ -107,7 +107,6 @@ int32_t tsReplications = TSDB_DEFAULT_REPLICA_NUM;
int16_t tsAffectedRowsMod = 0;
int32_t tsNumOfMPeers = 3;
int32_t tsMaxShellConns = 2000;
int32_t tsMaxTables = 100000;
char tsDefaultDB[TSDB_DB_NAME_LEN] = {0};
char tsDefaultUser[64] = "root";
......
......@@ -34,6 +34,7 @@ class TDengineCursor(object):
self._block_rows = -1
self._block_iter = 0
self._affected_rows = 0
self._logfile = ""
if connection is not None:
self._connection = connection
......@@ -83,6 +84,9 @@ class TDengineCursor(object):
"""
pass
def log(self, logfile):
self._logfile = logfile
def close(self):
"""Close the cursor.
"""
......@@ -113,6 +117,11 @@ class TDengineCursor(object):
pass
res = CTaosInterface.query(self._connection._conn, stmt)
if (self._logfile):
with open(self._logfile, "a") as logfile:
logfile.write("%s;\n" % operation)
if res == 0:
if CTaosInterface.fieldsCount(self._connection._conn) == 0:
self._affected_rows += CTaosInterface.affectedRows(
......
......@@ -36,6 +36,7 @@ class TDengineCursor(object):
self._block_rows = -1
self._block_iter = 0
self._affected_rows = 0
self._logfile = ""
if connection is not None:
self._connection = connection
......@@ -85,6 +86,9 @@ class TDengineCursor(object):
"""
pass
def log(self, logfile):
self._logfile = logfile
def close(self):
"""Close the cursor.
"""
......@@ -121,6 +125,10 @@ class TDengineCursor(object):
res = CTaosInterface.query(self._connection._conn, stmt)
# print(" << Query ({}) Exec Done".format(localSeqNum))
if (self._logfile):
with open(self._logfile, "a") as logfile:
logfile.write("%s;\n" % operation)
if res == 0:
if CTaosInterface.fieldsCount(self._connection._conn) == 0:
self._affected_rows += CTaosInterface.affectedRows(
......
......@@ -114,18 +114,20 @@ void dnodeStartModules() {
}
void dnodeProcessModuleStatus(uint32_t moduleStatus) {
bool enableMgmtModule = moduleStatus & (1 << TSDB_MOD_MGMT);
if (!tsModule[TSDB_MOD_MGMT].enable && enableMgmtModule) {
dPrint("module status is received, start mgmt module", tsModuleStatus, moduleStatus);
tsModule[TSDB_MOD_MGMT].enable = true;
dnodeSetModuleStatus(TSDB_MOD_MGMT);
(*tsModule[TSDB_MOD_MGMT].startFp)();
}
for (int32_t module = TSDB_MOD_MGMT; module < TSDB_MOD_HTTP; ++module) {
bool enableModule = moduleStatus & (1 << module);
if (!tsModule[module].enable && enableModule) {
dPrint("module status:%u is received, start %s module", tsModuleStatus, tsModule[module].name);
tsModule[module].enable = true;
dnodeSetModuleStatus(module);
(*tsModule[module].startFp)();
}
if (tsModule[TSDB_MOD_MGMT].enable && !enableMgmtModule) {
dPrint("module status is received, stop mgmt module", tsModuleStatus, moduleStatus);
tsModule[TSDB_MOD_MGMT].enable = false;
dnodeUnSetModuleStatus(TSDB_MOD_MGMT);
(*tsModule[TSDB_MOD_MGMT].stopFp)();
if (tsModule[module].enable && !enableModule) {
dPrint("module status:%u is received, stop %s module", tsModuleStatus, tsModule[module].name);
tsModule[module].enable = false;
dnodeUnSetModuleStatus(module);
(*tsModule[module].stopFp)();
}
}
}
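dnodeProcessModuleStatus is generalized from mgmt-only handling to a loop over all modules: each module owns one bit in moduleStatus, and a module is started or stopped only when that bit disagrees with its current enable flag. A toy sketch of the bit test (module IDs, names, and prints are hypothetical):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

enum { MOD_MGMT = 0, MOD_HTTP = 1, MOD_MAX = 2 };

void processModuleStatus(uint32_t status, bool enabled[MOD_MAX]) {
  for (int m = MOD_MGMT; m < MOD_HTTP; ++m) {
    bool want = (status & (1u << m)) != 0;
    if (want && !enabled[m])      { enabled[m] = true;  printf("start module %d\n", m); }
    else if (!want && enabled[m]) { enabled[m] = false; printf("stop module %d\n", m);  }
  }
}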
......@@ -340,13 +340,14 @@ void tsDataSwap(void *pLeft, void *pRight, int32_t type, int32_t size);
#define TSDB_SESSIONS_PER_VNODE (300)
#define TSDB_SESSIONS_PER_DNODE (TSDB_SESSIONS_PER_VNODE * TSDB_MAX_VNODES)
#define TSDB_MAX_MNODES 5
#define TSDB_MAX_DNODES 10
#define TSDB_MAX_ACCOUNTS 10
#define TSDB_MAX_USERS 20
#define TSDB_MAX_DBS 100
#define TSDB_MAX_VGROUPS 1000
#define TSDB_MAX_SUPER_TABLES 100
#define TSDB_DEFAULT_MNODES_HASH_SIZE 5
#define TSDB_DEFAULT_DNODES_HASH_SIZE 10
#define TSDB_DEFAULT_ACCOUNTS_HASH_SIZE 10
#define TSDB_DEFAULT_USERS_HASH_SIZE 20
#define TSDB_DEFAULT_DBS_HASH_SIZE 100
#define TSDB_DEFAULT_VGROUPS_HASH_SIZE 100
#define TSDB_DEFAULT_STABLES_HASH_SIZE 100
#define TSDB_DEFAULT_CTABLES_HASH_SIZE 10000
#define TSDB_PORT_DNODESHELL 0
#define TSDB_PORT_DNODEDNODE 5
......
......@@ -156,6 +156,7 @@ TAOS_DEFINE_ERROR(TSDB_CODE_SERV_OUT_OF_MEMORY, 0, 405, "server out of m
TAOS_DEFINE_ERROR(TSDB_CODE_NO_DISK_PERMISSIONS, 0, 406, "no disk permissions")
TAOS_DEFINE_ERROR(TSDB_CODE_FILE_CORRUPTED, 0, 407, "file corrupted")
TAOS_DEFINE_ERROR(TSDB_CODE_MEMORY_CORRUPTED, 0, 408, "memory corrupted")
TAOS_DEFINE_ERROR(TSDB_CODE_NOT_SUCH_FILE_OR_DIR, 0, 409, "no such file or directory")
// client
TAOS_DEFINE_ERROR(TSDB_CODE_INVALID_CLIENT_VERSION, 0, 451, "invalid client version")
......
......@@ -269,9 +269,11 @@ typedef struct {
char tableId[TSDB_TABLE_ID_LEN + 1];
char db[TSDB_DB_NAME_LEN + 1];
int16_t type; /* operation type */
char tagVal[TSDB_MAX_BYTES_PER_ROW];
int8_t numOfCols; /* number of schema */
int16_t numOfCols; /* number of schema */
int32_t tagValLen;
SSchema schema[];
// tagVal is padded after schema
// char tagVal[];
} SCMAlterTableMsg;
typedef struct {
......@@ -647,6 +649,7 @@ typedef struct SMultiTableMeta {
} SMultiTableMeta;
typedef struct {
int32_t dataLen;
char name[TSDB_TABLE_ID_LEN + 1];
char data[TSDB_MAX_TAGS_LEN];
} STagData;
......
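With dataLen as the first STagData field, senders can transmit just the used prefix of the struct, offsetof(STagData, data) + dataLen, rather than the full sizeof(STagData); mgmtAutoCreateChildTable sizes its message exactly this way. A minimal sketch, assuming illustrative array sizes (NAME_SIZE and MAX_TAGS_LEN below are placeholders, not the real limits):

#include <stddef.h>
#include <stdint.h>

enum { NAME_SIZE = 64, MAX_TAGS_LEN = 1024 };   /* placeholders for the sketch */

typedef struct {
  int32_t dataLen;
  char    name[NAME_SIZE];
  char    data[MAX_TAGS_LEN];
} STagDataLite;

/* Bytes that actually need to travel for a given tag payload. */
static inline size_t tagDataWireSize(int32_t dataLen) {
  return offsetof(STagDataLite, data) + (size_t)dataLen;
}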
......@@ -37,6 +37,8 @@
#define COMMAND_SIZE 65536
#define DEFAULT_DUMP_FILE "taosdump.sql"
#define MAX_DBS 100
int converStringToReadable(char *str, int size, char *buf, int bufsize);
int convertNCharToReadable(char *str, int size, char *buf, int bufsize);
void taosDumpCharset(FILE *fp);
......@@ -359,7 +361,7 @@ int main(int argc, char *argv[]) {
void taosFreeDbInfos() {
if (dbInfos == NULL) return;
for (int i = 0; i < TSDB_MAX_DBS; i++) tfree(dbInfos[i]);
for (int i = 0; i < MAX_DBS; i++) tfree(dbInfos[i]);
tfree(dbInfos);
}
......@@ -437,7 +439,7 @@ int taosDumpOut(SDumpArguments *arguments) {
return -1;
}
dbInfos = (SDbInfo **)calloc(TSDB_MAX_DBS, sizeof(SDbInfo *));
dbInfos = (SDbInfo **)calloc(MAX_DBS, sizeof(SDbInfo *));
if (dbInfos == NULL) {
fprintf(stderr, "failed to allocate memory\n");
goto _exit_failure;
......@@ -941,7 +943,7 @@ int taosDumpTableData(FILE *fp, char *tbname, SDumpArguments *arguments) {
pstr += sprintf(pstr, "%d", *((int *)row[col]));
break;
case TSDB_DATA_TYPE_BIGINT:
pstr += sprintf(pstr, "%" PRId64 "", *((int64_t *)row[col]));
pstr += sprintf(pstr, "%" PRId64, *((int64_t *)row[col]));
break;
case TSDB_DATA_TYPE_FLOAT:
pstr += sprintf(pstr, "%f", GET_FLOAT_VAL(row[col]));
......@@ -960,7 +962,7 @@ int taosDumpTableData(FILE *fp, char *tbname, SDumpArguments *arguments) {
pstr += sprintf(pstr, "\'%s\'", tbuf);
break;
case TSDB_DATA_TYPE_TIMESTAMP:
pstr += sprintf(pstr, "%" PRId64 "", *(int64_t *)row[col]);
pstr += sprintf(pstr, "%" PRId64, *(int64_t *)row[col]);
break;
default:
break;
......
......@@ -44,7 +44,7 @@ void mgmtDecMnodeRef(struct SMnodeObj *pMnode);
char * mgmtGetMnodeRoleStr();
void mgmtGetMnodeIpSet(SRpcIpSet *ipSet);
void mgmtGetMnodeInfos(void *mnodes);
void mgmtUpdateMnodeIpSet();
#ifdef __cplusplus
}
......
......@@ -29,6 +29,7 @@ void mgmtIncVgroupRef(SVgObj *pVgroup);
void mgmtDecVgroupRef(SVgObj *pVgroup);
void mgmtDropAllDbVgroups(SDbObj *pDropDb, bool sendMsg);
void mgmtDropAllDnodeVgroups(SDnodeObj *pDropDnode);
void mgmtUpdateAllDbVgroups(SDbObj *pAlterDb);
void * mgmtGetNextVgroup(void *pIter, SVgObj **pVgroup);
void mgmtUpdateVgroup(SVgObj *pVgroup);
......
......@@ -94,7 +94,7 @@ int32_t mgmtInitAccts() {
SSdbTableDesc tableDesc = {
.tableId = SDB_TABLE_ACCOUNT,
.tableName = "accounts",
.hashSessions = TSDB_MAX_ACCOUNTS,
.hashSessions = TSDB_DEFAULT_ACCOUNTS_HASH_SIZE,
.maxRowSize = tsAcctUpdateSize,
.refCountPos = (int8_t *)(&tObj.refCount) - (int8_t *)&tObj,
.keyType = SDB_KEY_STRING,
......
......@@ -96,6 +96,7 @@ static int32_t mgmtDbActionUpdate(SSdbOper *pOper) {
memcpy(pSaved, pDb, pOper->rowSize);
free(pDb);
}
mgmtUpdateAllDbVgroups(pSaved);
mgmtDecDbRef(pSaved);
return TSDB_CODE_SUCCESS;
}
......@@ -127,7 +128,7 @@ int32_t mgmtInitDbs() {
SSdbTableDesc tableDesc = {
.tableId = SDB_TABLE_DB,
.tableName = "dbs",
.hashSessions = TSDB_MAX_DBS,
.hashSessions = TSDB_DEFAULT_DBS_HASH_SIZE,
.maxRowSize = tsDbUpdateSize,
.refCountPos = (int8_t *)(&tObj.refCount) - (int8_t *)&tObj,
.keyType = SDB_KEY_STRING,
......
......@@ -130,7 +130,7 @@ int32_t mgmtInitDnodes() {
SSdbTableDesc tableDesc = {
.tableId = SDB_TABLE_DNODE,
.tableName = "dnodes",
.hashSessions = TSDB_MAX_DNODES,
.hashSessions = TSDB_DEFAULT_DNODES_HASH_SIZE,
.maxRowSize = tsDnodeUpdateSize,
.refCountPos = (int8_t *)(&tObj.refCount) - (int8_t *)&tObj,
.keyType = SDB_KEY_AUTO,
......
......@@ -36,6 +36,25 @@ static int32_t tsMnodeUpdateSize = 0;
static int32_t mgmtGetMnodeMeta(STableMetaMsg *pMeta, SShowObj *pShow, void *pConn);
static int32_t mgmtRetrieveMnodes(SShowObj *pShow, char *data, int32_t rows, void *pConn);
static SRpcIpSet tsMnodeRpcIpSet;
static SDMMnodeInfos tsMnodeInfos;
#if defined(LINUX)
static pthread_rwlock_t tsMnodeLock;
#define mgmtMnodeWrLock() pthread_rwlock_wrlock(&tsMnodeLock)
#define mgmtMnodeRdLock() pthread_rwlock_rdlock(&tsMnodeLock)
#define mgmtMnodeUnLock() pthread_rwlock_unlock(&tsMnodeLock)
#define mgmtMnodeInitLock() pthread_rwlock_init(&tsMnodeLock, NULL)
#define mgmtMnodeDestroyLock() pthread_rwlock_destroy(&tsMnodeLock)
#else
static pthread_mutex_t tsMnodeLock;
#define mgmtMnodeWrLock() pthread_mutex_lock(&tsMnodeLock)
#define mgmtMnodeRdLock() pthread_mutex_lock(&tsMnodeLock)
#define mgmtMnodeUnLock() pthread_mutex_unlock(&tsMnodeLock)
#define mgmtMnodeInitLock() pthread_mutex_init(&tsMnodeLock, NULL)
#define mgmtMnodeDestroyLock() pthread_mutex_destroy(&tsMnodeLock)
#endif
static int32_t mgmtMnodeActionDestroy(SSdbOper *pOper) {
tfree(pOper->pObj);
return TSDB_CODE_SUCCESS;
......@@ -102,17 +121,22 @@ static int32_t mgmtMnodeActionRestored() {
}
sdbFreeIter(pIter);
}
mgmtUpdateMnodeIpSet();
return TSDB_CODE_SUCCESS;
}
int32_t mgmtInitMnodes() {
mgmtMnodeInitLock();
SMnodeObj tObj;
tsMnodeUpdateSize = (int8_t *)tObj.updateEnd - (int8_t *)&tObj;
SSdbTableDesc tableDesc = {
.tableId = SDB_TABLE_MNODE,
.tableName = "mnodes",
.hashSessions = TSDB_MAX_MNODES,
.hashSessions = TSDB_DEFAULT_MNODES_HASH_SIZE,
.maxRowSize = tsMnodeUpdateSize,
.refCountPos = (int8_t *)(&tObj.refCount) - (int8_t *)&tObj,
.keyType = SDB_KEY_INT,
......@@ -140,6 +164,7 @@ int32_t mgmtInitMnodes() {
void mgmtCleanupMnodes() {
sdbCloseTable(tsMnodeSdb);
mgmtMnodeDestroyLock();
}
int32_t mgmtGetMnodesNum() {
......@@ -177,50 +202,57 @@ char *mgmtGetMnodeRoleStr(int32_t role) {
}
}
void mgmtGetMnodeIpSet(SRpcIpSet *ipSet) {
void *pIter = NULL;
while (1) {
SMnodeObj *pMnode = NULL;
pIter = mgmtGetNextMnode(pIter, &pMnode);
if (pMnode == NULL) break;
strcpy(ipSet->fqdn[ipSet->numOfIps], pMnode->pDnode->dnodeFqdn);
ipSet->port[ipSet->numOfIps] = htons(pMnode->pDnode->dnodePort);
void mgmtUpdateMnodeIpSet() {
SRpcIpSet *ipSet = &tsMnodeRpcIpSet;
SDMMnodeInfos *mnodes = &tsMnodeInfos;
if (pMnode->role == TAOS_SYNC_ROLE_MASTER) {
ipSet->inUse = ipSet->numOfIps;
}
mPrint("update mnodes ipset, numOfIps:%d ", mgmtGetMnodesNum());
ipSet->numOfIps++;
mgmtDecMnodeRef(pMnode);
}
sdbFreeIter(pIter);
}
mgmtMnodeWrLock();
void mgmtGetMnodeInfos(void *param) {
SDMMnodeInfos *mnodes = param;
mnodes->inUse = 0;
int32_t index = 0;
void *pIter = NULL;
void * pIter = NULL;
while (1) {
SMnodeObj *pMnode = NULL;
pIter = mgmtGetNextMnode(pIter, &pMnode);
if (pMnode == NULL) break;
strcpy(ipSet->fqdn[ipSet->numOfIps], pMnode->pDnode->dnodeFqdn);
ipSet->port[ipSet->numOfIps] = htons(pMnode->pDnode->dnodePort);
mnodes->nodeInfos[index].nodeId = htonl(pMnode->mnodeId);
strcpy(mnodes->nodeInfos[index].nodeEp, pMnode->pDnode->dnodeEp);
if (pMnode->role == TAOS_SYNC_ROLE_MASTER) {
ipSet->inUse = ipSet->numOfIps;
mnodes->inUse = index;
}
mPrint("mnode:%d, ep:%s %s", index, pMnode->pDnode->dnodeEp, pMnode->role == TAOS_SYNC_ROLE_MASTER ? "master" : "");
ipSet->numOfIps++;
index++;
mgmtDecMnodeRef(pMnode);
}
sdbFreeIter(pIter);
mnodes->nodeNum = index;
sdbFreeIter(pIter);
mgmtMnodeUnLock();
}
void mgmtGetMnodeIpSet(SRpcIpSet *ipSet) {
mgmtMnodeRdLock();
*ipSet = tsMnodeRpcIpSet;
mgmtMnodeUnLock();
}
void mgmtGetMnodeInfos(void *mnodeInfos) {
mgmtMnodeRdLock();
*(SDMMnodeInfos *)mnodeInfos = tsMnodeInfos;
mgmtMnodeUnLock();
}
int32_t mgmtAddMnode(int32_t dnodeId) {
......@@ -240,6 +272,8 @@ int32_t mgmtAddMnode(int32_t dnodeId) {
code = TSDB_CODE_SDB_ERROR;
}
mgmtUpdateMnodeIpSet();
return code;
}
......@@ -250,6 +284,8 @@ void mgmtDropMnodeLocal(int32_t dnodeId) {
sdbDeleteRow(&oper);
mgmtDecMnodeRef(pMnode);
}
mgmtUpdateMnodeIpSet();
}
int32_t mgmtDropMnode(int32_t dnodeId) {
......@@ -270,6 +306,9 @@ int32_t mgmtDropMnode(int32_t dnodeId) {
}
sdbDecRef(tsMnodeSdb, pMnode);
mgmtUpdateMnodeIpSet();
return code;
}
......
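The mnode ip set and mnode infos are now kept in static caches (tsMnodeRpcIpSet, tsMnodeInfos) that mgmtUpdateMnodeIpSet rebuilds under a write lock whenever mnodes are added or dropped, while readers simply copy the cached structs under a read lock. A compact sketch of that pattern on the pthread rwlock path (SIpSetLite and the function names are illustrative):

#include <pthread.h>

typedef struct { int numOfIps; int inUse; } SIpSetLite;   /* stand-in for SRpcIpSet */

static SIpSetLite       cachedIpSet;
static pthread_rwlock_t cacheLock = PTHREAD_RWLOCK_INITIALIZER;

/* Writers rebuild the cache whenever mnode membership changes. */
void updateIpSet(const SIpSetLite *fresh) {
  pthread_rwlock_wrlock(&cacheLock);
  cachedIpSet = *fresh;
  pthread_rwlock_unlock(&cacheLock);
}

/* Readers take a cheap snapshot instead of iterating the sdb table each time. */
void getIpSet(SIpSetLite *out) {
  pthread_rwlock_rdlock(&cacheLock);
  *out = cachedIpSet;
  pthread_rwlock_unlock(&cacheLock);
}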
......@@ -196,6 +196,8 @@ void sdbUpdateMnodeRoles() {
mgmtDecMnodeRef(pMnode);
}
}
mgmtUpdateMnodeIpSet();
}
static uint32_t sdbGetFileInfo(void *ahandle, char *name, uint32_t *index, int32_t *size, uint64_t *fversion) {
......@@ -442,8 +444,8 @@ static int32_t sdbInsertHash(SSdbTable *pTable, SSdbOper *pOper) {
pthread_mutex_unlock(&pTable->mutex);
sdbTrace("table:%s, insert record:%s to hash, numOfRows:%d version:%" PRIu64, pTable->tableName,
sdbGetKeyStrFromObj(pTable, pOper->pObj), pTable->numOfRows, sdbGetVersion());
sdbTrace("table:%s, insert record:%s to hash, rowSize:%d vnumOfRows:%d version:%" PRIu64, pTable->tableName,
sdbGetKeyStrFromObj(pTable, pOper->pObj), pOper->rowSize, pTable->numOfRows, sdbGetVersion());
(*pTable->insertFp)(pOper);
return TSDB_CODE_SUCCESS;
......
......@@ -119,7 +119,7 @@ static void mgmtDoDealyedAddToShellQueue(void *param, void *tmrId) {
void mgmtDealyedAddToShellQueue(SQueuedMsg *queuedMsg) {
void *unUsed = NULL;
taosTmrReset(mgmtDoDealyedAddToShellQueue, 1000, queuedMsg, tsMgmtTmr, &unUsed);
taosTmrReset(mgmtDoDealyedAddToShellQueue, 300, queuedMsg, tsMgmtTmr, &unUsed);
}
void mgmtProcessMsgFromShell(SRpcMsg *rpcMsg) {
......
......@@ -340,7 +340,7 @@ static int32_t mgmtInitChildTables() {
SSdbTableDesc tableDesc = {
.tableId = SDB_TABLE_CTABLE,
.tableName = "ctables",
.hashSessions = tsMaxTables,
.hashSessions = TSDB_DEFAULT_CTABLES_HASH_SIZE,
.maxRowSize = sizeof(SChildTableObj) + sizeof(SSchema) * (TSDB_MAX_TAGS + TSDB_MAX_COLUMNS + 16) + TSDB_TABLE_ID_LEN + TSDB_CQ_SQL_SIZE,
.refCountPos = (int8_t *)(&tObj.refCount) - (int8_t *)&tObj,
.keyType = SDB_KEY_VAR_STRING,
......@@ -507,7 +507,7 @@ static int32_t mgmtInitSuperTables() {
SSdbTableDesc tableDesc = {
.tableId = SDB_TABLE_STABLE,
.tableName = "stables",
.hashSessions = TSDB_MAX_SUPER_TABLES,
.hashSessions = TSDB_DEFAULT_STABLES_HASH_SIZE,
.maxRowSize = sizeof(SSuperTableObj) + sizeof(SSchema) * (TSDB_MAX_TAGS + TSDB_MAX_COLUMNS + 16) + TSDB_TABLE_ID_LEN,
.refCountPos = (int8_t *)(&tObj.refCount) - (int8_t *)&tObj,
.keyType = SDB_KEY_VAR_STRING,
......@@ -1334,13 +1334,13 @@ static void mgmtProcessDropSuperTableRsp(SRpcMsg *rpcMsg) {
}
static void *mgmtBuildCreateChildTableMsg(SCMCreateTableMsg *pMsg, SChildTableObj *pTable) {
char * pTagData = NULL;
STagData * pTagData = NULL;
int32_t tagDataLen = 0;
int32_t totalCols = 0;
int32_t contLen = 0;
if (pTable->info.type == TSDB_CHILD_TABLE && pMsg != NULL) {
pTagData = pMsg->schema + TSDB_TABLE_ID_LEN + 1;
tagDataLen = htonl(pMsg->contLen) - sizeof(SCMCreateTableMsg) - TSDB_TABLE_ID_LEN - 1;
pTagData = (STagData*)pMsg->schema;
tagDataLen = ntohl(pTagData->dataLen);
totalCols = pTable->superTable->numOfColumns + pTable->superTable->numOfTags;
contLen = sizeof(SMDCreateTableMsg) + totalCols * sizeof(SSchema) + tagDataLen + pTable->sqlLen;
} else {
......@@ -1393,7 +1393,7 @@ static void *mgmtBuildCreateChildTableMsg(SCMCreateTableMsg *pMsg, SChildTableOb
}
if (pTable->info.type == TSDB_CHILD_TABLE && pMsg != NULL) {
memcpy(pCreate->data + totalCols * sizeof(SSchema), pTagData, tagDataLen);
memcpy(pCreate->data + totalCols * sizeof(SSchema), pTagData->data, tagDataLen);
memcpy(pCreate->data + totalCols * sizeof(SSchema) + tagDataLen, pTable->sql, pTable->sqlLen);
}
......@@ -1420,10 +1420,10 @@ static SChildTableObj* mgmtDoCreateChildTable(SCMCreateTableMsg *pCreate, SVgObj
pTable->vgId = pVgroup->vgId;
if (pTable->info.type == TSDB_CHILD_TABLE) {
char *pTagData = (char *) pCreate->schema; // it is a tag key
SSuperTableObj *pSuperTable = mgmtGetSuperTable(pTagData);
STagData *pTagData = (STagData *) pCreate->schema; // it is a tag key
SSuperTableObj *pSuperTable = mgmtGetSuperTable(pTagData->name);
if (pSuperTable == NULL) {
mError("table:%s, corresponding super table:%s does not exist", pCreate->tableId, pTagData);
mError("table:%s, corresponding super table:%s does not exist", pCreate->tableId, pTagData->name);
free(pTable);
terrno = TSDB_CODE_INVALID_TABLE;
return NULL;
......@@ -1538,7 +1538,7 @@ static void mgmtProcessCreateChildTableMsg(SQueuedMsg *pMsg) {
SRpcIpSet ipSet = mgmtGetIpSetFromVgroup(pVgroup);
SQueuedMsg *newMsg = mgmtCloneQueuedMsg(pMsg);
newMsg->ahandle = pMsg->pTable;
newMsg->maxRetry = 5;
newMsg->maxRetry = 10;
SRpcMsg rpcMsg = {
.handle = newMsg,
.pCont = pMDCreate,
......@@ -1742,7 +1742,9 @@ static int32_t mgmtDoGetChildTableMeta(SQueuedMsg *pMsg, STableMetaMsg *pMeta) {
static void mgmtAutoCreateChildTable(SQueuedMsg *pMsg) {
SCMTableInfoMsg *pInfo = pMsg->pCont;
int32_t contLen = sizeof(SCMCreateTableMsg) + sizeof(STagData);
STagData* pTag = (STagData*)pInfo->tags;
int32_t contLen = sizeof(SCMCreateTableMsg) + offsetof(STagData, data) + ntohl(pTag->dataLen);
SCMCreateTableMsg *pCreateMsg = rpcMallocCont(contLen);
if (pCreateMsg == NULL) {
mError("table:%s, failed to create table while get meta info, no enough memory", pInfo->tableId);
......@@ -1756,14 +1758,9 @@ static void mgmtAutoCreateChildTable(SQueuedMsg *pMsg) {
pCreateMsg->getMeta = 1;
pCreateMsg->contLen = htonl(contLen);
contLen = sizeof(STagData);
if (contLen > pMsg->contLen - sizeof(SCMTableInfoMsg)) {
contLen = pMsg->contLen - sizeof(SCMTableInfoMsg);
}
memcpy(pCreateMsg->schema, pInfo->tags, contLen);
memcpy(pCreateMsg->schema, pInfo->tags, contLen - sizeof(SCMCreateTableMsg));
SQueuedMsg *newMsg = mgmtCloneQueuedMsg(pMsg);
pMsg->pCont = newMsg->pCont;
newMsg->msgType = TSDB_MSG_TYPE_CM_CREATE_TABLE;
newMsg->pCont = pCreateMsg;
......@@ -2201,6 +2198,8 @@ static void mgmtProcessAlterTableMsg(SQueuedMsg *pMsg) {
}
pAlter->type = htons(pAlter->type);
pAlter->numOfCols = htons(pAlter->numOfCols);
pAlter->tagValLen = htonl(pAlter->tagValLen);
if (pAlter->numOfCols > 2) {
mError("table:%s, error numOfCols:%d in alter table", pAlter->tableId, pAlter->numOfCols);
......@@ -2232,7 +2231,8 @@ static void mgmtProcessAlterTableMsg(SQueuedMsg *pMsg) {
mTrace("table:%s, start to alter ctable", pAlter->tableId);
SChildTableObj *pTable = (SChildTableObj *)pMsg->pTable;
if (pAlter->type == TSDB_ALTER_TABLE_UPDATE_TAG_VAL) {
code = mgmtModifyChildTableTagValue(pTable, pAlter->schema[0].name, pAlter->tagVal);
char *tagVal = (char*)(pAlter->schema + pAlter->numOfCols);
code = mgmtModifyChildTableTagValue(pTable, pAlter->schema[0].name, tagVal);
} else if (pAlter->type == TSDB_ALTER_TABLE_ADD_COLUMN) {
code = mgmtAddNormalTableColumn(pMsg->pDb, pTable, pAlter->schema, 1);
} else if (pAlter->type == TSDB_ALTER_TABLE_DROP_COLUMN) {
......
......@@ -117,7 +117,7 @@ int32_t mgmtInitUsers() {
SSdbTableDesc tableDesc = {
.tableId = SDB_TABLE_USER,
.tableName = "users",
.hashSessions = TSDB_MAX_USERS,
.hashSessions = TSDB_DEFAULT_USERS_HASH_SIZE,
.maxRowSize = tsUserUpdateSize,
.refCountPos = (int8_t *)(&tObj.refCount) - (int8_t *)&tObj,
.keyType = SDB_KEY_STRING,
......
......@@ -121,6 +121,20 @@ static int32_t mgmtVgroupActionDelete(SSdbOper *pOper) {
return TSDB_CODE_SUCCESS;
}
static void mgmtVgroupUpdateIdPool(SVgObj *pVgroup) {
int32_t oldTables = taosIdPoolMaxSize(pVgroup->idPool);
SDbObj *pDb = pVgroup->pDb;
if (pDb != NULL) {
if (pDb->cfg.maxTables != oldTables) {
mPrint("vgId:%d tables change from %d to %d", pVgroup->vgId, oldTables, pDb->cfg.maxTables);
taosUpdateIdPool(pVgroup->idPool, pDb->cfg.maxTables);
int32_t size = sizeof(SChildTableObj *) * pDb->cfg.maxTables;
pVgroup->tableList = (SChildTableObj **)realloc(pVgroup->tableList, size);
memset(pVgroup->tableList + oldTables, 0, (pDb->cfg.maxTables - oldTables) * sizeof(SChildTableObj **));
}
}
}
static int32_t mgmtVgroupActionUpdate(SSdbOper *pOper) {
SVgObj *pNew = pOper->pObj;
SVgObj *pVgroup = mgmtGetVgroup(pNew->vgId);
......@@ -146,20 +160,11 @@ static int32_t mgmtVgroupActionUpdate(SSdbOper *pOper) {
}
}
int32_t oldTables = taosIdPoolMaxSize(pVgroup->idPool);
SDbObj *pDb = pVgroup->pDb;
if (pDb != NULL) {
if (pDb->cfg.maxTables != oldTables) {
mPrint("vgId:%d tables change from %d to %d", pVgroup->vgId, oldTables, pDb->cfg.maxTables);
taosUpdateIdPool(pVgroup->idPool, pDb->cfg.maxTables);
int32_t size = sizeof(SChildTableObj *) * pDb->cfg.maxTables;
pVgroup->tableList = (SChildTableObj **)realloc(pVgroup->tableList, size);
}
}
mgmtVgroupUpdateIdPool(pVgroup);
mgmtDecVgroupRef(pVgroup);
mTrace("vgId:%d, is updated, numOfVnode:%d tables:%d", pVgroup->vgId, pVgroup->numOfVnodes, pDb == NULL ? 0 : pDb->cfg.maxTables);
mTrace("vgId:%d, is updated, numOfVnode:%d", pVgroup->vgId, pVgroup->numOfVnodes);
return TSDB_CODE_SUCCESS;
}
......@@ -196,7 +201,7 @@ int32_t mgmtInitVgroups() {
SSdbTableDesc tableDesc = {
.tableId = SDB_TABLE_VGROUP,
.tableName = "vgroups",
.hashSessions = TSDB_MAX_VGROUPS,
.hashSessions = TSDB_DEFAULT_VGROUPS_HASH_SIZE,
.maxRowSize = tsVgUpdateSize,
.refCountPos = (int8_t *)(&tObj.refCount) - (int8_t *)&tObj,
.keyType = SDB_KEY_AUTO,
......@@ -762,6 +767,28 @@ void mgmtDropAllDnodeVgroups(SDnodeObj *pDropDnode) {
sdbFreeIter(pIter);
}
void mgmtUpdateAllDbVgroups(SDbObj *pAlterDb) {
void * pIter = NULL;
SVgObj *pVgroup = NULL;
mPrint("db:%s, all vgroups will be update in sdb", pAlterDb->name);
while (1) {
pIter = mgmtGetNextVgroup(pIter, &pVgroup);
if (pVgroup == NULL) break;
if (pVgroup->pDb == pAlterDb) {
mgmtVgroupUpdateIdPool(pVgroup);
}
mgmtDecVgroupRef(pVgroup);
}
sdbFreeIter(pIter);
mPrint("db:%s, all vgroups is updated in sdb", pAlterDb->name);
}
void mgmtDropAllDbVgroups(SDbObj *pDropDb, bool sendMsg) {
void * pIter = NULL;
int32_t numOfVgroups = 0;
......
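mgmtVgroupUpdateIdPool is factored out so that both vgroup updates and database alters (via mgmtUpdateAllDbVgroups) can grow a vgroup's id pool and tableList when maxTables increases, zeroing only the newly added slots. A minimal sketch of the grow-and-zero step, with simplified types (STableRef and growTableList are illustrative):

#include <stdlib.h>
#include <string.h>

typedef struct STableRef STableRef;   /* stand-in for SChildTableObj */

/* Grow a table pointer array from oldMax to newMax entries, zeroing the new tail. */
STableRef **growTableList(STableRef **list, int oldMax, int newMax) {
  if (newMax <= oldMax) return list;
  STableRef **p = realloc(list, sizeof(STableRef *) * (size_t)newMax);
  if (p == NULL) return list;   /* keep the old array on allocation failure */
  memset(p + oldMax, 0, sizeof(STableRef *) * (size_t)(newMax - oldMax));
  return p;
}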
......@@ -4022,7 +4022,7 @@ void skipBlocks(SQueryRuntimeEnv *pRuntimeEnv) {
pTableQueryInfo->lastKey = (QUERY_IS_ASC_QUERY(pQuery)) ? blockInfo.window.ekey : blockInfo.window.skey;
pTableQueryInfo->lastKey += step;
qTrace("QInfo:%p skip rows:%d, offset:%" PRId64 "", GET_QINFO_ADDR(pRuntimeEnv), blockInfo.rows,
qTrace("QInfo:%p skip rows:%d, offset:%" PRId64, GET_QINFO_ADDR(pRuntimeEnv), blockInfo.rows,
pQuery->limit.offset);
} else { // find the appropriated start position in current block
updateOffsetVal(pRuntimeEnv, &blockInfo);
......@@ -5134,7 +5134,7 @@ bool validateExprColumnInfo(SQueryTableMsg *pQueryMsg, SSqlFuncMsg *pExprMsg, SC
static bool validateQueryMsg(SQueryTableMsg *pQueryMsg) {
if (pQueryMsg->intervalTime < 0) {
qError("qmsg:%p illegal value of interval time %" PRId64 "", pQueryMsg, pQueryMsg->intervalTime);
qError("qmsg:%p illegal value of interval time %" PRId64, pQueryMsg, pQueryMsg->intervalTime);
return false;
}
......
......@@ -162,6 +162,7 @@ int tsdbAlterCacheTotalBlocks(STsdbRepo *pRepo, int totalBlocks) {
pCache->totalCacheBlocks = totalBlocks;
tsdbAdjustCacheBlocks(pCache);
}
pRepo->config.totalBlocks = totalBlocks;
tsdbUnLockRepo((TsdbRepoT *)pRepo);
tsdbTrace("vgId:%d, tsdb total cache blocks changed from %d to %d", pRepo->config.tsdbId, oldNumOfBlocks, totalBlocks);
......
......@@ -37,7 +37,8 @@ static TSKEY tsdbNextIterKey(SSkipListIterator *pIter);
static int tsdbHasDataToCommit(SSkipListIterator **iters, int nIters, TSKEY minKey, TSKEY maxKey);
static void tsdbAlterCompression(STsdbRepo *pRepo, int8_t compression);
static void tsdbAlterKeep(STsdbRepo *pRepo, int32_t keep);
static void tsdbAlterMaxTables(STsdbRepo *pRepo, int32_t maxTables);
static void tsdbAlterMaxTables(STsdbRepo *pRepo, int32_t maxTables);
static int32_t tsdbSaveConfig(STsdbRepo *pRepo);
#define TSDB_GET_TABLE_BY_ID(pRepo, sid) (((STSDBRepo *)pRepo)->pTableList)[sid]
#define TSDB_GET_TABLE_BY_NAME(pRepo, name)
......@@ -319,10 +320,25 @@ int32_t tsdbConfigRepo(TsdbRepoT *repo, STsdbCfg *pCfg) {
ASSERT(pRCfg->maxRowsPerFileBlock == pCfg->maxRowsPerFileBlock);
ASSERT(pRCfg->precision == pCfg->precision);
if (pRCfg->compression != pCfg->compression) tsdbAlterCompression(pRepo, pCfg->compression);
if (pRCfg->keep != pCfg->keep) tsdbAlterKeep(pRepo, pCfg->keep);
if (pRCfg->totalBlocks != pCfg->totalBlocks) tsdbAlterCacheTotalBlocks(pRepo, pCfg->totalBlocks);
if (pRCfg->maxTables != pCfg->maxTables) tsdbAlterMaxTables(pRepo, pCfg->maxTables);
bool configChanged = false;
if (pRCfg->compression != pCfg->compression) {
configChanged = true;
tsdbAlterCompression(pRepo, pCfg->compression);
}
if (pRCfg->keep != pCfg->keep) {
configChanged = true;
tsdbAlterKeep(pRepo, pCfg->keep);
}
if (pRCfg->totalBlocks != pCfg->totalBlocks) {
configChanged = true;
tsdbAlterCacheTotalBlocks(pRepo, pCfg->totalBlocks);
}
if (pRCfg->maxTables != pCfg->maxTables) {
configChanged = true;
tsdbAlterMaxTables(pRepo, pCfg->maxTables);
}
if (configChanged) tsdbSaveConfig(pRepo);
return TSDB_CODE_SUCCESS;
}
......@@ -1134,8 +1150,10 @@ static void tsdbAlterKeep(STsdbRepo *pRepo, int32_t keep) {
int maxFiles = keep / pCfg->maxTables + 3;
if (pRepo->config.keep > keep) {
pRepo->config.keep = keep;
pRepo->tsdbFileH->maxFGroups = maxFiles;
} else {
pRepo->config.keep = keep;
pRepo->tsdbFileH->fGroup = realloc(pRepo->tsdbFileH->fGroup, sizeof(SFileGroup));
if (pRepo->tsdbFileH->fGroup == NULL) {
// TODO: deal with the error
......@@ -1155,6 +1173,8 @@ static void tsdbAlterMaxTables(STsdbRepo *pRepo, int32_t maxTables) {
pMeta->maxTables = maxTables;
pMeta->tables = realloc(pMeta->tables, maxTables * sizeof(STable *));
memset(&pMeta->tables[oldMaxTables], 0, sizeof(STable *) * (maxTables-oldMaxTables));
pRepo->config.maxTables = maxTables;
tsdbTrace("vgId:%d, tsdb maxTables is changed from %d to %d!", pRepo->config.tsdbId, oldMaxTables, maxTables);
}
......
......@@ -127,7 +127,7 @@ int tsdbRestoreTable(void *pHandle, void *cont, int contLen) {
if (pTable->type == TSDB_SUPER_TABLE) {
STColumn* pColSchema = schemaColAt(pTable->tagSchema, 0);
pTable->pIndex = tSkipListCreate(TSDB_SUPER_TABLE_SL_LEVEL, pColSchema->type, pColSchema->bytes,
1, 0, 0, getTagIndexKey);
1, 0, 1, getTagIndexKey);
}
tsdbAddTableToMeta(pMeta, pTable, false);
......@@ -323,7 +323,7 @@ int tsdbCreateTable(TsdbRepoT *repo, STableCfg *pCfg) {
// index the first tag column
STColumn* pColSchema = schemaColAt(super->tagSchema, 0);
super->pIndex = tSkipListCreate(TSDB_SUPER_TABLE_SL_LEVEL, pColSchema->type, pColSchema->bytes,
1, 0, 0, getTagIndexKey); // Allow duplicate key, no lock
1, 0, 1, getTagIndexKey); // Allow duplicate key, no lock
if (super->pIndex == NULL) {
tdFreeSchema(super->schema);
......@@ -447,6 +447,7 @@ static int tsdbFreeTable(STable *pTable) {
// Free content
if (TSDB_TABLE_IS_SUPER_TABLE(pTable)) {
tdFreeSchema(pTable->tagSchema);
tSkipListDestroy(pTable->pIndex);
}
......
......@@ -409,7 +409,7 @@ int tsdbWriteCompInfo(SRWHelper *pHelper) {
if (pIdx->offset > 0) {
pIdx->offset = lseek(pHelper->files.nHeadF.fd, 0, SEEK_END);
if (pIdx->offset < 0) return -1;
ASSERT(pIdx->offset >= tsizeof(pHelper->pCompIdx));
ASSERT(pIdx->offset >= TSDB_FILE_HEAD_SIZE);
if (tsendfile(pHelper->files.nHeadF.fd, pHelper->files.headF.fd, NULL, pIdx->len) < pIdx->len) return -1;
}
......@@ -489,6 +489,7 @@ int tsdbLoadCompIdx(SRWHelper *pHelper, void *target) {
}
ASSERT(((char *)ptr - (char *)pHelper->pBuffer) == (pFile->info.len - sizeof(TSCKSUM)));
if (lseek(fd, TSDB_FILE_HEAD_SIZE, SEEK_SET) < 0) return -1;
}
}
......
......@@ -73,13 +73,18 @@ int32_t vnodeCreate(SMDCreateVnodeMsg *pVnodeCfg) {
return TSDB_CODE_SUCCESS;
}
mkdir(tsVnodeDir, 0755);
char rootDir[TSDB_FILENAME_LEN] = {0};
sprintf(rootDir, "%s/vnode%d", tsVnodeDir, pVnodeCfg->cfg.vgId);
if (mkdir(rootDir, 0755) != 0) {
vPrint("vgId:%d, failed to create vnode, reason:%s dir:%s", pVnodeCfg->cfg.vgId, strerror(errno), rootDir);
if (errno == EACCES) {
return TSDB_CODE_NO_DISK_PERMISSIONS;
} else if (errno == ENOSPC) {
return TSDB_CODE_SERV_NO_DISKSPACE;
} else if (errno == ENOENT) {
return TSDB_CODE_NOT_SUCH_FILE_OR_DIR;
} else if (errno == EEXIST) {
} else {
return TSDB_CODE_VG_INIT_FAILED;
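vnodeCreate now maps mkdir failures to specific error codes (EACCES, ENOSPC, ENOENT) and treats an already existing directory (EEXIST) as acceptable rather than a failure. A small sketch of the mapping; the numeric values below are placeholders except where this commit shows them (406 for no disk permissions, 409 for no such file or directory):

#include <errno.h>
#include <sys/stat.h>

enum { CODE_SUCCESS = 0, CODE_NO_DISK_PERMISSIONS = 406, CODE_NO_DISKSPACE = 410,
       CODE_NOT_SUCH_FILE_OR_DIR = 409, CODE_VG_INIT_FAILED = 500 };

int createVnodeDir(const char *rootDir) {
  if (mkdir(rootDir, 0755) != 0) {
    if (errno == EACCES) return CODE_NO_DISK_PERMISSIONS;
    if (errno == ENOSPC) return CODE_NO_DISKSPACE;
    if (errno == ENOENT) return CODE_NOT_SUCH_FILE_OR_DIR;
    if (errno != EEXIST) return CODE_VG_INIT_FAILED;   /* EEXIST falls through as success */
  }
  return CODE_SUCCESS;
}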
......@@ -239,6 +244,10 @@ int32_t vnodeOpen(int32_t vnode, char *rootDir) {
syncInfo.notifyFileSynced = vnodeNotifyFileSynced;
pVnode->sync = syncStart(&syncInfo);
#ifndef _SYNC
pVnode->role = TAOS_SYNC_ROLE_MASTER;
#endif
// start continuous query
if (pVnode->role == TAOS_SYNC_ROLE_MASTER)
cqStart(pVnode->cq);
......@@ -429,7 +438,7 @@ static void vnodeNotifyRole(void *ahandle, int8_t role) {
static void vnodeNotifyFileSynced(void *ahandle, uint64_t fversion) {
SVnodeObj *pVnode = ahandle;
vTrace("vgId:%d, data file is synced, fversion:%" PRId64 "", pVnode->vgId, fversion);
vTrace("vgId:%d, data file is synced, fversion:%" PRId64, pVnode->vgId, fversion);
pVnode->fversion = fversion;
pVnode->version = fversion;
......
......@@ -19,9 +19,9 @@ from util.sql import tdSql
class TDTestCase:
def init(self, conn):
def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor())
tdSql.init(conn.cursor(), logSql)
def run(self):
try:
......
......@@ -19,9 +19,9 @@ from util.sql import tdSql
class TDTestCase:
def init(self, conn):
def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor())
tdSql.init(conn.cursor(), logSql)
def run(self):
print("==========step1")
......
......@@ -9,9 +9,9 @@ from util.dnodes import *
class TDTestCase:
def init(self, conn):
def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor())
tdSql.init(conn.cursor(), logSql)
self.types = [
"int",
"bigint",
......
......@@ -9,9 +9,9 @@ from util.dnodes import *
class TDTestCase:
def init(self, conn):
def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor())
tdSql.init(conn.cursor(), logSql)
self.types = [
"int",
"bigint",
......
......@@ -19,9 +19,9 @@ from util.sql import *
class TDTestCase:
def init(self, conn):
def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor())
tdSql.init(conn.cursor(), logSql)
def run(self):
tdSql.prepare()
......
......@@ -23,9 +23,9 @@ from util.sql import *
class TDTestCase:
def init(self, conn):
def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor())
tdSql.init(conn.cursor(), logSql)
def run(self):
......
......@@ -19,9 +19,9 @@ from util.sql import *
class TDTestCase:
def init(self, conn):
def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor())
tdSql.init(conn.cursor(), logSql)
def run(self):
tdSql.prepare()
......
......@@ -125,3 +125,6 @@ python3 ./test.py $1 -f user/pass_len.py
# table
#python3 ./test.py $1 -f table/del_stable.py
#query
python3 ./test.py $1 -f query/filter.py
......@@ -20,9 +20,9 @@ from util.dnodes import *
class TDTestCase:
def init(self, conn):
def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor())
tdSql.init(conn.cursor(), logSql)
def run(self):
self.ntables = 1
......
......@@ -20,9 +20,9 @@ from util.dnodes import *
class TDTestCase:
def init(self, conn):
def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor())
tdSql.init(conn.cursor(), logSql)
def run(self):
self.ntables = 1
......
......@@ -20,9 +20,9 @@ from util.dnodes import *
class TDTestCase:
def init(self, conn):
def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor())
tdSql.init(conn.cursor(), logSql)
def run(self):
self.ntables = 1
......
......@@ -20,9 +20,9 @@ from util.dnodes import *
class TDTestCase:
def init(self, conn):
def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor())
tdSql.init(conn.cursor(), logSql)
def run(self):
self.ntables = 1
......
......@@ -20,9 +20,9 @@ from util.dnodes import *
class TDTestCase:
def init(self, conn):
def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor())
tdSql.init(conn.cursor(), logSql)
def run(self):
self.ntables = 1
......
......@@ -20,9 +20,9 @@ from util.dnodes import *
class TDTestCase:
def init(self, conn):
def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor())
tdSql.init(conn.cursor(), logSql)
def run(self):
self.ntables = 1
......
......@@ -20,9 +20,9 @@ from util.dnodes import *
class TDTestCase:
def init(self, conn):
def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor())
tdSql.init(conn.cursor(), logSql)
def run(self):
self.ntables = 1
......
......@@ -20,9 +20,9 @@ from util.dnodes import *
class TDTestCase:
def init(self, conn):
def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor())
tdSql.init(conn.cursor(), logSql)
def run(self):
self.ntables = 1
......
......@@ -20,9 +20,9 @@ from util.dnodes import *
class TDTestCase:
def init(self, conn):
def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor())
tdSql.init(conn.cursor(), logSql)
def run(self):
self.ntables = 1
......
......@@ -20,9 +20,9 @@ from util.dnodes import *
class TDTestCase:
def init(self, conn):
def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor())
tdSql.init(conn.cursor(), logSql)
def run(self):
self.ntables = 1
......
......@@ -20,9 +20,9 @@ from util.dnodes import *
class TDTestCase:
def init(self, conn):
def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor())
tdSql.init(conn.cursor(), logSql)
def run(self):
self.ntables = 1
......
......@@ -20,9 +20,9 @@ from util.dnodes import *
class TDTestCase:
def init(self, conn):
def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor())
tdSql.init(conn.cursor(), logSql)
def run(self):
self.ntables = 1
......
......@@ -20,9 +20,9 @@ from util.dnodes import *
class TDTestCase:
def init(self, conn):
def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor())
tdSql.init(conn.cursor(), logSql)
def run(self):
self.ntables = 1
......
......@@ -20,9 +20,9 @@ from util.dnodes import *
class TDTestCase:
def init(self, conn):
def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor())
tdSql.init(conn.cursor(), logSql)
def run(self):
self.ntables = 1
......
......@@ -20,9 +20,9 @@ from util.dnodes import *
class TDTestCase:
def init(self, conn):
def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor())
tdSql.init(conn.cursor(), logSql)
def run(self):
self.ntables = 1
......
......@@ -20,9 +20,9 @@ from util.dnodes import *
class TDTestCase:
def init(self, conn):
def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor())
tdSql.init(conn.cursor(), logSql)
def run(self):
self.ntables = 1
......
......@@ -20,9 +20,9 @@ from util.dnodes import *
class TDTestCase:
def init(self, conn):
def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor())
tdSql.init(conn.cursor(), logSql)
def run(self):
self.ntables = 1
......
......@@ -20,9 +20,9 @@ from util.dnodes import *
class TDTestCase:
def init(self, conn):
def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor())
tdSql.init(conn.cursor(), logSql)
def run(self):
self.ntables = 1
......
......@@ -20,9 +20,9 @@ from util.dnodes import *
class TDTestCase:
def init(self, conn):
def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor())
tdSql.init(conn.cursor(), logSql)
def run(self):
self.ntables = 1
......
......@@ -20,9 +20,9 @@ from util.dnodes import *
class TDTestCase:
def init(self, conn):
def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor())
tdSql.init(conn.cursor(), logSql)
def run(self):
self.ntables = 1
......
......@@ -20,9 +20,9 @@ from util.dnodes import *
class TDTestCase:
def init(self, conn):
def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor())
tdSql.init(conn.cursor(), logSql)
def run(self):
self.ntables = 1
......
......@@ -20,9 +20,9 @@ from util.dnodes import *
class TDTestCase:
def init(self, conn):
def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor())
tdSql.init(conn.cursor(), logSql)
def run(self):
self.ntables = 1
......
......@@ -20,9 +20,9 @@ from util.dnodes import *
class TDTestCase:
def init(self, conn):
def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor())
tdSql.init(conn.cursor(), logSql)
def run(self):
self.ntables = 1
......
......@@ -20,9 +20,9 @@ from util.dnodes import *
class TDTestCase:
def init(self, conn):
def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor())
tdSql.init(conn.cursor(), logSql)
def run(self):
self.ntables = 1
......
......@@ -20,9 +20,9 @@ from util.dnodes import *
class TDTestCase:
def init(self, conn):
def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor())
tdSql.init(conn.cursor(), logSql)
def run(self):
self.ntables = 1
......
......@@ -20,9 +20,9 @@ from util.dnodes import *
class TDTestCase:
def init(self, conn):
def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor())
tdSql.init(conn.cursor(), logSql)
def run(self):
self.ntables = 1
......
......@@ -20,9 +20,9 @@ from util.dnodes import *
class TDTestCase:
def init(self, conn):
def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor())
tdSql.init(conn.cursor(), logSql)
def run(self):
self.ntables = 1
......
......@@ -20,9 +20,9 @@ from util.dnodes import *
class TDTestCase:
def init(self, conn):
def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor())
tdSql.init(conn.cursor(), logSql)
def run(self):
self.ntables = 1
......
......@@ -20,9 +20,9 @@ from util.dnodes import *
class TDTestCase:
def init(self, conn):
def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor())
tdSql.init(conn.cursor(), logSql)
def run(self):
self.ntables = 1
......
......@@ -20,9 +20,9 @@ from util.dnodes import *
class TDTestCase:
def init(self, conn):
def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor())
tdSql.init(conn.cursor(), logSql)
def run(self):
self.ntables = 1
......
......@@ -20,9 +20,9 @@ from util.dnodes import *
class TDTestCase:
def init(self, conn):
def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor())
tdSql.init(conn.cursor(), logSql)
def run(self):
self.ntables = 1
......
......@@ -20,9 +20,9 @@ from util.dnodes import *
class TDTestCase:
def init(self, conn):
def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor())
tdSql.init(conn.cursor(), logSql)
def run(self):
self.ntables = 1
......
......@@ -20,9 +20,9 @@ from util.dnodes import *
class TDTestCase:
def init(self, conn):
def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor())
tdSql.init(conn.cursor(), logSql)
def run(self):
self.ntables = 1
......
......@@ -20,9 +20,9 @@ from util.dnodes import *
class TDTestCase:
def init(self, conn):
def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor())
tdSql.init(conn.cursor(), logSql)
def run(self):
self.ntables = 1
......
......@@ -20,9 +20,9 @@ from util.dnodes import *
class TDTestCase:
def init(self, conn):
def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor())
tdSql.init(conn.cursor(), logSql)
def run(self):
self.ntables = 1
......
......@@ -20,9 +20,9 @@ from util.dnodes import *
class TDTestCase:
def init(self, conn):
def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor())
tdSql.init(conn.cursor(), logSql)
def run(self):
self.ntables = 1
......
......@@ -20,9 +20,9 @@ from util.dnodes import *
class TDTestCase:
def init(self, conn):
def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor())
tdSql.init(conn.cursor(), logSql)
def run(self):
self.ntables = 1
......
......@@ -20,9 +20,9 @@ from util.dnodes import *
class TDTestCase:
def init(self, conn):
def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor())
tdSql.init(conn.cursor(), logSql)
def run(self):
self.ntables = 1
......
......@@ -20,9 +20,9 @@ from util.dnodes import *
class TDTestCase:
def init(self, conn):
def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor())
tdSql.init(conn.cursor(), logSql)
def run(self):
self.ntables = 1
......
......@@ -20,9 +20,9 @@ from util.dnodes import *
class TDTestCase:
def init(self, conn):
def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor())
tdSql.init(conn.cursor(), logSql)
def run(self):
self.ntables = 1
......
......@@ -20,9 +20,9 @@ from util.dnodes import *
class TDTestCase:
def init(self, conn):
def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor())
tdSql.init(conn.cursor(), logSql)
def run(self):
self.ntables = 1
......
......@@ -20,9 +20,9 @@ from util.dnodes import *
class TDTestCase:
def init(self, conn):
def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor())
tdSql.init(conn.cursor(), logSql)
def run(self):
self.ntables = 1
......
......@@ -20,9 +20,9 @@ from util.dnodes import *
class TDTestCase:
def init(self, conn):
def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor())
tdSql.init(conn.cursor(), logSql)
def run(self):
self.ntables = 1
......
......@@ -20,9 +20,9 @@ from util.dnodes import *
class TDTestCase:
def init(self, conn):
def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor())
tdSql.init(conn.cursor(), logSql)
def run(self):
self.ntables = 1
......
......@@ -20,9 +20,9 @@ from util.dnodes import *
class TDTestCase:
def init(self, conn):
def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor())
tdSql.init(conn.cursor(), logSql)
def run(self):
self.ntables = 1
......
......@@ -20,9 +20,9 @@ from util.dnodes import *
class TDTestCase:
def init(self, conn):
def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor())
tdSql.init(conn.cursor(), logSql)
def run(self):
self.ntables = 1
......
......@@ -20,9 +20,9 @@ from util.dnodes import *
class TDTestCase:
def init(self, conn):
def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor())
tdSql.init(conn.cursor(), logSql)
def run(self):
self.ntables = 1
......
......@@ -20,9 +20,9 @@ from util.dnodes import *
class TDTestCase:
def init(self, conn):
def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor())
tdSql.init(conn.cursor(), logSql)
def run(self):
self.ntables = 1
......
......@@ -20,9 +20,9 @@ from util.dnodes import *
class TDTestCase:
def init(self, conn):
def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor())
tdSql.init(conn.cursor(), logSql)
def run(self):
self.ntables = 1
......
......@@ -20,9 +20,9 @@ from util.dnodes import *
class TDTestCase:
def init(self, conn):
def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor())
tdSql.init(conn.cursor(), logSql)
def run(self):
self.ntables = 1
......
......@@ -20,9 +20,9 @@ from util.dnodes import *
class TDTestCase:
def init(self, conn):
def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor())
tdSql.init(conn.cursor(), logSql)
def run(self):
self.ntables = 1
......
......@@ -20,9 +20,9 @@ from util.dnodes import *
class TDTestCase:
def init(self, conn):
def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor())
tdSql.init(conn.cursor(), logSql)
def run(self):
self.ntables = 1
......
......@@ -20,9 +20,9 @@ from util.dnodes import *
class TDTestCase:
def init(self, conn):
def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor())
tdSql.init(conn.cursor(), logSql)
def run(self):
self.ntables = 1
......
......@@ -20,9 +20,9 @@ from util.dnodes import *
class TDTestCase:
def init(self, conn):
def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor())
tdSql.init(conn.cursor(), logSql)
def run(self):
self.ntables = 1
......
......@@ -20,9 +20,9 @@ from util.dnodes import *
class TDTestCase:
def init(self, conn):
def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor())
tdSql.init(conn.cursor(), logSql)
def run(self):
self.ntables = 1
......
......@@ -20,9 +20,9 @@ from util.dnodes import *
class TDTestCase:
def init(self, conn):
def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor())
tdSql.init(conn.cursor(), logSql)
def run(self):
self.ntables = 1
......
......@@ -20,9 +20,9 @@ from util.dnodes import *
class TDTestCase:
def init(self, conn):
def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor())
tdSql.init(conn.cursor(), logSql)
def run(self):
self.ntables = 1
......
......@@ -20,9 +20,9 @@ from util.dnodes import *
class TDTestCase:
def init(self, conn):
def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor())
tdSql.init(conn.cursor(), logSql)
def run(self):
self.ntables = 1
......
......@@ -20,9 +20,9 @@ from util.dnodes import *
class TDTestCase:
def init(self, conn):
def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor())
tdSql.init(conn.cursor(), logSql)
def run(self):
self.ntables = 1
......
......@@ -20,9 +20,9 @@ from util.dnodes import *
class TDTestCase:
def init(self, conn):
def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor())
tdSql.init(conn.cursor(), logSql)
def run(self):
self.ntables = 1
......
......@@ -20,9 +20,9 @@ from util.dnodes import *
class TDTestCase:
def init(self, conn):
def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor())
tdSql.init(conn.cursor(), logSql)
def run(self):
self.ntables = 1
......
......@@ -20,9 +20,9 @@ from util.dnodes import *
class TDTestCase:
def init(self, conn):
def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor())
tdSql.init(conn.cursor(), logSql)
def run(self):
self.ntables = 1
......
(The remaining file diffs are collapsed.)