Commit e55066be authored by H Haojun Liao

Merge branch 'develop' of https://github.com/taosdata/TDengine into develop

......@@ -15,28 +15,15 @@
# arbitrator arbitrator_hostname:6030
# the fully qualified domain name (FQDN) of the dnode
# fqdn hostname:6030
# fqdn hostname
# port for clients to connect to the MNode, default udp[6030-6055] tcp[6030]
# port for clients to connect to the MNode, default udp/tcp [6030-6040]
# serverPort 6030
# http service port, default tcp[6020]
# http service port, default tcp [6020]
# httpPort 6020
# set socket type ("udp" or "tcp")
# the server and client must use the same socket type; otherwise, the connection will fail
# sockettype udp
# for the cluster version, data file's directory is configured this way
# option mount_path tier_level
# dataDir /mnt/disk1/taos 0
# dataDir /mnt/disk2/taos 0
# dataDir /mnt/disk3/taos 0
# dataDir /mnt/disk4/taos 0
# dataDir /mnt/disk5/taos 0
# dataDir /mnt/disk6/taos 1
# dataDir /mnt/disk7/taos 1
# for the stand-alone version, data file's directory is configured this way
# data file's directory
# dataDir /var/lib/taos
# log file's directory
......@@ -46,12 +33,12 @@
# numOfMnodes 3
# optional roles for dnode. 0 - any, 1 - mnode, 2 - dnode
# alternativeRole 0
# role 0
# number of threads per CPU core
# numOfThreadsPerCore 1.0
# number of vgroups per db
# max number of vgroups per db
# maxVgroupsPerDb 0
# max number of tables per vnode
......@@ -60,9 +47,6 @@
# the ratio of query threads to the total number of threads
# ratioOfQueryThreads 0.5
# interval for checking load balance when the management node is in normal operation
# balanceInterval 300
# interval at which a dnode reports its status to the mnode, in seconds, for cluster version only
# statusInterval 1
......@@ -82,13 +66,13 @@
# tableMetaKeepTimer 7200
# Minimum sliding window time
# minSlidingTime 10
# minSlidingTime 10
# minimum time window size
# minIntervalTime 10
# minIntervalTime 10
# max length of an SQL
# maxSQLLength 65380
# maxSQLLength 65480
# maximum number of records allowed for super table time sorting
# maxNumOfOrderedRes 100000
......@@ -153,11 +137,8 @@
# Stop writing data when the available disk space of the data folder is less than this value
# minimalDataDirGB 0.1
# number of vnodes that an mnode is counted as during load balancing, for cluster version only
# mnodeEqualVnodeNum 4
# number of seconds allowed for a dnode to be offline, for cluster version only
# offlineThreshold 864000
# offlineThreshold 8640000
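# note: 8640000 seconds is 100 days (the previous value, 864000, was 10 days)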
# start http service
# http 1
......@@ -165,7 +146,7 @@
# start system monitor module
# monitor 1
# start http service
# start mqtt service
# mqtt 0
# mqtt uri
......@@ -201,19 +182,22 @@
# 131: output warning and error, 135: output info, warning and error to log.
# 199: output debug, info, warning and error to both screen and file
# debug flag for basic utils
# debugFlag 131
# debug flag for all log types; takes effect when set to a non-zero value
# debugFlag 0
# debug flag for meta management messages
# mDebugFlag 135
# debug flag for dnode messages
# dDebugFlag 131
# dDebugFlag 135
# debug flag for TDengine SDB
# debug flag for sync module
# sDebugFlag 135
# debug flag for TDengine SDB
# debug flag for WAL
# wDebugFlag 135
# debug flag for SDB
# sdbDebugFlag 135
# debug flag for RPC
......@@ -246,6 +230,9 @@
# debug flag for query
# qDebugflag 131
# debug flag for vnode
# vDebugflag 131
# debug flag for http server
# tsdbDebugFlag 131
......
......@@ -358,7 +358,7 @@ int32_t tscToSQLCmd(SSqlObj* pSql, struct SSqlInfo* pInfo) {
}
case TSDB_SQL_CFG_DNODE: {
const char* msg2 = "invalid configure options or values, such as resetlog / debugFlag 135 / balance 'vnode:1-dnode:2' / monitor 1 ";
const char* msg2 = "invalid configure options or values, such as resetlog / debugFlag 135 / balance 'vnode:2-dnode:2' / monitor 1 ";
const char* msg3 = "invalid dnode ep";
/* validate the ip address */
......@@ -4700,10 +4700,10 @@ int32_t validateDNodeConfig(tDCLSQL* pOptions) {
} else if ((strncasecmp(cfgOptions[tokenBalance].name, pOptionToken->z, pOptionToken->n) == 0) &&
(cfgOptions[tokenBalance].len == pOptionToken->n)) {
SSQLToken* pValToken = &pOptions->a[2];
int32_t vnodeIndex = 0;
int32_t dnodeIndex = 0;
int32_t vnodeId = 0;
int32_t dnodeId = 0;
strdequote(pValToken->z);
bool parseOk = taosCheckBalanceCfgOptions(pValToken->z, &vnodeIndex, &dnodeIndex);
bool parseOk = taosCheckBalanceCfgOptions(pValToken->z, &vnodeId, &dnodeId);
if (!parseOk) {
return TSDB_CODE_TSC_INVALID_SQL; // options value is invalid
}
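For reference, the statement that exercises this branch is the one quoted in msg2 above, for example:

  alter dnode 1 balance "vnode:2-dnode:2"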
......
......@@ -106,13 +106,14 @@ static void tscUpdateVgroupInfo(SSqlObj *pObj, SRpcEpSet *pEpSet) {
SCMCorVgroupInfo *pVgroupInfo = &pTableMetaInfo->pTableMeta->corVgroupInfo;
taosCorBeginWrite(&pVgroupInfo->version);
// TODO(dengyihao): don't care about vgId
tscDebug("before: Endpoint in use: %d", pVgroupInfo->inUse);
pVgroupInfo->inUse = pEpSet->inUse;
pVgroupInfo->numOfEps = pEpSet->numOfEps;
for (int32_t i = 0; pVgroupInfo->numOfEps; i++) {
for (int32_t i = 0; i < pVgroupInfo->numOfEps; i++) {
strncpy(pVgroupInfo->epAddr[i].fqdn, pEpSet->fqdn[i], TSDB_FQDN_LEN);
pVgroupInfo->epAddr[i].port = pEpSet->port[i];
}
tscDebug("after: EndPoint in use: %d", pVgroupInfo->inUse);
taosCorEndWrite(&pVgroupInfo->version);
}
void tscPrintMgmtEp() {
......@@ -283,9 +284,7 @@ void tscProcessMsgFromServer(SRpcMsg *rpcMsg, SRpcEpSet *pEpSet) {
}
if (pEpSet) {
//SRpcEpSet dump;
tscEpSetHtons(pEpSet);
if (tscEpSetIsEqual(&pSql->epSet, pEpSet)) {
if (!tscEpSetIsEqual(&pSql->epSet, pEpSet)) {
if(pCmd->command < TSDB_SQL_MGMT) {
tscUpdateVgroupInfo(pSql, pEpSet);
} else {
......
......@@ -256,11 +256,12 @@ static int tscUpdateSubscription(STscObj* pObj, SSub* pSub) {
}
size_t numOfTables = taosArrayGetSize(tables);
SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, 0);
SArray* progress = taosArrayInit(numOfTables, sizeof(SSubscriptionProgress));
for( size_t i = 0; i < numOfTables; i++ ) {
STidTags* tt = taosArrayGet( tables, i );
SSubscriptionProgress p = { .uid = tt->uid };
p.key = tscGetSubscriptionProgress(pSub, tt->uid, INT64_MIN);
p.key = tscGetSubscriptionProgress(pSub, tt->uid, pQueryInfo->window.skey);
taosArrayPush(progress, &p);
}
taosArraySort(progress, tscCompareSubscriptionProgress);
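Note the effect of this change: a table with no previously saved progress now starts consuming from the subscription query's own start time (pQueryInfo->window.skey) instead of from INT64_MIN, i.e. the beginning of time.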
......
......@@ -174,7 +174,7 @@ bool taosCheckGlobalCfg();
void taosSetAllDebugFlag();
bool taosCfgDynamicOptions(char *msg);
int taosGetFqdnPortFromEp(const char *ep, char *fqdn, uint16_t *port);
bool taosCheckBalanceCfgOptions(const char *option, int32_t *vnodeIndex, int32_t *dnodeIndex);
bool taosCheckBalanceCfgOptions(const char *option, int32_t *vnodeId, int32_t *dnodeId);
#ifdef __cplusplus
}
......
......@@ -1315,7 +1315,7 @@ int taosGetFqdnPortFromEp(const char *ep, char *fqdn, uint16_t *port) {
* alter dnode 1 balance "vnode:1-dnode:2"
*/
bool taosCheckBalanceCfgOptions(const char *option, int32_t *vnodeIndex, int32_t *dnodeIndex) {
bool taosCheckBalanceCfgOptions(const char *option, int32_t *vnodeId, int32_t *dnodeId) {
int len = strlen(option);
if (strncasecmp(option, "vnode:", 6) != 0) {
return false;
......@@ -1331,9 +1331,9 @@ bool taosCheckBalanceCfgOptions(const char *option, int32_t *vnodeIndex, int32_t
return false;
}
*vnodeIndex = strtol(option + 6, NULL, 10);
*dnodeIndex = strtol(option + pos + 6, NULL, 10);
if (*vnodeIndex <= 1 || *dnodeIndex <= 0) {
*vnodeId = strtol(option + 6, NULL, 10);
*dnodeId = strtol(option + pos + 6, NULL, 10);
if (*vnodeId <= 1 || *dnodeId <= 0) {
return false;
}
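A minimal usage sketch for the renamed out-parameters (the call below is illustrative, not part of the diff):

  int32_t vnodeId = 0;
  int32_t dnodeId = 0;
  // "vnode:2-dnode:3" asks the balancer to move vnode 2 onto dnode 3
  if (taosCheckBalanceCfgOptions("vnode:2-dnode:3", &vnodeId, &dnodeId)) {
    // here vnodeId == 2 and dnodeId == 3; the guard above rejects vnodeId <= 1
  }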
......
......@@ -492,6 +492,7 @@ static void dnodeProcessStatusRsp(SRpcMsg *pMsg) {
pCfg->numOfVnodes = htonl(pCfg->numOfVnodes);
pCfg->moduleStatus = htonl(pCfg->moduleStatus);
pCfg->dnodeId = htonl(pCfg->dnodeId);
pCfg->clusterId = htonl(pCfg->clusterId);
for (int32_t i = 0; i < pMnodes->nodeNum; ++i) {
SDMMnodeInfo *pMnodeInfo = &pMnodes->nodeInfos[i];
......@@ -697,6 +698,7 @@ static void dnodeSendStatusMsg(void *handle, void *tmrId) {
//strcpy(pStatus->dnodeName, tsDnodeName);
pStatus->version = htonl(tsVersion);
pStatus->dnodeId = htonl(tsDnodeCfg.dnodeId);
pStatus->clusterId = htonl(tsDnodeCfg.clusterId);
strcpy(pStatus->dnodeEp, tsLocalEp);
pStatus->lastReboot = htonl(tsRebootTime);
pStatus->numOfCores = htons((uint16_t) tsNumOfCores);
......@@ -767,6 +769,13 @@ static bool dnodeReadDnodeCfg() {
}
tsDnodeCfg.dnodeId = dnodeId->valueint;
cJSON* clusterId = cJSON_GetObjectItem(root, "clusterId");
if (!clusterId || clusterId->type != cJSON_Number) {
dError("failed to read dnodeCfg.json, clusterId not found");
goto PARSE_CFG_OVER;
}
tsDnodeCfg.clusterId = clusterId->valueint;
ret = true;
dInfo("read numOfVnodes successed, dnodeId:%d", tsDnodeCfg.dnodeId);
......@@ -790,7 +799,8 @@ static void dnodeSaveDnodeCfg() {
char * content = calloc(1, maxLen + 1);
len += snprintf(content + len, maxLen - len, "{\n");
len += snprintf(content + len, maxLen - len, " \"dnodeId\": %d\n", tsDnodeCfg.dnodeId);
len += snprintf(content + len, maxLen - len, " \"dnodeId\": %d,\n", tsDnodeCfg.dnodeId);
len += snprintf(content + len, maxLen - len, " \"clusterId\": %d\n", tsDnodeCfg.clusterId);
len += snprintf(content + len, maxLen - len, "}\n");
fwrite(content, 1, len, fp);
......@@ -803,8 +813,9 @@ static void dnodeSaveDnodeCfg() {
void dnodeUpdateDnodeCfg(SDMDnodeCfg *pCfg) {
if (tsDnodeCfg.dnodeId == 0) {
dInfo("dnodeId is set to %d", pCfg->dnodeId);
dInfo("dnodeId is set to %d, clusterId is set to %d", pCfg->dnodeId, pCfg->clusterId);
tsDnodeCfg.dnodeId = pCfg->dnodeId;
tsDnodeCfg.clusterId = pCfg->clusterId;
dnodeSaveDnodeCfg();
}
}
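With the clusterId field added above, the dnodeCfg.json that dnodeSaveDnodeCfg writes now looks like this (values illustrative):

  {
    "dnodeId": 1,
    "clusterId": 1234567890
  }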
......
......@@ -377,6 +377,7 @@ void tsDataSwap(void *pLeft, void *pRight, int32_t type, int32_t size);
#define TSDB_ORDER_ASC 1
#define TSDB_ORDER_DESC 2
#define TSDB_DEFAULT_CLUSTER_HASH_SIZE 1
#define TSDB_DEFAULT_MNODES_HASH_SIZE 5
#define TSDB_DEFAULT_DNODES_HASH_SIZE 10
#define TSDB_DEFAULT_ACCOUNTS_HASH_SIZE 10
......
......@@ -120,12 +120,18 @@ TAOS_DEFINE_ERROR(TSDB_CODE_MND_SDB_OBJ_NOT_THERE, 0, 0x0323, "sdb object
TAOS_DEFINE_ERROR(TSDB_CODE_MND_SDB_INVAID_META_ROW, 0, 0x0324, "sdb invalid meta row")
TAOS_DEFINE_ERROR(TSDB_CODE_MND_SDB_INVAID_KEY_TYPE, 0, 0x0325, "sdb invalid key type")
TAOS_DEFINE_ERROR(TSDB_CODE_MND_DNODE_ALREADY_EXIST, 0, 0x0330, "mnode dnode already exist")
TAOS_DEFINE_ERROR(TSDB_CODE_MND_DNODE_NOT_EXIST, 0, 0x0331, "mnode dnode not exist")
TAOS_DEFINE_ERROR(TSDB_CODE_MND_VGROUP_NOT_EXIST, 0, 0x0332, "mnode vgroup not exist")
TAOS_DEFINE_ERROR(TSDB_CODE_MND_NO_REMOVE_MASTER, 0, 0x0333, "mnode cannot remove master")
TAOS_DEFINE_ERROR(TSDB_CODE_MND_NO_ENOUGH_DNODES, 0, 0x0334, "mnode not enough dnodes")
TAOS_DEFINE_ERROR(TSDB_CODE_MND_CLUSTER_CFG_INCONSISTENT, 0, 0x0335, "mnode cluster cfg inconsistent")
TAOS_DEFINE_ERROR(TSDB_CODE_MND_DNODE_ALREADY_EXIST, 0, 0x0330, "dnode already exist")
TAOS_DEFINE_ERROR(TSDB_CODE_MND_DNODE_NOT_EXIST, 0, 0x0331, "dnode not exist")
TAOS_DEFINE_ERROR(TSDB_CODE_MND_VGROUP_NOT_EXIST, 0, 0x0332, "vgroup not exist")
TAOS_DEFINE_ERROR(TSDB_CODE_MND_NO_REMOVE_MASTER, 0, 0x0333, "cannot remove master")
TAOS_DEFINE_ERROR(TSDB_CODE_MND_NO_ENOUGH_DNODES, 0, 0x0334, "not enough dnodes")
TAOS_DEFINE_ERROR(TSDB_CODE_MND_CLUSTER_CFG_INCONSISTENT, 0, 0x0335, "cluster cfg inconsistent")
TAOS_DEFINE_ERROR(TSDB_CODE_MND_INVALID_DNODE_CFG_OPTION, 0, 0x0336, "invalid dnode cfg option")
TAOS_DEFINE_ERROR(TSDB_CODE_MND_BALANCE_ENABLED, 0, 0x0337, "balance already enabled")
TAOS_DEFINE_ERROR(TSDB_CODE_MND_VGROUP_NOT_IN_DNODE, 0, 0x0338, "vgroup not in dnode")
TAOS_DEFINE_ERROR(TSDB_CODE_MND_VGROUP_ALREADY_IN_DNODE, 0, 0x0339, "vgroup already in dnode")
TAOS_DEFINE_ERROR(TSDB_CODE_MND_DNODE_NOT_FREE,       0, 0x033A, "dnode not available")
TAOS_DEFINE_ERROR(TSDB_CODE_MND_INVALID_CLUSTER_ID, 0, 0x033B, "cluster id not match")
TAOS_DEFINE_ERROR(TSDB_CODE_MND_ACCT_ALREADY_EXIST, 0, 0x0340, "mnode accounts already exist")
TAOS_DEFINE_ERROR(TSDB_CODE_MND_INVALID_ACCT, 0, 0x0341, "mnode invalid account")
......
......@@ -139,6 +139,7 @@ enum _mgmt_table {
TSDB_MGMT_TABLE_GRANTS,
TSDB_MGMT_TABLE_VNODES,
TSDB_MGMT_TABLE_STREAMTABLES,
TSDB_MGMT_TABLE_CLUSTER,
TSDB_MGMT_TABLE_MAX,
};
......@@ -149,6 +150,7 @@ enum _mgmt_table {
#define TSDB_ALTER_TABLE_ADD_COLUMN 5
#define TSDB_ALTER_TABLE_DROP_COLUMN 6
#define TSDB_ALTER_TABLE_CHANGE_COLUMN 7
#define TSDB_FILL_NONE 0
#define TSDB_FILL_NULL 1
......@@ -545,6 +547,7 @@ typedef struct {
typedef struct {
int32_t dnodeId;
int32_t clusterId;
uint32_t moduleStatus;
uint32_t numOfVnodes;
uint32_t reserved;
......@@ -585,6 +588,7 @@ typedef struct {
uint16_t openVnodes;
uint16_t numOfCores;
float diskAvailable; // GB
int32_t clusterId;
uint8_t alternativeRole;
uint8_t reserve2[15];
SClusterCfg clusterCfg;
......
......@@ -29,7 +29,7 @@ void balanceAsyncNotify();
void balanceSyncNotify();
void balanceReset();
int32_t balanceAllocVnodes(struct SVgObj *pVgroup);
int32_t balanceCfgDnode(struct SDnodeObj *pDnode, const char *option);
int32_t balanceAlterDnode(struct SDnodeObj *pDnode, int32_t vnodeId, int32_t dnodeId);
int32_t balanceDropDnode(struct SDnodeObj *pDnode);
#ifdef __cplusplus
......
......@@ -53,14 +53,12 @@ typedef struct {
int32_t tsdbId;
int32_t cacheBlockSize;
int32_t totalBlocks;
int32_t maxTables; // maximum number of tables this repository can have
int32_t daysPerFile; // day per file sharding policy
int32_t keep; // day of data to keep
int32_t keep1;
int32_t keep2;
int32_t minRowsPerFileBlock; // minimum rows per file block
int32_t maxRowsPerFileBlock; // maximum rows per file block
int32_t commitTime;
int8_t precision;
int8_t compression;
} STsdbCfg;
......
......@@ -37,14 +37,12 @@ static int32_t saveVnodeCfg(SVnodeObj *pVnode, char* cfgFile)
len += snprintf(content + len, maxLen - len, " \"cfgVersion\": %d,\n", pVnode->cfgVersion);
len += snprintf(content + len, maxLen - len, " \"cacheBlockSize\": %d,\n", pVnode->tsdbCfg.cacheBlockSize);
len += snprintf(content + len, maxLen - len, " \"totalBlocks\": %d,\n", pVnode->tsdbCfg.totalBlocks);
len += snprintf(content + len, maxLen - len, " \"maxTables\": %d,\n", pVnode->tsdbCfg.maxTables);
len += snprintf(content + len, maxLen - len, " \"daysPerFile\": %d,\n", pVnode->tsdbCfg.daysPerFile);
len += snprintf(content + len, maxLen - len, " \"daysToKeep\": %d,\n", pVnode->tsdbCfg.keep);
len += snprintf(content + len, maxLen - len, " \"daysToKeep1\": %d,\n", pVnode->tsdbCfg.keep1);
len += snprintf(content + len, maxLen - len, " \"daysToKeep2\": %d,\n", pVnode->tsdbCfg.keep2);
len += snprintf(content + len, maxLen - len, " \"minRowsPerFileBlock\": %d,\n", pVnode->tsdbCfg.minRowsPerFileBlock);
len += snprintf(content + len, maxLen - len, " \"maxRowsPerFileBlock\": %d,\n", pVnode->tsdbCfg.maxRowsPerFileBlock);
len += snprintf(content + len, maxLen - len, " \"commitTime\": %d,\n", pVnode->tsdbCfg.commitTime);
len += snprintf(content + len, maxLen - len, " \"precision\": %d,\n", pVnode->tsdbCfg.precision);
len += snprintf(content + len, maxLen - len, " \"compression\": %d,\n", pVnode->tsdbCfg.compression);
len += snprintf(content + len, maxLen - len, " \"walLevel\": %d,\n", pVnode->walCfg.walLevel);
......@@ -136,12 +134,12 @@ static int32_t readVnodeCfg(SVnodeObj *pVnode, char* cfgFile)
}
pVnode->tsdbCfg.totalBlocks = totalBlocks->valueint;
cJSON *maxTables = cJSON_GetObjectItem(root, "maxTables");
if (!maxTables || maxTables->type != cJSON_Number) {
printf("vgId:%d, failed to read vnode cfg, maxTables not found\n", pVnode->vgId);
goto PARSE_OVER;
}
pVnode->tsdbCfg.maxTables = maxTables->valueint;
// cJSON *maxTables = cJSON_GetObjectItem(root, "maxTables");
// if (!maxTables || maxTables->type != cJSON_Number) {
// printf("vgId:%d, failed to read vnode cfg, maxTables not found\n", pVnode->vgId);
// goto PARSE_OVER;
// }
// pVnode->tsdbCfg.maxTables = maxTables->valueint;
cJSON *daysPerFile = cJSON_GetObjectItem(root, "daysPerFile");
if (!daysPerFile || daysPerFile->type != cJSON_Number) {
......@@ -185,12 +183,12 @@ static int32_t readVnodeCfg(SVnodeObj *pVnode, char* cfgFile)
}
pVnode->tsdbCfg.maxRowsPerFileBlock = maxRowsPerFileBlock->valueint;
cJSON *commitTime = cJSON_GetObjectItem(root, "commitTime");
if (!commitTime || commitTime->type != cJSON_Number) {
printf("vgId:%d, failed to read vnode cfg, commitTime not found\n", pVnode->vgId);
goto PARSE_OVER;
}
pVnode->tsdbCfg.commitTime = (int8_t)commitTime->valueint;
// cJSON *commitTime = cJSON_GetObjectItem(root, "commitTime");
// if (!commitTime || commitTime->type != cJSON_Number) {
// printf("vgId:%d, failed to read vnode cfg, commitTime not found\n", pVnode->vgId);
// goto PARSE_OVER;
// }
// pVnode->tsdbCfg.commitTime = (int8_t)commitTime->valueint;
cJSON *precision = cJSON_GetObjectItem(root, "precision");
if (!precision || precision->type != cJSON_Number) {
......
/*
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
*
* This program is free software: you can use, redistribute, and/or modify
* it under the terms of the GNU Affero General Public License, version 3
* or later ("AGPL"), as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef TDENGINE_MNODE_CLUSTER_H
#define TDENGINE_MNODE_CLUSTER_H
#ifdef __cplusplus
extern "C" {
#endif
struct SClusterObj;
int32_t mnodeInitCluster();
void mnodeCleanupCluster();
int32_t mnodeGetClusterId();
void mnodeUpdateClusterId();
void * mnodeGetCluster(int32_t clusterId);
void * mnodeGetNextCluster(void *pIter, struct SClusterObj **pCluster);
void mnodeIncClusterRef(struct SClusterObj *pCluster);
void mnodeDecClusterRef(struct SClusterObj *pCluster);
#ifdef __cplusplus
}
#endif
#endif
......@@ -36,6 +36,14 @@ struct define notes:
3. The fields behind the updateEnd field can be changed;
*/
typedef struct SClusterObj {
int32_t clusterId;
int64_t createdTime;
int8_t reserved[36];
int8_t updateEnd[4];
int32_t refCount;
} SClusterObj;
typedef struct SDnodeObj {
int32_t dnodeId;
int32_t openVnodes;
......
......@@ -23,15 +23,16 @@ extern "C" {
struct SMnodeMsg;
typedef enum {
SDB_TABLE_DNODE = 0,
SDB_TABLE_MNODE = 1,
SDB_TABLE_ACCOUNT = 2,
SDB_TABLE_USER = 3,
SDB_TABLE_DB = 4,
SDB_TABLE_VGROUP = 5,
SDB_TABLE_STABLE = 6,
SDB_TABLE_CTABLE = 7,
SDB_TABLE_MAX = 8
SDB_TABLE_CLUSTER = 0,
SDB_TABLE_DNODE = 1,
SDB_TABLE_MNODE = 2,
SDB_TABLE_ACCOUNT = 3,
SDB_TABLE_USER = 4,
SDB_TABLE_DB = 5,
SDB_TABLE_VGROUP = 6,
SDB_TABLE_STABLE = 7,
SDB_TABLE_CTABLE = 8,
SDB_TABLE_MAX = 9
} ESdbTable;
typedef enum {
......
......@@ -64,7 +64,7 @@ static int32_t mnodeAcctActionUpdate(SSdbOper *pOper) {
return TSDB_CODE_SUCCESS;
}
static int32_t mnodeActionActionEncode(SSdbOper *pOper) {
static int32_t mnodeAcctActionEncode(SSdbOper *pOper) {
SAcctObj *pAcct = pOper->pObj;
memcpy(pOper->rowData, pAcct, tsAcctUpdateSize);
pOper->rowSize = tsAcctUpdateSize;
......@@ -109,7 +109,7 @@ int32_t mnodeInitAccts() {
.insertFp = mnodeAcctActionInsert,
.deleteFp = mnodeAcctActionDelete,
.updateFp = mnodeAcctActionUpdate,
.encodeFp = mnodeActionActionEncode,
.encodeFp = mnodeAcctActionEncode,
.decodeFp = mnodeAcctActionDecode,
.destroyFp = mnodeAcctActionDestroy,
.restoredFp = mnodeAcctActionRestored
......
......@@ -28,7 +28,7 @@ void balanceCleanUp() {}
void balanceAsyncNotify() {}
void balanceSyncNotify() {}
void balanceReset() {}
int32_t balanceCfgDnode(struct SDnodeObj *pDnode, const char *option) { return TSDB_CODE_SYN_NOT_ENABLED; }
int32_t balanceAlterDnode(struct SDnodeObj *pDnode, int32_t vnodeId, int32_t dnodeId) { return TSDB_CODE_SYN_NOT_ENABLED; }
int32_t balanceAllocVnodes(SVgObj *pVgroup) {
void * pIter = NULL;
......
/*
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
*
* This program is free software: you can use, redistribute, and/or modify
* it under the terms of the GNU Affero General Public License, version 3
* or later ("AGPL"), as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#define _DEFAULT_SOURCE
#include "os.h"
#include "taoserror.h"
#include "ttime.h"
#include "dnode.h"
#include "mnodeDef.h"
#include "mnodeInt.h"
#include "mnodeCluster.h"
#include "mnodeSdb.h"
#include "mnodeShow.h"
#include "tglobal.h"
static void * tsClusterSdb = NULL;
static int32_t tsClusterUpdateSize;
static int32_t tsClusterId;
static int32_t mnodeCreateCluster();
static int32_t mnodeGetClusterMeta(STableMetaMsg *pMeta, SShowObj *pShow, void *pConn);
static int32_t mnodeRetrieveClusters(SShowObj *pShow, char *data, int32_t rows, void *pConn);
static int32_t mnodeClusterActionDestroy(SSdbOper *pOper) {
tfree(pOper->pObj);
return TSDB_CODE_SUCCESS;
}
static int32_t mnodeClusterActionInsert(SSdbOper *pOper) {
return TSDB_CODE_SUCCESS;
}
static int32_t mnodeClusterActionDelete(SSdbOper *pOper) {
return TSDB_CODE_SUCCESS;
}
static int32_t mnodeClusterActionUpdate(SSdbOper *pOper) {
return TSDB_CODE_SUCCESS;
}
static int32_t mnodeClusterActionEncode(SSdbOper *pOper) {
SClusterObj *pCluster = pOper->pObj;
memcpy(pOper->rowData, pCluster, tsClusterUpdateSize);
pOper->rowSize = tsClusterUpdateSize;
return TSDB_CODE_SUCCESS;
}
static int32_t mnodeClusterActionDecode(SSdbOper *pOper) {
SClusterObj *pCluster = (SClusterObj *) calloc(1, sizeof(SClusterObj));
if (pCluster == NULL) return TSDB_CODE_MND_OUT_OF_MEMORY;
memcpy(pCluster, pOper->rowData, tsClusterUpdateSize);
pOper->pObj = pCluster;
return TSDB_CODE_SUCCESS;
}
static int32_t mnodeClusterActionRestored() {
int32_t numOfRows = sdbGetNumOfRows(tsClusterSdb);
if (numOfRows <= 0 && dnodeIsFirstDeploy()) {
mInfo("dnode first deploy, create cluster");
int32_t code = mnodeCreateCluster();
if (code != TSDB_CODE_SUCCESS) {
mError("failed to create cluster, reason:%s", tstrerror(code));
return code;
}
}
mnodeUpdateClusterId();
return TSDB_CODE_SUCCESS;
}
int32_t mnodeInitCluster() {
SClusterObj tObj;
tsClusterUpdateSize = (int8_t *)tObj.updateEnd - (int8_t *)&tObj;
SSdbTableDesc tableDesc = {
.tableId = SDB_TABLE_CLUSTER,
.tableName = "cluster",
.hashSessions = TSDB_DEFAULT_CLUSTER_HASH_SIZE,
.maxRowSize = tsClusterUpdateSize,
.refCountPos = (int8_t *)(&tObj.refCount) - (int8_t *)&tObj,
.keyType = SDB_KEY_INT,
.insertFp = mnodeClusterActionInsert,
.deleteFp = mnodeClusterActionDelete,
.updateFp = mnodeClusterActionUpdate,
.encodeFp = mnodeClusterActionEncode,
.decodeFp = mnodeClusterActionDecode,
.destroyFp = mnodeClusterActionDestroy,
.restoredFp = mnodeClusterActionRestored
};
tsClusterSdb = sdbOpenTable(&tableDesc);
if (tsClusterSdb == NULL) {
mError("table:%s, failed to create hash", tableDesc.tableName);
return -1;
}
mnodeAddShowMetaHandle(TSDB_MGMT_TABLE_CLUSTER, mnodeGetClusterMeta);
mnodeAddShowRetrieveHandle(TSDB_MGMT_TABLE_CLUSTER, mnodeRetrieveClusters);
mDebug("table:%s, hash is created", tableDesc.tableName);
return TSDB_CODE_SUCCESS;
}
void mnodeCleanupCluster() {
sdbCloseTable(tsClusterSdb);
tsClusterSdb = NULL;
}
void *mnodeGetCluster(int32_t clusterId) {
return sdbGetRow(tsClusterSdb, &clusterId);
}
void *mnodeGetNextCluster(void *pIter, SClusterObj **pCluster) {
return sdbFetchRow(tsClusterSdb, pIter, (void **)pCluster);
}
void mnodeIncClusterRef(SClusterObj *pCluster) {
sdbIncRef(tsClusterSdb, pCluster);
}
void mnodeDecClusterRef(SClusterObj *pCluster) {
sdbDecRef(tsClusterSdb, pCluster);
}
static int32_t mnodeCreateCluster() {
int32_t numOfClusters = sdbGetNumOfRows(tsClusterSdb);
if (numOfClusters != 0) return TSDB_CODE_SUCCESS;
SClusterObj *pCluster = calloc(1, sizeof(SClusterObj));
if (pCluster == NULL) return TSDB_CODE_MND_OUT_OF_MEMORY;
pCluster->createdTime = taosGetTimestampMs();
pCluster->clusterId = labs((pCluster->createdTime >> 32) & (pCluster->createdTime)) | (*(int32_t*)tsFirst);
SSdbOper oper = {
.type = SDB_OPER_GLOBAL,
.table = tsClusterSdb,
.pObj = pCluster,
};
return sdbInsertRow(&oper);
}
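A note on the id derived above: it mixes the high and low 32 bits of the creation timestamp and ORs in the first four bytes of tsFirst (apparently the configured first endpoint), so ids of independently created clusters are unlikely, though not guaranteed, to collide.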
int32_t mnodeGetClusterId() {
return tsClusterId;
}
void mnodeUpdateClusterId() {
SClusterObj *pCluster = NULL;
mnodeGetNextCluster(NULL, &pCluster);
if (pCluster != NULL) {
tsClusterId = pCluster->clusterId;
mnodeDecClusterRef(pCluster);
mInfo("cluster id is %d", tsClusterId);
} else {
//assert(false);
}
}
static int32_t mnodeGetClusterMeta(STableMetaMsg *pMeta, SShowObj *pShow, void *pConn) {
int32_t cols = 0;
SSchema *pSchema = pMeta->schema;
pShow->bytes[cols] = 4;
pSchema[cols].type = TSDB_DATA_TYPE_INT;
strcpy(pSchema[cols].name, "clusterId");
pSchema[cols].bytes = htons(pShow->bytes[cols]);
cols++;
pShow->bytes[cols] = 8;
pSchema[cols].type = TSDB_DATA_TYPE_TIMESTAMP;
strcpy(pSchema[cols].name, "create_time");
pSchema[cols].bytes = htons(pShow->bytes[cols]);
cols++;
pMeta->numOfColumns = htons(cols);
strcpy(pMeta->tableId, "show cluster");
pShow->numOfColumns = cols;
pShow->offset[0] = 0;
for (int32_t i = 1; i < cols; ++i) {
pShow->offset[i] = pShow->offset[i - 1] + pShow->bytes[i - 1];
}
pShow->numOfRows = 1;
pShow->rowSize = pShow->offset[cols - 1] + pShow->bytes[cols - 1];
return 0;
}
static int32_t mnodeRetrieveClusters(SShowObj *pShow, char *data, int32_t rows, void *pConn) {
int32_t numOfRows = 0;
int32_t cols = 0;
char * pWrite;
SClusterObj *pCluster = NULL;
while (numOfRows < rows) {
pShow->pIter = mnodeGetNextCluster(pShow->pIter, &pCluster);
if (pCluster == NULL) break;
cols = 0;
pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows;
*(int32_t *) pWrite = pCluster->clusterId;
cols++;
pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows;
*(int64_t *) pWrite = pCluster->createdTime;  // create_time is an 8-byte TIMESTAMP column
cols++;
mnodeDecClusterRef(pCluster);
numOfRows++;
}
pShow->numOfReads += numOfRows;
return numOfRows;
}
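Given the two columns registered in mnodeGetClusterMeta, a "show cluster" query returns one row per cluster; illustrative output:

       clusterId |             create_time |
  ==========================================
      1234567890 | 2019-07-01 10:00:00.000 |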
......@@ -37,6 +37,7 @@
#include "mnodeVgroup.h"
#include "mnodeWrite.h"
#include "mnodePeer.h"
#include "mnodeCluster.h"
int32_t tsAccessSquence = 0;
static void *tsDnodeSdb = NULL;
......@@ -295,10 +296,19 @@ static int32_t mnodeProcessCfgDnodeMsg(SMnodeMsg *pMsg) {
}
SRpcEpSet epSet = mnodeGetEpSetFromIp(pDnode->dnodeEp);
mnodeDecDnodeRef(pDnode);
if (strncasecmp(pCmCfgDnode->config, "balance", 7) == 0) {
return balanceCfgDnode(pDnode, pCmCfgDnode->config + 8);
int32_t vnodeId = 0;
int32_t dnodeId = 0;
bool parseOk = taosCheckBalanceCfgOptions(pCmCfgDnode->config + 8, &vnodeId, &dnodeId);
if (!parseOk) {
mnodeDecDnodeRef(pDnode);
return TSDB_CODE_MND_INVALID_DNODE_CFG_OPTION;
}
int32_t code = balanceAlterDnode(pDnode, vnodeId, dnodeId);
mnodeDecDnodeRef(pDnode);
return code;
} else {
SMDCfgDnodeMsg *pMdCfgDnode = rpcMallocCont(sizeof(SMDCfgDnodeMsg));
strcpy(pMdCfgDnode->ep, pCmCfgDnode->ep);
......@@ -314,6 +324,7 @@ static int32_t mnodeProcessCfgDnodeMsg(SMnodeMsg *pMsg) {
mInfo("dnode:%s, is configured by %s", pCmCfgDnode->ep, pMsg->pUser->user);
dnodeSendMsgToDnode(&epSet, &rpcMdCfgDnodeMsg);
mnodeDecDnodeRef(pDnode);
return TSDB_CODE_SUCCESS;
}
}
......@@ -345,6 +356,7 @@ static int32_t mnodeProcessDnodeStatusMsg(SMnodeMsg *pMsg) {
pStatus->moduleStatus = htonl(pStatus->moduleStatus);
pStatus->lastReboot = htonl(pStatus->lastReboot);
pStatus->numOfCores = htons(pStatus->numOfCores);
pStatus->clusterId = htonl(pStatus->clusterId);
uint32_t version = htonl(pStatus->version);
if (version != tsVersion) {
......@@ -372,13 +384,19 @@ static int32_t mnodeProcessDnodeStatusMsg(SMnodeMsg *pMsg) {
pDnode->diskAvailable = pStatus->diskAvailable;
pDnode->alternativeRole = pStatus->alternativeRole;
pDnode->moduleStatus = pStatus->moduleStatus;
if (pStatus->dnodeId == 0) {
mDebug("dnode:%d %s, first access", pDnode->dnodeId, pDnode->dnodeEp);
mDebug("dnode:%d %s, first access, set clusterId %d", pDnode->dnodeId, pDnode->dnodeEp, mnodeGetClusterId());
} else {
mTrace("dnode:%d, status received, access times %d", pDnode->dnodeId, pDnode->lastAccess);
if (pStatus->clusterId != mnodeGetClusterId()) {
mError("dnode:%d, input clusterId %d not match with exist %d", pDnode->dnodeId, pStatus->clusterId,
mnodeGetClusterId());
return TSDB_CODE_MND_INVALID_CLUSTER_ID;
} else {
mTrace("dnode:%d, status received, access times %d", pDnode->dnodeId, pDnode->lastAccess);
}
}
int32_t openVnodes = htons(pStatus->openVnodes);
int32_t contLen = sizeof(SDMStatusRsp) + openVnodes * sizeof(SDMVgroupAccess);
SDMStatusRsp *pRsp = rpcMallocCont(contLen);
......@@ -390,6 +408,7 @@ static int32_t mnodeProcessDnodeStatusMsg(SMnodeMsg *pMsg) {
pRsp->dnodeCfg.dnodeId = htonl(pDnode->dnodeId);
pRsp->dnodeCfg.moduleStatus = htonl((int32_t)pDnode->isMgmt);
pRsp->dnodeCfg.numOfVnodes = htonl(openVnodes);
pRsp->dnodeCfg.clusterId = htonl(mnodeGetClusterId());
SDMVgroupAccess *pAccess = (SDMVgroupAccess *)((char *)pRsp + sizeof(SDMStatusRsp));
for (int32_t j = 0; j < openVnodes; ++j) {
......
......@@ -32,6 +32,7 @@
#include "mnodeVgroup.h"
#include "mnodeUser.h"
#include "mnodeTable.h"
#include "mnodeCluster.h"
#include "mnodeShow.h"
#include "mnodeProfile.h"
......@@ -46,6 +47,7 @@ static bool tsMgmtIsRunning = false;
static const SMnodeComponent tsMnodeComponents[] = {
{"profile", mnodeInitProfile, mnodeCleanupProfile},
{"cluster", mnodeInitCluster, mnodeCleanupCluster},
{"accts", mnodeInitAccts, mnodeCleanupAccts},
{"users", mnodeInitUsers, mnodeCleanupUsers},
{"dnodes", mnodeInitDnodes, mnodeCleanupDnodes},
......
......@@ -29,6 +29,7 @@
#include "mnodeInt.h"
#include "mnodeMnode.h"
#include "mnodeDnode.h"
#include "mnodeCluster.h"
#include "mnodeSdb.h"
#define SDB_TABLE_LEN 12
......@@ -214,6 +215,7 @@ void sdbUpdateMnodeRoles() {
}
}
mnodeUpdateClusterId();
mnodeUpdateMnodeEpSet();
}
......
......@@ -103,6 +103,8 @@ static char *mnodeGetShowType(int32_t showType) {
case TSDB_MGMT_TABLE_SCORES: return "show scores";
case TSDB_MGMT_TABLE_GRANTS: return "show grants";
case TSDB_MGMT_TABLE_VNODES: return "show vnodes";
case TSDB_MGMT_TABLE_CLUSTER: return "show clusters";
case TSDB_MGMT_TABLE_STREAMTABLES : return "show streamtables";
default: return "undefined";
}
}
......
......@@ -1223,6 +1223,55 @@ static int32_t mnodeDropSuperTableColumn(SMnodeMsg *pMsg, char *colName) {
return code;
}
static int32_t mnodeChangeSuperTableColumnCb(SMnodeMsg *pMsg, int32_t code) {
SSuperTableObj *pStable = (SSuperTableObj *)pMsg->pTable;
mLInfo("app:%p:%p, stable %s, change column result:%s", pMsg->rpcMsg.ahandle, pMsg, pStable->info.tableId,
tstrerror(code));
return code;
}
static int32_t mnodeChangeSuperTableColumn(SMnodeMsg *pMsg, char *oldName, char *newName) {
SSuperTableObj *pStable = (SSuperTableObj *)pMsg->pTable;
int32_t col = mnodeFindSuperTableColumnIndex(pStable, oldName);
if (col < 0) {
mError("app:%p:%p, stable:%s, change column, oldName: %s, newName: %s", pMsg->rpcMsg.ahandle, pMsg,
pStable->info.tableId, oldName, newName);
return TSDB_CODE_MND_FIELD_NOT_EXIST;
}
// int32_t rowSize = 0;
uint32_t len = strlen(newName);
if (len >= TSDB_COL_NAME_LEN) {
return TSDB_CODE_MND_COL_NAME_TOO_LONG;
}
if (mnodeFindSuperTableColumnIndex(pStable, newName) >= 0) {
return TSDB_CODE_MND_FIELD_ALREAY_EXIST;
}
// update
SSchema *schema = (SSchema *) (pStable->schema + col);
tstrncpy(schema->name, newName, sizeof(schema->name));
mInfo("app:%p:%p, stable %s, start to modify column %s to %s", pMsg->rpcMsg.ahandle, pMsg, pStable->info.tableId,
oldName, newName);
SSdbOper oper = {
.type = SDB_OPER_GLOBAL,
.table = tsSuperTableSdb,
.pObj = pStable,
.pMsg = pMsg,
.cb = mnodeChangeSuperTableColumnCb
};
int32_t code = sdbUpdateRow(&oper);
if (code == TSDB_CODE_SUCCESS) {
code = TSDB_CODE_MND_ACTION_IN_PROGRESS;
}
return code;
}
// show super tables
static int32_t mnodeGetShowSuperTableMeta(STableMetaMsg *pMeta, SShowObj *pShow, void *pConn) {
SDbObj *pDb = mnodeGetDb(pShow->db);
......@@ -1405,6 +1454,9 @@ static int32_t mnodeSetSchemaFromSuperTable(SSchema *pSchema, SSuperTableObj *pT
static int32_t mnodeGetSuperTableMeta(SMnodeMsg *pMsg) {
SSuperTableObj *pTable = (SSuperTableObj *)pMsg->pTable;
STableMetaMsg *pMeta = rpcMallocCont(sizeof(STableMetaMsg) + sizeof(SSchema) * (TSDB_MAX_TAGS + TSDB_MAX_COLUMNS + 16));
if (pMeta == NULL) {
return TSDB_CODE_MND_OUT_OF_MEMORY;
}
pMeta->uid = htobe64(pTable->uid);
pMeta->sversion = htons(pTable->sversion);
pMeta->tversion = htons(pTable->tversion);
......@@ -1977,6 +2029,48 @@ static int32_t mnodeDropNormalTableColumn(SMnodeMsg *pMsg, char *colName) {
return code;
}
static int32_t mnodeChangeNormalTableColumn(SMnodeMsg *pMsg, char *oldName, char *newName) {
SChildTableObj *pTable = (SChildTableObj *)pMsg->pTable;
int32_t col = mnodeFindNormalTableColumnIndex(pTable, oldName);
if (col < 0) {
mError("app:%p:%p, ctable:%s, change column, oldName: %s, newName: %s", pMsg->rpcMsg.ahandle, pMsg,
pTable->info.tableId, oldName, newName);
return TSDB_CODE_MND_FIELD_NOT_EXIST;
}
// int32_t rowSize = 0;
uint32_t len = strlen(newName);
if (len >= TSDB_COL_NAME_LEN) {
return TSDB_CODE_MND_COL_NAME_TOO_LONG;
}
if (mnodeFindNormalTableColumnIndex(pTable, newName) >= 0) {
return TSDB_CODE_MND_FIELD_ALREAY_EXIST;
}
// update
SSchema *schema = (SSchema *) (pTable->schema + col);
tstrncpy(schema->name, newName, sizeof(schema->name));
mInfo("app:%p:%p, ctable %s, start to modify column %s to %s", pMsg->rpcMsg.ahandle, pMsg, pTable->info.tableId,
oldName, newName);
SSdbOper oper = {
.type = SDB_OPER_GLOBAL,
.table = tsChildTableSdb,
.pObj = pTable,
.pMsg = pMsg,
.cb = mnodeAlterNormalTableColumnCb
};
int32_t code = sdbUpdateRow(&oper);
if (code == TSDB_CODE_SUCCESS) {
code = TSDB_CODE_MND_ACTION_IN_PROGRESS;
}
return code;
}
static int32_t mnodeSetSchemaFromNormalTable(SSchema *pSchema, SChildTableObj *pTable) {
int32_t numOfCols = pTable->numOfColumns;
for (int32_t i = 0; i < numOfCols; ++i) {
......@@ -2596,6 +2690,8 @@ static int32_t mnodeProcessAlterTableMsg(SMnodeMsg *pMsg) {
code = mnodeAddSuperTableColumn(pMsg, pAlter->schema, 1);
} else if (pAlter->type == TSDB_ALTER_TABLE_DROP_COLUMN) {
code = mnodeDropSuperTableColumn(pMsg, pAlter->schema[0].name);
} else if (pAlter->type == TSDB_ALTER_TABLE_CHANGE_COLUMN) {
code = mnodeChangeSuperTableColumn(pMsg, pAlter->schema[0].name, pAlter->schema[1].name);
} else {
}
} else {
......@@ -2606,6 +2702,8 @@ static int32_t mnodeProcessAlterTableMsg(SMnodeMsg *pMsg) {
code = mnodeAddNormalTableColumn(pMsg, pAlter->schema, 1);
} else if (pAlter->type == TSDB_ALTER_TABLE_DROP_COLUMN) {
code = mnodeDropNormalTableColumn(pMsg, pAlter->schema[0].name);
} else if (pAlter->type == TSDB_ALTER_TABLE_CHANGE_COLUMN) {
code = mnodeChangeNormalTableColumn(pMsg, pAlter->schema[0].name, pAlter->schema[1].name);
} else {
}
}
......
......@@ -42,7 +42,7 @@ typedef struct SSqlGroupbyExpr {
} SSqlGroupbyExpr;
typedef struct SPosInfo {
int16_t pageId;
int32_t pageId;
int16_t rowId;
} SPosInfo;
......
......@@ -5913,8 +5913,10 @@ _cleanup_qinfo:
tsdbDestroyTableGroup(pTableGroupInfo);
_cleanup_query:
taosArrayDestroy(pGroupbyExpr->columnInfo);
tfree(pGroupbyExpr);
if (pGroupbyExpr != NULL) {
taosArrayDestroy(pGroupbyExpr->columnInfo);
free(pGroupbyExpr);
}
tfree(pTagCols);
for (int32_t i = 0; i < numOfOutput; ++i) {
SExprInfo* pExprInfo = &pExprs[i];
......
......@@ -52,7 +52,7 @@ int32_t getResBufSize(SDiskbasedResultBuf* pResultBuf) { return pResultBuf->tota
#define FILE_SIZE_ON_DISK(_r) (NUM_OF_PAGES_ON_DISK(_r) * (_r)->pageSize)
static int32_t createDiskResidesBuf(SDiskbasedResultBuf* pResultBuf) {
pResultBuf->fd = open(pResultBuf->path, O_CREAT | O_RDWR, 0666);
pResultBuf->fd = open(pResultBuf->path, O_CREAT | O_RDWR | O_TRUNC, 0666);
if (!FD_VALID(pResultBuf->fd)) {
qError("failed to create tmp file: %s on disk. %s", pResultBuf->path, strerror(errno));
return TAOS_SYSTEM_ERROR(errno);
......
......@@ -41,6 +41,9 @@ int32_t initWindowResInfo(SWindowResInfo *pWindowResInfo, SQueryRuntimeEnv *pRun
pWindowResInfo->type = type;
_hash_fn_t fn = taosGetDefaultHashFunction(type);
pWindowResInfo->hashList = taosHashInit(threshold, fn, false);
if (pWindowResInfo->hashList == NULL) {
return TSDB_CODE_QRY_OUT_OF_MEMORY;
}
pWindowResInfo->curIndex = -1;
pWindowResInfo->size = 0;
......
......@@ -660,7 +660,7 @@ static SRpcConn *rpcAllocateClientConn(SRpcInfo *pRpc) {
pConn->spi = pRpc->spi;
pConn->encrypt = pRpc->encrypt;
if (pConn->spi) memcpy(pConn->secret, pRpc->secret, TSDB_KEY_LEN);
tDebug("%s %p client connection is allocated", pRpc->label, pConn);
tDebug("%s %p client connection is allocated, uid:0x%x", pRpc->label, pConn, pConn->linkUid);
}
return pConn;
......@@ -721,7 +721,7 @@ static SRpcConn *rpcAllocateServerConn(SRpcInfo *pRpc, SRecvInfo *pRecv) {
}
taosHashPut(pRpc->hash, hashstr, size, (char *)&pConn, POINTER_BYTES);
tDebug("%s %p server connection is allocated", pRpc->label, pConn);
tDebug("%s %p server connection is allocated, uid:0x%x", pRpc->label, pConn, pConn->linkUid);
}
return pConn;
......@@ -848,6 +848,16 @@ static int rpcProcessRspHead(SRpcConn *pConn, SRpcHead *pHead) {
return TSDB_CODE_RPC_ALREADY_PROCESSED;
}
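// The peer no longer recognizes this link id: clear the secured flag and
// destId so the connection re-registers with the peer, then resend the
// original request; connectionless (non-TCP) links also arm a retry timer.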
if (pHead->code == TSDB_CODE_RPC_MISMATCHED_LINK_ID) {
tDebug("%s, mismatched linkUid, link shall be restarted", pConn->info);
pConn->secured = 0;
((SRpcHead *)pConn->pReqMsg)->destId = 0;
rpcSendMsgToPeer(pConn, pConn->pReqMsg, pConn->reqMsgLen);
if (pConn->connType != RPC_CONN_TCPC)
pConn->pTimer = taosTmrStart(rpcProcessRetryTimer, tsRpcTimer, pConn, pRpc->tmrCtrl);
return TSDB_CODE_RPC_ALREADY_PROCESSED;
}
if (pHead->code == TSDB_CODE_RPC_ACTION_IN_PROGRESS) {
if (pConn->tretry <= tsRpcMaxRetry) {
tDebug("%s, peer is still processing the transaction, retry:%d", pConn->info, pConn->tretry);
......
......@@ -70,6 +70,7 @@ typedef struct {
pthread_rwlock_t rwLock;
int32_t nTables;
int32_t maxTables;
STable** tables;
SList* superList;
SHashObj* uidMap;
......@@ -111,9 +112,11 @@ typedef struct {
typedef struct {
T_REF_DECLARE();
SRWLatch latch;
TSKEY keyFirst;
TSKEY keyLast;
int64_t numOfRows;
int32_t maxTables;
STableData** tData;
SList* actList;
SList* bufBlockList;
......@@ -304,6 +307,7 @@ typedef struct {
// Operations
// ------------------ tsdbMeta.c
#define TSDB_INIT_NTABLES 1024
#define TABLE_TYPE(t) (t)->type
#define TABLE_NAME(t) (t)->name
#define TABLE_CHAR_NAME(t) TABLE_NAME(t)->data
......@@ -395,6 +399,7 @@ int tsdbInsertRowToMem(STsdbRepo* pRepo, SDataRow row, STable* pTable);
int tsdbRefMemTable(STsdbRepo* pRepo, SMemTable* pMemTable);
int tsdbUnRefMemTable(STsdbRepo* pRepo, SMemTable* pMemTable);
int tsdbTakeMemSnapshot(STsdbRepo* pRepo, SMemTable** pMem, SMemTable** pIMem);
void tsdbUnTakeMemSnapShot(STsdbRepo* pRepo, SMemTable* pMem, SMemTable* pIMem);
void* tsdbAllocBytes(STsdbRepo* pRepo, int bytes);
int tsdbAsyncCommit(STsdbRepo* pRepo);
int tsdbLoadDataFromCache(STable* pTable, SSkipListIterator* pIter, TSKEY maxKey, int maxRowsToRead, SDataCols* pCols,
......@@ -429,7 +434,7 @@ STsdbFileH* tsdbNewFileH(STsdbCfg* pCfg);
void tsdbFreeFileH(STsdbFileH* pFileH);
int tsdbOpenFileH(STsdbRepo* pRepo);
void tsdbCloseFileH(STsdbRepo* pRepo);
SFileGroup* tsdbCreateFGroupIfNeed(STsdbRepo* pRepo, char* dataDir, int fid, int maxTables);
SFileGroup* tsdbCreateFGroupIfNeed(STsdbRepo* pRepo, char* dataDir, int fid);
void tsdbInitFileGroupIter(STsdbFileH* pFileH, SFileGroupIter* pIter, int direction);
void tsdbSeekFileGroupIter(SFileGroupIter* pIter, int fid);
SFileGroup* tsdbGetFileGroupNext(SFileGroupIter* pIter);
......@@ -511,6 +516,7 @@ void tsdbGetDataFileName(STsdbRepo* pRepo, int fid, int type, char* fname
int tsdbLockRepo(STsdbRepo* pRepo);
int tsdbUnlockRepo(STsdbRepo* pRepo);
char* tsdbGetDataDirName(char* rootDir);
int tsdbGetNextMaxTables(int tid);
STsdbMeta* tsdbGetMeta(TSDB_REPO_T* pRepo);
STsdbFileH* tsdbGetFile(TSDB_REPO_T* pRepo);
......
......@@ -149,7 +149,7 @@ void tsdbCloseFileH(STsdbRepo *pRepo) {
}
}
SFileGroup *tsdbCreateFGroupIfNeed(STsdbRepo *pRepo, char *dataDir, int fid, int maxTables) {
SFileGroup *tsdbCreateFGroupIfNeed(STsdbRepo *pRepo, char *dataDir, int fid) {
STsdbFileH *pFileH = pRepo->tsdbFileH;
if (pFileH->nFGroups >= pFileH->maxFGroups) return NULL;
......
......@@ -62,7 +62,6 @@ static int tsdbRestoreInfo(STsdbRepo *pRepo);
static int tsdbInitSubmitBlkIter(SSubmitBlk *pBlock, SSubmitBlkIter *pIter);
static void tsdbAlterCompression(STsdbRepo *pRepo, int8_t compression);
static int tsdbAlterKeep(STsdbRepo *pRepo, int32_t keep);
static int tsdbAlterMaxTables(STsdbRepo *pRepo, int32_t maxTables);
static int tsdbAlterCacheTotalBlocks(STsdbRepo *pRepo, int totalBlocks);
static int keyFGroupCompFunc(const void *key, const void *fgroup);
static int tsdbEncodeCfg(void **buf, STsdbCfg *pCfg);
......@@ -85,10 +84,10 @@ int32_t tsdbCreateRepo(char *rootDir, STsdbCfg *pCfg) {
if (tsdbSetRepoEnv(rootDir, pCfg) < 0) return -1;
tsdbDebug(
"vgId:%d tsdb env create succeed! cacheBlockSize %d totalBlocks %d maxTables %d daysPerFile %d keep "
"vgId:%d tsdb env create succeed! cacheBlockSize %d totalBlocks %d daysPerFile %d keep "
"%d minRowsPerFileBlock %d maxRowsPerFileBlock %d precision %d compression %d",
pCfg->tsdbId, pCfg->cacheBlockSize, pCfg->totalBlocks, pCfg->maxTables, pCfg->daysPerFile, pCfg->keep,
pCfg->minRowsPerFileBlock, pCfg->maxRowsPerFileBlock, pCfg->precision, pCfg->compression);
pCfg->tsdbId, pCfg->cacheBlockSize, pCfg->totalBlocks, pCfg->daysPerFile, pCfg->keep, pCfg->minRowsPerFileBlock,
pCfg->maxRowsPerFileBlock, pCfg->precision, pCfg->compression);
return 0;
}
......@@ -307,13 +306,6 @@ int32_t tsdbConfigRepo(TSDB_REPO_T *repo, STsdbCfg *pCfg) {
tsdbAlterCacheTotalBlocks(pRepo, pCfg->totalBlocks);
configChanged = true;
}
if (pRCfg->maxTables != pCfg->maxTables) {
if (tsdbAlterMaxTables(pRepo, pCfg->maxTables) < 0) {
tsdbError("vgId:%d failed to configure repo when alter maxTables since %s", REPO_ID(pRepo), tstrerror(terrno));
return -1;
}
configChanged = true;
}
if (configChanged) {
if (tsdbSaveConfig(pRepo->rootDir, &pRepo->config) < 0) {
......@@ -385,6 +377,18 @@ char *tsdbGetDataDirName(char *rootDir) {
return fname;
}
int tsdbGetNextMaxTables(int tid) {
ASSERT(tid >= 1 && tid <= TSDB_MAX_TABLES);
int maxTables = TSDB_INIT_NTABLES;
while (true) {
maxTables = MIN(maxTables, TSDB_MAX_TABLES);
if (tid <= maxTables) break;
maxTables *= 2;
}
return maxTables + 1;
}
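A worked example of the doubling loop above, with TSDB_INIT_NTABLES = 1024 as defined in tsdbMain.h:

  // tid = 5000: maxTables grows 1024 -> 2048 -> 4096 -> 8192;
  // 5000 <= 8192 stops the loop, and the function returns 8193,
  // one extra slot because tid 0 is never used.
  int maxTables = tsdbGetNextMaxTables(5000);  // == 8193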
STsdbMeta * tsdbGetMeta(TSDB_REPO_T *pRepo) { return ((STsdbRepo *)pRepo)->tsdbMeta; }
STsdbFileH * tsdbGetFile(TSDB_REPO_T *pRepo) { return ((STsdbRepo *)pRepo)->tsdbFileH; }
STsdbRepoInfo *tsdbGetStatus(TSDB_REPO_T *pRepo) { return NULL; }
......@@ -417,17 +421,6 @@ static int32_t tsdbCheckAndSetDefaultCfg(STsdbCfg *pCfg) {
goto _err;
}
// Check maxTables
if (pCfg->maxTables == -1) {
pCfg->maxTables = TSDB_DEFAULT_TABLES+1;
} else {
if (pCfg->maxTables - 1 < TSDB_MIN_TABLES || pCfg->maxTables - 1 > TSDB_MAX_TABLES) {
tsdbError("vgId:%d invalid maxTables configuration! maxTables %d TSDB_MIN_TABLES %d TSDB_MAX_TABLES %d",
pCfg->tsdbId, pCfg->maxTables - 1, TSDB_MIN_TABLES, TSDB_MAX_TABLES);
goto _err;
}
}
// Check daysPerFile
if (pCfg->daysPerFile == -1) {
pCfg->daysPerFile = TSDB_DEFAULT_DAYS_PER_FILE;
......@@ -713,6 +706,7 @@ static int32_t tsdbInsertDataToTable(STsdbRepo *pRepo, SSubmitBlk *pBlock, TSKEY
STsdbMeta *pMeta = pRepo->tsdbMeta;
int64_t points = 0;
ASSERT(pBlock->tid < pMeta->maxTables);
STable *pTable = pMeta->tables[pBlock->tid];
ASSERT(pTable != NULL && TABLE_UID(pTable) == pBlock->uid);
......@@ -779,7 +773,6 @@ static SDataRow tsdbGetSubmitBlkNext(SSubmitBlkIter *pIter) {
}
static int tsdbRestoreInfo(STsdbRepo *pRepo) {
// TODO
STsdbMeta * pMeta = pRepo->tsdbMeta;
STsdbFileH *pFileH = pRepo->tsdbFileH;
SFileGroup *pFGroup = NULL;
......@@ -792,7 +785,7 @@ static int tsdbRestoreInfo(STsdbRepo *pRepo) {
tsdbInitFileGroupIter(pFileH, &iter, TSDB_ORDER_DESC);
while ((pFGroup = tsdbGetFileGroupNext(&iter)) != NULL) {
if (tsdbSetAndOpenHelperFile(&rhelper, pFGroup) < 0) goto _err;
for (int i = 1; i < pRepo->config.maxTables; i++) {
for (int i = 1; i < pMeta->maxTables; i++) {
STable *pTable = pMeta->tables[i];
if (pTable == NULL) continue;
tsdbSetHelperTable(&rhelper, pTable, pRepo);
......@@ -868,36 +861,6 @@ static int tsdbAlterKeep(STsdbRepo *pRepo, int32_t keep) {
return 0;
}
static int tsdbAlterMaxTables(STsdbRepo *pRepo, int32_t maxTables) {
// TODO
int oldMaxTables = pRepo->config.maxTables;
if (oldMaxTables < pRepo->config.maxTables) {
terrno = TSDB_CODE_TDB_INVALID_ACTION;
return -1;
}
STsdbMeta *pMeta = pRepo->tsdbMeta;
pMeta->tables = realloc(pMeta->tables, maxTables * sizeof(STable *));
memset(&pMeta->tables[oldMaxTables], 0, sizeof(STable *) * (maxTables - oldMaxTables));
pRepo->config.maxTables = maxTables;
if (pRepo->mem) {
pRepo->mem->tData = realloc(pRepo->mem->tData, maxTables * sizeof(STableData *));
memset(POINTER_SHIFT(pRepo->mem->tData, sizeof(STableData *) * oldMaxTables), 0,
sizeof(STableData *) * (maxTables - oldMaxTables));
}
if (pRepo->imem) {
pRepo->imem->tData = realloc(pRepo->imem->tData, maxTables * sizeof(STableData *));
memset(POINTER_SHIFT(pRepo->imem->tData, sizeof(STableData *) * oldMaxTables), 0,
sizeof(STableData *) * (maxTables - oldMaxTables));
}
tsdbDebug("vgId:%d, tsdb maxTables is changed from %d to %d!", pRepo->config.tsdbId, oldMaxTables, maxTables);
return 0;
}
static int keyFGroupCompFunc(const void *key, const void *fgroup) {
int fid = *(int *)key;
SFileGroup *pFGroup = (SFileGroup *)fgroup;
......@@ -914,7 +877,6 @@ static int tsdbEncodeCfg(void **buf, STsdbCfg *pCfg) {
tlen += taosEncodeVariantI32(buf, pCfg->tsdbId);
tlen += taosEncodeFixedI32(buf, pCfg->cacheBlockSize);
tlen += taosEncodeVariantI32(buf, pCfg->totalBlocks);
tlen += taosEncodeVariantI32(buf, pCfg->maxTables);
tlen += taosEncodeVariantI32(buf, pCfg->daysPerFile);
tlen += taosEncodeVariantI32(buf, pCfg->keep);
tlen += taosEncodeVariantI32(buf, pCfg->keep1);
......@@ -931,7 +893,6 @@ static void *tsdbDecodeCfg(void *buf, STsdbCfg *pCfg) {
buf = taosDecodeVariantI32(buf, &(pCfg->tsdbId));
buf = taosDecodeFixedI32(buf, &(pCfg->cacheBlockSize));
buf = taosDecodeVariantI32(buf, &(pCfg->totalBlocks));
buf = taosDecodeVariantI32(buf, &(pCfg->maxTables));
buf = taosDecodeVariantI32(buf, &(pCfg->daysPerFile));
buf = taosDecodeVariantI32(buf, &(pCfg->keep));
buf = taosDecodeVariantI32(buf, &(pCfg->keep1));
......@@ -1037,7 +998,7 @@ static int tsdbScanAndConvertSubmitMsg(STsdbRepo *pRepo, SSubmitMsg *pMsg) {
pBlock->schemaLen = htonl(pBlock->schemaLen);
pBlock->numOfRows = htons(pBlock->numOfRows);
if (pBlock->tid <= 0 || pBlock->tid >= pRepo->config.maxTables) {
if (pBlock->tid <= 0 || pBlock->tid >= pMeta->maxTables) {
tsdbError("vgId:%d failed to get table to insert data, uid %" PRIu64 " tid %d", REPO_ID(pRepo), pBlock->uid,
pBlock->tid);
terrno = TSDB_CODE_TDB_INVALID_TABLE_ID;
......@@ -1120,7 +1081,7 @@ TSKEY tsdbGetTableLastKey(TSDB_REPO_T *repo, uint64_t uid) {
static void tsdbStartStream(STsdbRepo *pRepo) {
STsdbMeta *pMeta = pRepo->tsdbMeta;
for (int i = 0; i < pRepo->config.maxTables; i++) {
for (int i = 0; i < pMeta->maxTables; i++) {
STable *pTable = pMeta->tables[i];
if (pTable && pTable->type == TSDB_STREAM_TABLE) {
pTable->cqhandle = (*pRepo->appH.cqCreateFunc)(pRepo->appH.cqH, TABLE_UID(pTable), TABLE_TID(pTable), pTable->sql,
......@@ -1133,7 +1094,7 @@ static void tsdbStartStream(STsdbRepo *pRepo) {
static void tsdbStopStream(STsdbRepo *pRepo) {
STsdbMeta *pMeta = pRepo->tsdbMeta;
for (int i = 0; i < pRepo->config.maxTables; i++) {
for (int i = 0; i < pMeta->maxTables; i++) {
STable *pTable = pMeta->tables[i];
if (pTable && pTable->type == TSDB_STREAM_TABLE) {
(*pRepo->appH.cqDropFunc)(pTable->cqhandle);
......
......@@ -21,7 +21,7 @@
static FORCE_INLINE STsdbBufBlock *tsdbGetCurrBufBlock(STsdbRepo *pRepo);
static void tsdbFreeBytes(STsdbRepo *pRepo, void *ptr, int bytes);
static SMemTable * tsdbNewMemTable(STsdbCfg *pCfg);
static SMemTable * tsdbNewMemTable(STsdbRepo *pRepo);
static void tsdbFreeMemTable(SMemTable *pMemTable);
static STableData *tsdbNewTableData(STsdbCfg *pCfg, STable *pTable);
static void tsdbFreeTableData(STableData *pTableData);
......@@ -30,13 +30,15 @@ static void * tsdbCommitData(void *arg);
static int tsdbCommitMeta(STsdbRepo *pRepo);
static void tsdbEndCommit(STsdbRepo *pRepo);
static int tsdbHasDataToCommit(SCommitIter *iters, int nIters, TSKEY minKey, TSKEY maxKey);
static int tsdbCommitToFile(STsdbRepo *pRepo, int fid, SCommitIter *iters, SRWHelper *pHelper, SDataCols *pDataCols);
static int tsdbCommitToFile(STsdbRepo *pRepo, int fid, SCommitIter *iters, SRWHelper *pHelper, SDataCols *pDataCols);
static SCommitIter *tsdbCreateCommitIters(STsdbRepo *pRepo);
static void tsdbDestroyCommitIters(SCommitIter *iters, int maxTables);
static int tsdbAdjustMemMaxTables(SMemTable *pMemTable, int maxTables);
// ---------------- INTERNAL FUNCTIONS ----------------
int tsdbInsertRowToMem(STsdbRepo *pRepo, SDataRow row, STable *pTable) {
STsdbCfg * pCfg = &pRepo->config;
STsdbMeta * pMeta = pRepo->tsdbMeta;
int32_t level = 0;
int32_t headSize = 0;
TSKEY key = dataRowKey(row);
......@@ -45,7 +47,7 @@ int tsdbInsertRowToMem(STsdbRepo *pRepo, SDataRow row, STable *pTable) {
SSkipList * pSList = NULL;
int bytes = 0;
if (pMemTable != NULL && pMemTable->tData[TABLE_TID(pTable)] != NULL &&
if (pMemTable != NULL && TABLE_TID(pTable) < pMemTable->maxTables && pMemTable->tData[TABLE_TID(pTable)] != NULL &&
pMemTable->tData[TABLE_TID(pTable)]->uid == TABLE_UID(pTable)) {
pTableData = pMemTable->tData[TABLE_TID(pTable)];
pSList = pTableData->pData;
......@@ -66,13 +68,20 @@ int tsdbInsertRowToMem(STsdbRepo *pRepo, SDataRow row, STable *pTable) {
// Operations above may change pRepo->mem, retake those values
ASSERT(pRepo->mem != NULL);
pMemTable = pRepo->mem;
if (TABLE_TID(pTable) >= pMemTable->maxTables) {
if (tsdbAdjustMemMaxTables(pMemTable, pMeta->maxTables) < 0) return -1;
}
pTableData = pMemTable->tData[TABLE_TID(pTable)];
if (pTableData == NULL || pTableData->uid != TABLE_UID(pTable)) {
if (pTableData != NULL) { // destroy the table skiplist (may have race condition problem)
taosWLockLatch(&(pMemTable->latch));
pMemTable->tData[TABLE_TID(pTable)] = NULL;
tsdbFreeTableData(pTableData);
taosWUnLockLatch(&(pMemTable->latch));
}
pTableData = tsdbNewTableData(pCfg, pTable);
if (pTableData == NULL) {
tsdbError("vgId:%d failed to insert row with key %" PRId64
......@@ -122,7 +131,6 @@ int tsdbUnRefMemTable(STsdbRepo *pRepo, SMemTable *pMemTable) {
int ref = T_REF_DEC(pMemTable);
tsdbDebug("vgId:%d unref memtable %p ref %d", REPO_ID(pRepo), pMemTable, ref);
if (ref == 0) {
STsdbCfg * pCfg = &pRepo->config;
STsdbBufPool *pBufPool = pRepo->pPool;
SListNode *pNode = NULL;
......@@ -139,7 +147,7 @@ int tsdbUnRefMemTable(STsdbRepo *pRepo, SMemTable *pMemTable) {
}
if (tsdbUnlockRepo(pRepo) < 0) return -1;
for (int i = 0; i < pCfg->maxTables; i++) {
for (int i = 0; i < pMemTable->maxTables; i++) {
if (pMemTable->tData[i] != NULL) {
tsdbFreeTableData(pMemTable->tData[i]);
}
......@@ -161,11 +169,24 @@ int tsdbTakeMemSnapshot(STsdbRepo *pRepo, SMemTable **pMem, SMemTable **pIMem) {
tsdbRefMemTable(pRepo, *pIMem);
if (tsdbUnlockRepo(pRepo) < 0) return -1;
tsdbDebug("vgId:%d take memory snapshot, pMem %p pIMem %p", REPO_ID(pRepo), *pMem, *pIMem);
if (*pMem != NULL) taosRLockLatch(&((*pMem)->latch));
tsdbDebug("vgId:%d take memory snapshot, pMem %p pIMem %p", REPO_ID(pRepo), *pMem, *pIMem);
return 0;
}
void tsdbUnTakeMemSnapShot(STsdbRepo *pRepo, SMemTable *pMem, SMemTable *pIMem) {
if (pMem != NULL) {
taosRUnLockLatch(&(pMem->latch));
tsdbUnRefMemTable(pRepo, pMem);
}
if (pIMem != NULL) {
tsdbUnRefMemTable(pRepo, pIMem);
}
}
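A hypothetical caller sketch; the pairing is implied by the latch added above (only the two tsdb functions are from the diff):

  SMemTable *pMem = NULL, *pIMem = NULL;
  if (tsdbTakeMemSnapshot(pRepo, &pMem, &pIMem) == 0) {
    // read from pMem/pIMem; the read latch on pMem blocks concurrent resizes
    tsdbUnTakeMemSnapShot(pRepo, pMem, pIMem);  // releases the latch and refs
  }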
void *tsdbAllocBytes(STsdbRepo *pRepo, int bytes) {
STsdbCfg * pCfg = &pRepo->config;
STsdbBufBlock *pBufBlock = tsdbGetCurrBufBlock(pRepo);
......@@ -182,7 +203,7 @@ void *tsdbAllocBytes(STsdbRepo *pRepo, int bytes) {
}
if (pRepo->mem == NULL) {
SMemTable *pMemTable = tsdbNewMemTable(&pRepo->config);
SMemTable *pMemTable = tsdbNewMemTable(pRepo);
if (pMemTable == NULL) return NULL;
if (tsdbLockRepo(pRepo) < 0) {
......@@ -329,7 +350,9 @@ static void tsdbFreeBytes(STsdbRepo *pRepo, void *ptr, int bytes) {
listNEles(pRepo->mem->bufBlockList), pBufBlock->offset, pBufBlock->remain);
}
static SMemTable* tsdbNewMemTable(STsdbCfg* pCfg) {
static SMemTable* tsdbNewMemTable(STsdbRepo *pRepo) {
STsdbMeta *pMeta = pRepo->tsdbMeta;
SMemTable *pMemTable = (SMemTable *)calloc(1, sizeof(*pMemTable));
if (pMemTable == NULL) {
terrno = TSDB_CODE_TDB_OUT_OF_MEMORY;
......@@ -340,7 +363,8 @@ static SMemTable* tsdbNewMemTable(STsdbCfg* pCfg) {
pMemTable->keyLast = 0;
pMemTable->numOfRows = 0;
pMemTable->tData = (STableData**)calloc(pCfg->maxTables, sizeof(STableData*));
pMemTable->maxTables = pMeta->maxTables;
pMemTable->tData = (STableData **)calloc(pMemTable->maxTables, sizeof(STableData *));
if (pMemTable->tData == NULL) {
terrno = TSDB_CODE_TDB_OUT_OF_MEMORY;
goto _err;
......@@ -398,9 +422,6 @@ static STableData *tsdbNewTableData(STsdbCfg *pCfg, STable *pTable) {
goto _err;
}
// TODO: operation here should not be here, remove it
pTableData->pData->level = 1;
return pTableData;
_err:
......@@ -473,7 +494,7 @@ static void *tsdbCommitData(void *arg) {
_exit:
tdFreeDataCols(pDataCols);
tsdbDestroyCommitIters(iters, pCfg->maxTables);
tsdbDestroyCommitIters(iters, pMem->maxTables);
tsdbDestroyHelper(&whelper);
tsdbEndCommit(pRepo);
tsdbInfo("vgId:%d commit over", pRepo->config.tsdbId);
......@@ -552,12 +573,13 @@ static int tsdbCommitToFile(STsdbRepo *pRepo, int fid, SCommitIter *iters, SRWHe
STsdbCfg * pCfg = &pRepo->config;
STsdbFileH *pFileH = pRepo->tsdbFileH;
SFileGroup *pGroup = NULL;
SMemTable * pMem = pRepo->imem;
TSKEY minKey = 0, maxKey = 0;
tsdbGetFidKeyRange(pCfg->daysPerFile, pCfg->precision, fid, &minKey, &maxKey);
// Check if there are data to commit to this file
int hasDataToCommit = tsdbHasDataToCommit(iters, pCfg->maxTables, minKey, maxKey);
int hasDataToCommit = tsdbHasDataToCommit(iters, pMem->maxTables, minKey, maxKey);
if (!hasDataToCommit) {
tsdbDebug("vgId:%d no data to commit to file %d", REPO_ID(pRepo), fid);
return 0;
......@@ -570,7 +592,7 @@ static int tsdbCommitToFile(STsdbRepo *pRepo, int fid, SCommitIter *iters, SRWHe
return -1;
}
if ((pGroup = tsdbCreateFGroupIfNeed(pRepo, dataDir, fid, pCfg->maxTables)) == NULL) {
if ((pGroup = tsdbCreateFGroupIfNeed(pRepo, dataDir, fid)) == NULL) {
tsdbError("vgId:%d failed to create file group %d since %s", REPO_ID(pRepo), fid, tstrerror(terrno));
goto _err;
}
......@@ -582,7 +604,7 @@ static int tsdbCommitToFile(STsdbRepo *pRepo, int fid, SCommitIter *iters, SRWHe
}
// Loop to commit data in each table
for (int tid = 1; tid < pCfg->maxTables; tid++) {
for (int tid = 1; tid < pMem->maxTables; tid++) {
SCommitIter *pIter = iters + tid;
if (pIter->pTable == NULL) continue;
......@@ -643,11 +665,10 @@ _err:
}
static SCommitIter *tsdbCreateCommitIters(STsdbRepo *pRepo) {
STsdbCfg * pCfg = &(pRepo->config);
SMemTable *pMem = pRepo->imem;
STsdbMeta *pMeta = pRepo->tsdbMeta;
SCommitIter *iters = (SCommitIter *)calloc(pCfg->maxTables, sizeof(SCommitIter));
SCommitIter *iters = (SCommitIter *)calloc(pMem->maxTables, sizeof(SCommitIter));
if (iters == NULL) {
terrno = TSDB_CODE_TDB_OUT_OF_MEMORY;
return NULL;
......@@ -656,7 +677,7 @@ static SCommitIter *tsdbCreateCommitIters(STsdbRepo *pRepo) {
if (tsdbRLockRepoMeta(pRepo) < 0) goto _err;
// reference all tables
for (int i = 0; i < pCfg->maxTables; i++) {
for (int i = 0; i < pMem->maxTables; i++) {
if (pMeta->tables[i] != NULL) {
tsdbRefTable(pMeta->tables[i]);
iters[i].pTable = pMeta->tables[i];
......@@ -665,7 +686,7 @@ static SCommitIter *tsdbCreateCommitIters(STsdbRepo *pRepo) {
if (tsdbUnlockRepoMeta(pRepo) < 0) goto _err;
for (int i = 0; i < pCfg->maxTables; i++) {
for (int i = 0; i < pMem->maxTables; i++) {
if ((iters[i].pTable != NULL) && (pMem->tData[i] != NULL) && (TABLE_UID(iters[i].pTable) == pMem->tData[i]->uid)) {
if ((iters[i].pIter = tSkipListCreateIter(pMem->tData[i]->pData)) == NULL) {
terrno = TSDB_CODE_TDB_OUT_OF_MEMORY;
......@@ -679,7 +700,7 @@ static SCommitIter *tsdbCreateCommitIters(STsdbRepo *pRepo) {
return iters;
_err:
tsdbDestroyCommitIters(iters, pCfg->maxTables);
tsdbDestroyCommitIters(iters, pMem->maxTables);
return NULL;
}
......@@ -694,4 +715,26 @@ static void tsdbDestroyCommitIters(SCommitIter *iters, int maxTables) {
}
free(iters);
}
static int tsdbAdjustMemMaxTables(SMemTable *pMemTable, int maxTables) {
ASSERT(pMemTable->maxTables < maxTables);
STableData **pTableData = (STableData **)calloc(maxTables, sizeof(STableData *));
if (pTableData == NULL) {
terrno = TSDB_CODE_TDB_OUT_OF_MEMORY;
return -1;
}
memcpy((void *)pTableData, (void *)pMemTable->tData, sizeof(STableData *) * pMemTable->maxTables);
STableData **tData = pMemTable->tData;
taosWLockLatch(&(pMemTable->latch));
pMemTable->maxTables = maxTables;
pMemTable->tData = pTableData;
taosWUnLockLatch(&(pMemTable->latch));
tfree(tData);
return 0;
}
\ No newline at end of file
......@@ -49,6 +49,7 @@ static int tsdbGetTableEncodeSize(int8_t act, STable *pTable);
static void * tsdbInsertTableAct(STsdbRepo *pRepo, int8_t act, void *buf, STable *pTable);
static int tsdbRemoveTableFromStore(STsdbRepo *pRepo, STable *pTable);
static int tsdbRmTableFromMeta(STsdbRepo *pRepo, STable *pTable);
static int tsdbAdjustMetaTables(STsdbRepo *pRepo, int tid);
// ------------------ OUTER FUNCTIONS ------------------
int tsdbCreateTable(TSDB_REPO_T *repo, STableCfg *pCfg) {
......@@ -60,13 +61,13 @@ int tsdbCreateTable(TSDB_REPO_T *repo, STableCfg *pCfg) {
int tid = pCfg->tableId.tid;
STable * pTable = NULL;
if (tid < 0 || tid >= pRepo->config.maxTables) {
if (tid < 1 || tid > TSDB_MAX_TABLES) {
tsdbError("vgId:%d failed to create table since invalid tid %d", REPO_ID(pRepo), tid);
terrno = TSDB_CODE_TDB_IVD_CREATE_TABLE_INFO;
goto _err;
}
if (pMeta->tables[tid] != NULL) {
if (tid < pMeta->maxTables && pMeta->tables[tid] != NULL) {
if (TABLE_UID(pMeta->tables[tid]) == pCfg->tableId.uid) {
tsdbError("vgId:%d table %s already exists, tid %d uid %" PRId64, REPO_ID(pRepo), TABLE_CHAR_NAME(pTable),
TABLE_TID(pTable), TABLE_UID(pTable));
......@@ -422,7 +423,8 @@ STsdbMeta *tsdbNewMeta(STsdbCfg *pCfg) {
goto _err;
}
pMeta->tables = (STable **)calloc(pCfg->maxTables, sizeof(STable *));
pMeta->maxTables = TSDB_INIT_NTABLES + 1;
pMeta->tables = (STable **)calloc(pMeta->maxTables, sizeof(STable *));
if (pMeta->tables == NULL) {
terrno = TSDB_CODE_TDB_OUT_OF_MEMORY;
goto _err;
......@@ -434,7 +436,7 @@ STsdbMeta *tsdbNewMeta(STsdbCfg *pCfg) {
goto _err;
}
pMeta->uidMap = taosHashInit(pCfg->maxTables, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), false);
pMeta->uidMap = taosHashInit(TSDB_INIT_NTABLES * 1.1, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), false);
if (pMeta->uidMap == NULL) {
terrno = TSDB_CODE_TDB_OUT_OF_MEMORY;
goto _err;
......@@ -484,14 +486,13 @@ _err:
}
int tsdbCloseMeta(STsdbRepo *pRepo) {
STsdbCfg * pCfg = &pRepo->config;
STsdbMeta *pMeta = pRepo->tsdbMeta;
SListNode *pNode = NULL;
STable * pTable = NULL;
if (pMeta == NULL) return 0;
tdCloseKVStore(pMeta->pStore);
for (int i = 1; i < pCfg->maxTables; i++) {
for (int i = 1; i < pMeta->maxTables; i++) {
tsdbFreeTable(pMeta->tables[i]);
}
......@@ -624,9 +625,8 @@ static int tsdbRestoreTable(void *pHandle, void *cont, int contLen) {
static void tsdbOrgMeta(void *pHandle) {
STsdbRepo *pRepo = (STsdbRepo *)pHandle;
STsdbMeta *pMeta = pRepo->tsdbMeta;
STsdbCfg * pCfg = &pRepo->config;
for (int i = 1; i < pCfg->maxTables; i++) {
for (int i = 1; i < pMeta->maxTables; i++) {
STable *pTable = pMeta->tables[i];
if (pTable != NULL && pTable->type == TSDB_CHILD_TABLE) {
tsdbAddTableIntoIndex(pMeta, pTable, true);
......@@ -781,6 +781,9 @@ static int tsdbAddTableToMeta(STsdbRepo *pRepo, STable *pTable, bool addIdx, boo
goto _err;
}
} else {
if (TABLE_TID(pTable) >= pMeta->maxTables) {
if (tsdbAdjustMetaTables(pRepo, TABLE_TID(pTable)) < 0) goto _err;
}
if (TABLE_TYPE(pTable) == TSDB_CHILD_TABLE && addIdx) { // add STABLE to the index
if (tsdbAddTableIntoIndex(pMeta, pTable, true) < 0) {
tsdbDebug("vgId:%d failed to add table %s to meta while add table to index since %s", REPO_ID(pRepo),
......@@ -788,6 +791,7 @@ static int tsdbAddTableToMeta(STsdbRepo *pRepo, STable *pTable, bool addIdx, boo
goto _err;
}
}
ASSERT(TABLE_TID(pTable) < pMeta->maxTables);
pMeta->tables[TABLE_TID(pTable)] = pTable;
pMeta->nTables++;
}
......@@ -827,7 +831,6 @@ static void tsdbRemoveTableFromMeta(STsdbRepo *pRepo, STable *pTable, bool rmFro
SListIter lIter = {0};
SListNode *pNode = NULL;
STable * tTable = NULL;
STsdbCfg * pCfg = &(pRepo->config);
STSchema *pSchema = tsdbGetTableSchemaImpl(pTable, false, false, -1);
int maxCols = schemaNCols(pSchema);
......@@ -860,7 +863,7 @@ static void tsdbRemoveTableFromMeta(STsdbRepo *pRepo, STable *pTable, bool rmFro
if (maxCols == pMeta->maxCols || maxRowBytes == pMeta->maxRowBytes) {
maxCols = 0;
maxRowBytes = 0;
for (int i = 0; i < pCfg->maxTables; i++) {
for (int i = 0; i < pMeta->maxTables; i++) {
STable *pTable = pMeta->tables[i];
if (pTable != NULL) {
pSchema = tsdbGetTableSchemaImpl(pTable, false, false, -1);
......@@ -1215,7 +1218,9 @@ static void *tsdbInsertTableAct(STsdbRepo *pRepo, int8_t act, void *buf, STable
static int tsdbRemoveTableFromStore(STsdbRepo *pRepo, STable *pTable) {
int tlen = tsdbGetTableEncodeSize(TSDB_DROP_META, pTable);
void *buf = tsdbAllocBytes(pRepo, tlen);
ASSERT(buf != NULL);
if (buf == NULL) {
return -1;
}
void *pBuf = buf;
if (TABLE_TYPE(pTable) == TSDB_SUPER_TABLE) {
......@@ -1266,5 +1271,28 @@ static int tsdbRmTableFromMeta(STsdbRepo *pRepo, STable *pTable) {
tsdbRemoveTableFromMeta(pRepo, pTable, true, true);
}
return 0;
}
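// Grow pMeta->tables on demand when a new table's tid exceeds the current
// capacity; tsdbGetNextMaxTables() picks the next size so that repeated
// table creation does not trigger a reallocation for every tid.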
static int tsdbAdjustMetaTables(STsdbRepo *pRepo, int tid) {
STsdbMeta *pMeta = pRepo->tsdbMeta;
ASSERT(tid >= pMeta->maxTables);
int maxTables = tsdbGetNextMaxTables(tid);
STable **tables = (STable **)calloc(maxTables, sizeof(STable *));
if (tables == NULL) {
terrno = TSDB_CODE_TDB_OUT_OF_MEMORY;
return -1;
}
memcpy((void *)tables, (void *)pMeta->tables, sizeof(STable *) * pMeta->maxTables);
pMeta->maxTables = maxTables;
STable **tTables = pMeta->tables;
pMeta->tables = tables;
tfree(tTables);
tsdbDebug("vgId:%d tsdb meta maxTables is adjusted as %d", REPO_ID(pRepo), maxTables);
return 0;
}
\ No newline at end of file
......@@ -147,6 +147,7 @@ int tsdbSetAndOpenHelperFile(SRWHelper *pHelper, SFileGroup *pGroup) {
if (tsdbOpenFile(pFile, O_WRONLY | O_CREAT) < 0) goto _err;
pFile->info.size = TSDB_FILE_HEAD_SIZE;
pFile->info.magic = TSDB_FILE_INIT_MAGIC;
pFile->info.len = 0;
if (tsdbUpdateFileHeader(pFile, 0) < 0) return -1;
}
} else {
......@@ -302,6 +303,10 @@ void tsdbSetHelperTable(SRWHelper *pHelper, STable *pTable, STsdbRepo *pRepo) {
memset(&(pHelper->curCompIdx), 0, sizeof(SCompIdx));
}
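// If the table already has a "last" block on disk, record it so the write
// path knows it may have to merge new rows into that block later.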
if (helperType(pHelper) == TSDB_WRITE_HELPER && pHelper->curCompIdx.hasLast) {
pHelper->hasOldLastBlock = true;
}
helperSetState(pHelper, TSDB_HELPER_TABLE_SET);
ASSERT(pHelper->state == ((TSDB_HELPER_TABLE_SET << 1) - 1));
}
......@@ -555,10 +560,6 @@ int tsdbLoadCompIdx(SRWHelper *pHelper, void *target) {
}
helperSetState(pHelper, TSDB_HELPER_IDX_LOAD);
if (helperType(pHelper) == TSDB_WRITE_HELPER) {
pFile->info.len = 0;
}
// Copy the memory for outside usage
if (target && pHelper->idxH.numOfIdx > 0)
memcpy(target, pHelper->idxH.pIdxArray, sizeof(SCompIdx) * pHelper->idxH.numOfIdx);
......@@ -1231,8 +1232,8 @@ static int tsdbLoadColData(SRWHelper *pHelper, SFile *pFile, SCompBlock *pCompBl
if (tsdbCheckAndDecodeColumnData(pDataCol, pHelper->pBuffer, pCompCol->len, pCompBlock->algorithm,
pCompBlock->numOfRows, pHelper->pRepo->config.maxRowsPerFileBlock,
pHelper->compBuffer, tsizeof(pHelper->compBuffer)) < 0) {
tsdbError("vgId:%d file %s is broken at column %d offset %" PRId64, REPO_ID(pHelper->pRepo), pFile->fname, pCompCol->colId,
(int64_t)pCompCol->offset);
tsdbError("vgId:%d file %s is broken at column %d offset %" PRId64, REPO_ID(pHelper->pRepo), pFile->fname,
pCompCol->colId, offset);
return -1;
}
......@@ -1259,13 +1260,21 @@ static int tsdbLoadBlockDataColsImpl(SRWHelper *pHelper, SCompBlock *pCompBlock,
SCompCol *pCompCol = NULL;
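// Both pDataCols and the on-disk SCompCol array are ordered by colId, so
// they can be matched with a two-pointer scan: columns absent on disk are
// filled with NULLs, and columns absent in the target are skipped.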
while (true) {
ASSERT(dcol < pDataCols->numOfCols);
if (dcol >= pDataCols->numOfCols) {
pDataCol = NULL;
break;
}
pDataCol = &pDataCols->cols[dcol];
ASSERT(pDataCol->colId <= colId);
if (pDataCol->colId == colId) break;
dcol++;
if (pDataCol->colId > colId) {
pDataCol = NULL;
break;
} else {
dcol++;
if (pDataCol->colId == colId) break;
}
}
if (pDataCol == NULL) continue;
ASSERT(pDataCol->colId == colId);
if (colId == 0) { // load the key row
......@@ -1275,15 +1284,24 @@ static int tsdbLoadBlockDataColsImpl(SRWHelper *pHelper, SCompBlock *pCompBlock,
compCol.offset = TSDB_KEY_COL_OFFSET;
pCompCol = &compCol;
} else { // load non-key rows
while (ccol < pCompBlock->numOfCols) {
pCompCol = &pHelper->pCompData->cols[ccol];
if (pCompCol->colId >= colId) break;
ccol++;
while (true) {
if (ccol >= pCompBlock->numOfCols) {
pCompCol = NULL;
break;
}
pCompCol = &(pHelper->pCompData->cols[ccol]);
if (pCompCol->colId > colId) {
pCompCol = NULL;
break;
} else {
ccol++;
if (pCompCol->colId == colId) break;
}
}
if (ccol >= pCompBlock->numOfCols || pCompCol->colId > colId) {
if (pCompCol == NULL) {
dataColSetNEleNull(pDataCol, pCompBlock->numOfRows, pDataCols->maxPoints);
dcol++;
continue;
}
......@@ -1291,8 +1309,6 @@ static int tsdbLoadBlockDataColsImpl(SRWHelper *pHelper, SCompBlock *pCompBlock,
}
if (tsdbLoadColData(pHelper, pFile, pCompBlock, pCompCol, pDataCol) < 0) goto _err;
dcol++;
if (colId != 0) ccol++;
}
return 0;
......@@ -1517,8 +1533,8 @@ static int tsdbProcessMergeCommit(SRWHelper *pHelper, SCommitIter *pCommitIter,
if (rows2 == 0) { // all data filtered out
*(pCommitIter->pIter) = slIter;
} else {
if (rows1 + rows2 < pCfg->minRowsPerFileBlock && pCompBlock->numOfSubBlocks < TSDB_MAX_SUBBLOCKS &&
!TSDB_NLAST_FILE_OPENED(pHelper)) {
if (pCompBlock->numOfRows + rows2 < pCfg->minRowsPerFileBlock &&
pCompBlock->numOfSubBlocks < TSDB_MAX_SUBBLOCKS && !TSDB_NLAST_FILE_OPENED(pHelper)) {
tdResetDataCols(pDataCols);
int rowsRead = tsdbLoadDataFromCache(pTable, pCommitIter->pIter, maxKey, rows1, pDataCols,
pDataCols0->cols[0].pData, pDataCols0->numOfRows);
......
......@@ -175,6 +175,9 @@ static SArray* getDefaultLoadColumns(STsdbQueryHandle* pQueryHandle, bool loadTS
TsdbQueryHandleT* tsdbQueryTables(TSDB_REPO_T* tsdb, STsdbQueryCond* pCond, STableGroupInfo* groupList, void* qinfo) {
STsdbQueryHandle* pQueryHandle = calloc(1, sizeof(STsdbQueryHandle));
if (pQueryHandle == NULL) {
goto out_of_memory;
}
pQueryHandle->order = pCond->order;
pQueryHandle->window = pCond->twindow;
pQueryHandle->pTsdb = tsdb;
......@@ -260,8 +263,8 @@ TsdbQueryHandleT* tsdbQueryTables(TSDB_REPO_T* tsdb, STsdbQueryCond* pCond, STab
return (TsdbQueryHandleT) pQueryHandle;
out_of_memory:
terrno = TSDB_CODE_TDB_OUT_OF_MEMORY;
tsdbCleanupQueryHandle(pQueryHandle);
terrno = TSDB_CODE_TDB_OUT_OF_MEMORY;
return NULL;
}
......@@ -317,17 +320,20 @@ static bool initTableMemIterator(STsdbQueryHandle* pHandle, STableCheckInfo* pCh
}
assert(pCheckInfo->iter == NULL && pCheckInfo->iiter == NULL);
if (pHandle->mem && pHandle->mem->tData[pCheckInfo->tableId.tid] != NULL) {
// TODO: add uid check
if (pHandle->mem && pCheckInfo->tableId.tid < pHandle->mem->maxTables &&
pHandle->mem->tData[pCheckInfo->tableId.tid] != NULL) {
pCheckInfo->iter = tSkipListCreateIterFromVal(pHandle->mem->tData[pCheckInfo->tableId.tid]->pData,
(const char*) &pCheckInfo->lastKey, TSDB_DATA_TYPE_TIMESTAMP, order);
(const char*)&pCheckInfo->lastKey, TSDB_DATA_TYPE_TIMESTAMP, order);
}
if (pHandle->imem && pHandle->imem->tData[pCheckInfo->tableId.tid] != NULL) {
if (pHandle->imem && pCheckInfo->tableId.tid < pHandle->imem->maxTables &&
pHandle->imem->tData[pCheckInfo->tableId.tid] != NULL) {
pCheckInfo->iiter = tSkipListCreateIterFromVal(pHandle->imem->tData[pCheckInfo->tableId.tid]->pData,
(const char*) &pCheckInfo->lastKey, TSDB_DATA_TYPE_TIMESTAMP, order);
(const char*)&pCheckInfo->lastKey, TSDB_DATA_TYPE_TIMESTAMP, order);
}
// both iterators are NULL, no data in buffer right now
if (pCheckInfo->iter == NULL && pCheckInfo->iiter == NULL) {
return false;
......@@ -1529,7 +1535,6 @@ static int32_t getDataBlocksInFiles(STsdbQueryHandle* pQueryHandle, bool* exists
static bool doHasDataInBuffer(STsdbQueryHandle* pQueryHandle) {
size_t numOfTables = taosArrayGetSize(pQueryHandle->pTableCheckInfo);
assert(numOfTables <= ((STsdbRepo*)pQueryHandle->pTsdb)->config.maxTables);
while (pQueryHandle->activeIndex < numOfTables) {
if (hasMoreDataInCache(pQueryHandle)) {
......@@ -2418,8 +2423,7 @@ void tsdbCleanupQueryHandle(TsdbQueryHandleT queryHandle) {
tfree(pQueryHandle->statis);
// todo check error
tsdbUnRefMemTable(pQueryHandle->pTsdb, pQueryHandle->mem);
tsdbUnRefMemTable(pQueryHandle->pTsdb, pQueryHandle->imem);
tsdbUnTakeMemSnapShot(pQueryHandle->pTsdb, pQueryHandle->mem, pQueryHandle->imem);
tsdbDestroyHelper(&pQueryHandle->rhelper);
......
......@@ -98,7 +98,7 @@ static void tsdbSetCfg(STsdbCfg *pCfg, int32_t tsdbId, int32_t cacheBlockSize, i
pCfg->tsdbId = tsdbId;
pCfg->cacheBlockSize = cacheBlockSize;
pCfg->totalBlocks = totalBlocks;
pCfg->maxTables = maxTables;
// pCfg->maxTables = maxTables;
pCfg->daysPerFile = daysPerFile;
pCfg->keep = keep;
pCfg->minRowsPerFileBlock = minRows;
......
......@@ -123,7 +123,7 @@ int32_t vnodeCreate(SMDCreateVnodeMsg *pVnodeCfg) {
tsdbCfg.tsdbId = pVnodeCfg->cfg.vgId;
tsdbCfg.cacheBlockSize = pVnodeCfg->cfg.cacheBlockSize;
tsdbCfg.totalBlocks = pVnodeCfg->cfg.totalBlocks;
tsdbCfg.maxTables = pVnodeCfg->cfg.maxTables;
// tsdbCfg.maxTables = pVnodeCfg->cfg.maxTables;
tsdbCfg.daysPerFile = pVnodeCfg->cfg.daysPerFile;
tsdbCfg.keep = pVnodeCfg->cfg.daysToKeep;
tsdbCfg.minRowsPerFileBlock = pVnodeCfg->cfg.minRowsPerFileBlock;
......@@ -630,14 +630,14 @@ static int32_t vnodeSaveCfg(SMDCreateVnodeMsg *pVnodeCfg) {
len += snprintf(content + len, maxLen - len, " \"cfgVersion\": %d,\n", pVnodeCfg->cfg.cfgVersion);
len += snprintf(content + len, maxLen - len, " \"cacheBlockSize\": %d,\n", pVnodeCfg->cfg.cacheBlockSize);
len += snprintf(content + len, maxLen - len, " \"totalBlocks\": %d,\n", pVnodeCfg->cfg.totalBlocks);
len += snprintf(content + len, maxLen - len, " \"maxTables\": %d,\n", pVnodeCfg->cfg.maxTables);
// len += snprintf(content + len, maxLen - len, " \"maxTables\": %d,\n", pVnodeCfg->cfg.maxTables);
len += snprintf(content + len, maxLen - len, " \"daysPerFile\": %d,\n", pVnodeCfg->cfg.daysPerFile);
len += snprintf(content + len, maxLen - len, " \"daysToKeep\": %d,\n", pVnodeCfg->cfg.daysToKeep);
len += snprintf(content + len, maxLen - len, " \"daysToKeep1\": %d,\n", pVnodeCfg->cfg.daysToKeep1);
len += snprintf(content + len, maxLen - len, " \"daysToKeep2\": %d,\n", pVnodeCfg->cfg.daysToKeep2);
len += snprintf(content + len, maxLen - len, " \"minRowsPerFileBlock\": %d,\n", pVnodeCfg->cfg.minRowsPerFileBlock);
len += snprintf(content + len, maxLen - len, " \"maxRowsPerFileBlock\": %d,\n", pVnodeCfg->cfg.maxRowsPerFileBlock);
len += snprintf(content + len, maxLen - len, " \"commitTime\": %d,\n", pVnodeCfg->cfg.commitTime);
// len += snprintf(content + len, maxLen - len, " \"commitTime\": %d,\n", pVnodeCfg->cfg.commitTime);
len += snprintf(content + len, maxLen - len, " \"precision\": %d,\n", pVnodeCfg->cfg.precision);
len += snprintf(content + len, maxLen - len, " \"compression\": %d,\n", pVnodeCfg->cfg.compression);
len += snprintf(content + len, maxLen - len, " \"walLevel\": %d,\n", pVnodeCfg->cfg.walLevel);
......@@ -729,12 +729,12 @@ static int32_t vnodeReadCfg(SVnodeObj *pVnode) {
}
pVnode->tsdbCfg.totalBlocks = totalBlocks->valueint;
cJSON *maxTables = cJSON_GetObjectItem(root, "maxTables");
if (!maxTables || maxTables->type != cJSON_Number) {
vError("vgId:%d, failed to read vnode cfg, maxTables not found", pVnode->vgId);
goto PARSE_OVER;
}
pVnode->tsdbCfg.maxTables = maxTables->valueint;
// cJSON *maxTables = cJSON_GetObjectItem(root, "maxTables");
// if (!maxTables || maxTables->type != cJSON_Number) {
// vError("vgId:%d, failed to read vnode cfg, maxTables not found", pVnode->vgId);
// goto PARSE_OVER;
// }
// pVnode->tsdbCfg.maxTables = maxTables->valueint;
cJSON *daysPerFile = cJSON_GetObjectItem(root, "daysPerFile");
if (!daysPerFile || daysPerFile->type != cJSON_Number) {
......@@ -778,12 +778,12 @@ static int32_t vnodeReadCfg(SVnodeObj *pVnode) {
}
pVnode->tsdbCfg.maxRowsPerFileBlock = maxRowsPerFileBlock->valueint;
cJSON *commitTime = cJSON_GetObjectItem(root, "commitTime");
if (!commitTime || commitTime->type != cJSON_Number) {
vError("vgId:%d, failed to read vnode cfg, commitTime not found", pVnode->vgId);
goto PARSE_OVER;
}
pVnode->tsdbCfg.commitTime = (int8_t)commitTime->valueint;
// cJSON *commitTime = cJSON_GetObjectItem(root, "commitTime");
// if (!commitTime || commitTime->type != cJSON_Number) {
// vError("vgId:%d, failed to read vnode cfg, commitTime not found", pVnode->vgId);
// goto PARSE_OVER;
// }
// pVnode->tsdbCfg.commitTime = (int8_t)commitTime->valueint;
cJSON *precision = cJSON_GetObjectItem(root, "precision");
if (!precision || precision->type != cJSON_Number) {
......
#!/bin/bash
DATA_DIR=/mnt/root/testdata
NUM_LOOP=1
NUM_OF_FILES=100
OUT_FILE=cassandraWrite.out
rowsPerRequest=(1 10 50 100 500 1000 2000)
function printTo {
if $verbose ; then
echo $1
fi
}
function runTest {
for c in `seq 1 $clients`; do
avgRPR[$c]=0
done
printf "R/R, "
for c in `seq 1 $clients`; do
if [ "$c" == "1" ]; then
printf "$c client, "
else
printf "$c clients, "
fi
done
printf "\n"
for r in ${rowsPerRequest[@]}; do
printf "$r, "
for c in `seq 1 $clients`; do
totalRPR=0
for i in `seq 1 $NUM_LOOP`; do
printTo "loop i:$i, java -jar $CAS_TEST_DIR/cassandratest/target/cassandratest-1.0-SNAPSHOT-jar-with-dependencies.jar \
-datadir $DATA_DIR \
-numofFiles $NUM_OF_FILES \
-rowsperrequest $r \
-writeclients $c \
-conf $CAS_TEST_DIR/application.conf"
java -jar $CAS_TEST_DIR/cassandratest/target/cassandratest-1.0-SNAPSHOT-jar-with-dependencies.jar \
-datadir $DATA_DIR \
-numofFiles $NUM_OF_FILES \
-rowsperrequest $r \
-writeclients $c \
-conf $CAS_TEST_DIR/application.conf \
2>&1 > $OUT_FILE
RPR=`cat $OUT_FILE | grep "insertation speed:" | awk '{print $(NF-1)}'`
totalRPR=`echo "scale=4; $totalRPR + $RPR" | bc`
printTo "rows:$r, clients:$c, i:$i RPR:$RPR"
done
avgRPR[$c]=`echo "scale=4; $totalRPR / $NUM_LOOP" | bc`
done
for c in `seq 1 $clients`; do
printf "${avgRPR[$c]}, "
done
printf "\n"
done
}
################ Main ################
verbose=false
clients=1
while : ; do
case $1 in
-v)
verbose=true
shift ;;
-c)
clients=$2
shift 2;;
*)
break ;;
esac
done
printTo "Cassandra Test begin.."
WORK_DIR=/mnt/root/TDengine
CAS_TEST_DIR=$WORK_DIR/tests/comparisonTest/cassandra
runTest
printTo "Cassandra Test done!"
#!/bin/bash
today=`date +"%Y%m%d"`
TDENGINE_DIR=/home/shuduo/work/taosdata/TDengine.cover
TDENGINE_COVERAGE_REPORT=$TDENGINE_DIR/tests/coverage-report-$today.log
# Color setting
RED='\033[0;31m'
GREEN='\033[1;32m'
GREEN_DARK='\033[0;32m'
GREEN_UNDERLINE='\033[4;32m'
NC='\033[0m'
function buildTDengine {
echo "check if TDengine need build"
cd $TDENGINE_DIR
git remote prune origin > /dev/null
git remote update > /dev/null
REMOTE_COMMIT=`git rev-parse --short remotes/origin/develop`
LOCAL_COMMIT=`git rev-parse --short @`
echo " LOCAL: $LOCAL_COMMIT"
echo "REMOTE: $REMOTE_COMMIT"
# reset counter
lcov -d . --zerocounters
cd $TDENGINE_DIR/debug
if [ "$LOCAL_COMMIT" == "$REMOTE_COMMIT" ]; then
echo "repo up-to-date"
else
echo "repo need to pull"
git reset --hard
git pull
LOCAL_COMMIT=`git rev-parse --short @`
rm -rf *
cmake -DCOVER=true -DRANDOM_FILE_FAIL=true .. > /dev/null
make > /dev/null
fi
make install > /dev/null
}
function runGeneralCaseOneByOne {
while read -r line; do
if [[ $line =~ ^./test.sh* ]]; then
general_case=`echo $line | grep -w general`
if [ -n "$general_case" ]; then
case=`echo $line |grep general| awk '{print $NF}'`
./test.sh -f $case > /dev/null 2>&1 && \
echo -e "${GREEN}$case success${NC}" | tee -a $TDENGINE_COVERAGE_REPORT || \
echo -e "${RED}$case failed${NC}" | tee -a $TDENGINE_COVERAGE_REPORT
fi
fi
done < $1
}
function runTest {
echo "run Test"
cd $TDENGINE_DIR/tests/script
[ -d ../../sim ] && rm -rf ../../sim
[ -f $TDENGINE_COVERAGE_REPORT ] && rm $TDENGINE_COVERAGE_REPORT
runGeneralCaseOneByOne jenkins/basic.txt
totalSuccess=`grep 'success' $TDENGINE_COVERAGE_REPORT | wc -l`
if [ "$totalSuccess" -gt "0" ]; then
echo -e "\n${GREEN} ### Total $totalSuccess coverage test case(s) succeed! ### ${NC}" | tee -a $TDENGINE_COVERAGE_REPORT
fi
totalFailed=`grep 'failed\|fault' $TDENGINE_COVERAGE_REPORT | wc -l`
if [ "$totalFailed" -ne "0" ]; then
echo -e "${RED} ### Total $totalFailed coverage test case(s) failed! ### ${NC}\n" | tee -a $TDENGINE_COVERAGE_REPORT
# exit $totalPyFailed
fi
cd $TDENGINE_DIR/tests
rm -rf ../sim
./test-all.sh full python | tee -a $TDENGINE_COVERAGE_REPORT
# Test Connector
stopTaosd
$TDENGINE_DIR/debug/build/bin/taosd -c $TDENGINE_DIR/debug/test/cfg > /dev/null &
sleep 10
cd $TDENGINE_DIR/src/connector/jdbc
mvn clean package
mvn test | tee -a $TDENGINE_COVERAGE_REPORT
# Test C Demo
stopTaosd
$TDENGINE_DIR/debug/build/bin/taosd -c $TDENGINE_DIR/debug/test/cfg > /dev/null &
sleep 10
yes | $TDENGINE_DIR/debug/build/bin/demo 127.0.0.1 | tee -a $TDENGINE_COVERAGE_REPORT
# Test waltest
dataDir=`grep dataDir $TDENGINE_DIR/debug/test/cfg/taos.cfg|awk '{print $2}'`
walDir=`find $dataDir -name "wal"|head -n1`
echo "dataDir: $dataDir" | tee -a $TDENGINE_COVERAGE_REPORT
echo "walDir: $walDir" | tee -a $TDENGINE_COVERAGE_REPORT
if [ -n "$walDir" ]; then
yes | $TDENGINE_DIR/debug/build/bin/waltest -p $walDir | tee -a $TDENGINE_COVERAGE_REPORT
fi
# run Unit Test
echo "Run Unit Test: utilTest, queryTest and cliTest"
$TDENGINE_DIR/debug/build/bin/utilTest > /dev/null && echo "utilTest pass!" || echo "utilTest failed!"
$TDENGINE_DIR/debug/build/bin/queryTest > /dev/null && echo "queryTest pass!" || echo "queryTest failed!"
$TDENGINE_DIR/debug/build/bin/cliTest > /dev/null && echo "cliTest pass!" || echo "cliTest failed!"
stopTaosd
}
function lcovFunc {
echo "collect data by lcov"
cd $TDENGINE_DIR
# collect data
lcov -d . --capture --rc lcov_branch_coverage=1 --rc genhtml_branch_coverage=1 --no-external -b $TDENGINE_DIR -o coverage.info
# remove exclude paths
lcov --remove coverage.info \
'*/tests/*' '*/test/*' '*/deps/*' '*/plugins/*' '*/taosdef.h' \
--rc lcov_branch_coverage=1 -o coverage.info
# generate result
lcov -l --rc lcov_branch_coverage=1 coverage.info | tee -a $TDENGINE_COVERAGE_REPORT
# push result to coveralls.io
coveralls-lcov coverage.info | tee -a $TDENGINE_COVERAGE_REPORT
}
function sendReport {
echo "send report"
receiver="sdsang@taosdata.com, sangshuduo@gmail.com, pxiao@taosdata.com"
mimebody="MIME-Version: 1.0\nContent-Type: text/html; charset=utf-8\n"
cd $TDENGINE_DIR
sed -i 's/\x1b\[[0-9;]*m//g' $TDENGINE_COVERAGE_REPORT
BODY_CONTENT=`cat $TDENGINE_COVERAGE_REPORT`
echo -e "to: ${receiver}\nsubject: Coverage test report ${today}, commit ID: ${LOCAL_COMMIT}\n\n${today}:\n${BODY_CONTENT}" | \
(cat - && uuencode $TDENGINE_COVERAGE_REPORT coverage-report-$today.log) | \
ssmtp "${receiver}" && echo "Report Sent!"
}
function stopTaosd {
echo "Stop taosd"
systemctl stop taosd
PID=`ps -ef|grep -w taosd | grep -v grep | awk '{print $2}'`
while [ -n "$PID" ]
do
pkill -TERM -x taosd
sleep 1
PID=`ps -ef|grep -w taosd | grep -v grep | awk '{print $2}'`
done
}
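# Replay the whole suite with the given exec.sh variant (and a matching
# dnodes.py) copied over the defaults, so the same cases run under the
# selected failure-injection mode.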
function runTestRandomFail {
exec_random_fail_sh=$1
default_exec_sh=$TDENGINE_DIR/tests/script/sh/exec.sh
[ -f $exec_random_fail_sh ] && cp $exec_random_fail_sh $default_exec_sh || exit 1
dnodes_random_fail_py=$TDENGINE_DIR/tests/pytest/util/dnodes-no-random-fail.py
default_dnodes_py=$TDENGINE_DIR/tests/pytest/util/dnodes.py
[ -f $dnodes_random_fail_py ] && cp $dnodes_random_fail_py $default_dnodes_py || exit 1
runTest NoRandomFail
}
WORK_DIR=/home/shuduo/work/taosdata
date >> $WORK_DIR/cron.log
echo "Run Coverage Test" | tee -a $WORK_DIR/cron.log
rm -f /tmp/core-*
stopTaosd
buildTDengine
runTestRandomFail $TDENGINE_DIR/tests/script/sh/exec-random-fail.sh
runTestRandomFail $TDENGINE_DIR/tests/script/sh/exec-default.sh
runTestRandomFail $TDENGINE_DIR/tests/script/sh/exec-no-random-fail.sh
lcovFunc
sendReport
stopTaosd
date >> $WORK_DIR/cron.log
echo "End of Coverage Test" | tee -a $WORK_DIR/cron.log
#!/bin/bash
today=`date +"%Y%m%d"`
TDENGINE_DIR=/home/shuduo/work/taosdata/TDengine.orig
TDENGINE_FULLTEST_REPORT=$TDENGINE_DIR/tests/full-report-$today.log
# Color setting
RED='\033[0;31m'
GREEN='\033[1;32m'
GREEN_DARK='\033[0;32m'
GREEN_UNDERLINE='\033[4;32m'
NC='\033[0m'
function buildTDengine {
echo "check if TDengine need build"
need_rebuild=false
if [ ! -d $TDENGINE_DIR ]; then
echo "No TDengine source code found!"
git clone https://github.com/taosdata/TDengine $TDENGINE_DIR
need_rebuild=true
fi
cd $TDENGINE_DIR
git remote prune origin > /dev/null
git remote update > /dev/null
REMOTE_COMMIT=`git rev-parse --short remotes/origin/develop`
LOCAL_COMMIT=`git rev-parse --short @`
echo " LOCAL: $LOCAL_COMMIT"
echo "REMOTE: $REMOTE_COMMIT"
if [ "$LOCAL_COMMIT" == "$REMOTE_COMMIT" ]; then
echo "repo up-to-date"
else
echo "repo need to pull"
git pull
need_rebuild=true
fi
[ -d $TDENGINE_DIR/debug ] || mkdir $TDENGINE_DIR/debug
cd $TDENGINE_DIR/debug
[ -f $TDENGINE_DIR/debug/build/bin/taosd ] || need_rebuild=true
if $need_rebuild ; then
echo "rebuild.."
LOCAL_COMMIT=`git rev-parse --short @`
rm -rf *
cmake .. > /dev/null
make > /dev/null
fi
make install > /dev/null
}
function runGeneralCaseOneByOne {
while read -r line; do
if [[ $line =~ ^./test.sh* ]]; then
general_case=`echo $line | grep -w general`
if [ -n "$general_case" ]; then
case=`echo $line | awk '{print $NF}'`
start_time=`date +%s`
./test.sh -f $case > /dev/null 2>&1 && ret=0 || ret=1
end_time=`date +%s`
if [[ $ret -eq 0 ]]; then
echo -e "${GREEN}$case success${NC}" | tee -a $TDENGINE_FULLTEST_REPORT
else
casename=`echo $case|sed 's/\//\-/g'`
find $TDENGINE_DIR/sim -name "*log" -exec tar czf $TDENGINE_DIR/fulltest-$today-$casename.log.tar.gz {} +
echo -e "${RED}$case failed and log saved${NC}" | tee -a $TDENGINE_FULLTEST_REPORT
fi
echo execution time of $case was `expr $end_time - $start_time`s. | tee -a $TDENGINE_FULLTEST_REPORT
fi
fi
done < $1
}
function runPyCaseOneByOne {
while read -r line; do
if [[ $line =~ ^python.* ]]; then
if [[ $line != *sleep* ]]; then
case=`echo $line|awk '{print $NF}'`
start_time=`date +%s`
$line > /dev/null 2>&1 && ret=0 || ret=1
end_time=`date +%s`
if [[ $ret -eq 0 ]]; then
echo -e "${GREEN}$case success${NC}" | tee -a pytest-out.log
else
casename=`echo $case|sed 's/\//\-/g'`
find $TDENGINE_DIR/sim -name "*log" -exec tar czf $TDENGINE_DIR/fulltest-$today-$casename.log.tar.gz {} +
echo -e "${RED}$case failed and log saved${NC}" | tee -a pytest-out.log
fi
echo execution time of $case was `expr $end_time - $start_time`s. | tee -a pytest-out.log
else
$line > /dev/null 2>&1
fi
fi
done < $1
}
function runTest {
echo "Run Test"
cd $TDENGINE_DIR/tests/script
[ -d $TDENGINE_DIR/sim ] && rm -rf $TDENGINE_DIR/sim
[ -f $TDENGINE_FULLTEST_REPORT ] && rm $TDENGINE_FULLTEST_REPORT
runGeneralCaseOneByOne jenkins/basic.txt
totalSuccess=`grep 'success' $TDENGINE_FULLTEST_REPORT | wc -l`
if [ "$totalSuccess" -gt "0" ]; then
echo -e "\n${GREEN} ### Total $totalSuccess SIM case(s) succeed! ### ${NC}" \
| tee -a $TDENGINE_FULLTEST_REPORT
fi
totalFailed=`grep 'failed\|fault' $TDENGINE_FULLTEST_REPORT | wc -l`
if [ "$totalFailed" -ne "0" ]; then
echo -e "${RED} ### Total $totalFailed SIM case(s) failed! ### ${NC}\n" \
| tee -a $TDENGINE_FULLTEST_REPORT
fi
cd $TDENGINE_DIR/tests/pytest
[ -d $TDENGINE_DIR/sim ] && rm -rf $TDENGINE_DIR/sim
[ -f pytest-out.log ] && rm -f pytest-out.log
runPyCaseOneByOne fulltest.sh
totalPySuccess=`grep 'success' pytest-out.log | wc -l`
totalPyFailed=`grep 'failed\|fault' pytest-out.log | wc -l`
cat pytest-out.log >> $TDENGINE_FULLTEST_REPORT
if [ "$totalPySuccess" -gt "0" ]; then
echo -e "\n${GREEN} ### Total $totalPySuccess python case(s) succeed! ### ${NC}" \
| tee -a $TDENGINE_FULLTEST_REPORT
fi
if [ "$totalPyFailed" -ne "0" ]; then
echo -e "\n${RED} ### Total $totalPyFailed python case(s) failed! ### ${NC}" \
| tee -a $TDENGINE_FULLTEST_REPORT
fi
}
function sendReport {
echo "Send Report"
receiver="sdsang@taosdata.com, sangshuduo@gmail.com, pxiao@taosdata.com"
mimebody="MIME-Version: 1.0\nContent-Type: text/html; charset=utf-8\n"
cd $TDENGINE_DIR/tests
sed -i 's/\x1b\[[0-9;]*m//g' $TDENGINE_FULLTEST_REPORT
BODY_CONTENT=`cat $TDENGINE_FULLTEST_REPORT`
cd $TDENGINE_DIR
tar czf fulltest-$today.tar.gz fulltest-$today-*.log.tar.gz
echo -e "to: ${receiver}\nsubject: Full test report ${today}, commit ID: ${LOCAL_COMMIT}\n\n${today}:\n${BODY_CONTENT}" | \
(cat - && uuencode $TDENGINE_FULLTEST_REPORT fulltest-report-$today.log) | \
(cat - && uuencode $TDENGINE_DIR/fulltest-$today.tar.gz fulltest-$today.tar.gz) | \
ssmtp "${receiver}" && echo "Report Sent!"
}
function stopTaosd {
echo "Stop taosd"
systemctl stop taosd
PID=`ps -ef|grep -w taosd | grep -v grep | awk '{print $2}'`
while [ -n "$PID" ]
do
pkill -TERM -x taosd
sleep 1
PID=`ps -ef|grep -w taosd | grep -v grep | awk '{print $2}'`
done
}
WORK_DIR=/home/shuduo/work/taosdata
date >> $WORK_DIR/cron.log
echo "Run Full Test" | tee -a $WORK_DIR/cron.log
stopTaosd
buildTDengine
runTest
sendReport
stopTaosd
date >> $WORK_DIR/cron.log
echo "End of Full Test" | tee -a $WORK_DIR/cron.log
#!/bin/bash
today=`date +"%Y%m%d"`
TDINTERNAL_DIR=/home/shuduo/work/taosdata/TDinternal.cover
TDINTERNAL_COVERAGE_REPORT=$TDINTERNAL_DIR/community/tests/tdinternal-coverage-report-$today.log
# Color setting
RED='\033[0;31m'
GREEN='\033[1;32m'
GREEN_DARK='\033[0;32m'
GREEN_UNDERLINE='\033[4;32m'
NC='\033[0m'
function buildTDinternal {
echo "check if TDinternal need build"
cd $TDINTERNAL_DIR
NEED_COMPILE=0
# git remote update
REMOTE_COMMIT=`git rev-parse --short remotes/origin/develop`
LOCAL_COMMIT=`git rev-parse --short @`
echo " LOCAL: $LOCAL_COMMIT"
echo "REMOTE: $REMOTE_COMMIT"
if [ "$LOCAL_COMMIT" == "$REMOTE_COMMIT" ]; then
echo "TDinternal repo is up-to-date"
else
echo "repo need to pull"
# git pull
# NEED_COMPILE=1
fi
lcov -d . --zerocounters
# git submodule update --init --recursive
cd $TDINTERNAL_DIR/community
TDENGINE_REMOTE_COMMIT=`git rev-parse --short remotes/origin/develop`
TDENGINE_LOCAL_COMMIT=`git rev-parse --short @`
if [ "$TDENGINE_LOCAL_COMMIT" == "$TDENGINE_REMOTE_COMMIT" ]; then
echo "community repo is up-to-date"
else
echo "repo need to pull"
# git checkout develop
# git pull
# NEED_COMPILE=1
fi
cd $TDINTERNAL_DIR/debug
if [[ $NEED_COMPILE -eq 1 ]]; then
LOCAL_COMMIT=`git rev-parse --short @`
rm -rf *
cmake .. > /dev/null
make > /dev/null
fi
make install > /dev/null
}
function runUniqueCaseOneByOne {
while read -r line; do
if [[ $line =~ ^./test.sh* ]]; then
case=`echo $line | awk '{print $NF}'`
start_time=`date +%s`
./test.sh -f $case > /dev/null 2>&1 && \
echo -e "${GREEN}$case success${NC}" | tee -a $TDINTERNAL_COVERAGE_REPORT || \
echo -e "${RED}$case failed${NC}" | tee -a $TDINTERNAL_COVERAGE_REPORT
end_time=`date +%s`
echo execution time of $case was `expr $end_time - $start_time`s. | tee -a $TDINTERNAL_COVERAGE_REPORT
fi
done < $1
}
function runTest {
echo "Run Test"
cd $TDINTERNAL_DIR/community/tests/script
[ -d ../../sim ] && rm -rf ../../sim
[ -f $TDINTERNAL_COVERAGE_REPORT ] && rm $TDINTERNAL_COVERAGE_REPORT
runUniqueCaseOneByOne jenkins/basic.txt
totalSuccess=`grep 'success' $TDINTERNAL_COVERAGE_REPORT | wc -l`
if [ "$totalSuccess" -gt "0" ]; then
echo -e "\n${GREEN} ### Total $totalSuccess TDinternal case(s) succeed! ### ${NC}" | tee -a $TDINTERNAL_COVERAGE_REPORT
fi
totalFailed=`grep 'failed\|fault' $TDINTERNAL_COVERAGE_REPORT | wc -l`
if [ "$totalFailed" -ne "0" ]; then
echo -e "${RED} ### Total $totalFailed TDinternal case(s) failed! ### ${NC}\n" | tee -a $TDINTERNAL_COVERAGE_REPORT
# exit $totalPyFailed
fi
# Test Python test case
cd $TDINTERNAL_DIR/community/tests
/usr/bin/time -f "Total spent: %e" ./test-all.sh full python | tee -a $TDINTERNAL_COVERAGE_REPORT
# Test Connector
stopTaosd
$TDINTERNAL_DIR/debug/build/bin/taosd -c $TDINTERNAL_DIR/debug/test/cfg > /dev/null &
sleep 10
cd $TDINTERNAL_DIR/community/src/connector/jdbc
mvn clean package
mvn test | tee -a $TDINTERNAL_COVERAGE_REPORT
# Test C Demo
stopTaosd
$TDINTERNAL_DIR/debug/build/bin/taosd -c $TDINTERNAL_DIR/debug/test/cfg > /dev/null &
sleep 10
yes | $TDINTERNAL_DIR/debug/build/bin/demo 127.0.0.1 | tee -a $TDINTERNAL_COVERAGE_REPORT
# Test waltest
dataDir=`grep dataDir $TDINTERNAL_DIR/debug/test/cfg/taos.cfg|awk '{print $2}'`
walDir=`find $dataDir -name "wal"|head -n1`
echo "dataDir: $dataDir\nwalDir: $walDir" | tee -a $TDINTERNAL_COVERAGE_REPORT
if [ -n "$walDir" ]; then
yes | $TDINTERNAL_DIR/debug/build/bin/waltest -p $walDir | tee -a $TDINTERNAL_COVERAGE_REPORT
fi
stopTaosd
}
function sendReport {
echo "Send Report"
receiver="sdsang@taosdata.com, sangshuduo@gmail.com, pxiao@taosdata.com"
mimebody="MIME-Version: 1.0\nContent-Type: text/html; charset=utf-8\n"
cd $TDINTERNAL_DIR
sed -i 's/\x1b\[[0-9;]*m//g' $TDINTERNAL_COVERAGE_REPORT
BODY_CONTENT=`cat $TDINTERNAL_COVERAGE_REPORT`
echo -e "to: ${receiver}\nsubject: TDinternal coverage test report ${today}, commit ID: ${LOCAL_COMMIT}\n\n${today}:\n${BODY_CONTENT}" | \
(cat - && uuencode tdinternal-coverage-report-$today.tar.gz tdinternal-coverage-report-$today.tar.gz) | \
(cat - && uuencode $TDINTERNAL_COVERAGE_REPORT tdinternal-coverage-report-$today.log) | \
ssmtp "${receiver}" && echo "Report Sent!"
}
function lcovFunc {
echo "collect data by lcov"
cd $TDINTERNAL_DIR
sed -i 's/\x1b\[[0-9;]*m//g' $TDINTERNAL_COVERAGE_REPORT
# collect data
lcov -d . --capture --rc lcov_branch_coverage=1 --rc genhtml_branch_coverage=1 --no-external -b $TDINTERNAL_DIR -o coverage.info
# remove exclude paths
lcov --remove coverage.info '*/tests/*' '*/test/*' '*/deps/*' '*/plugins/*' '*/taosdef.h' \
--rc lcov_branch_coverage=1 -o coverage.info
# generate result
lcov -l --rc lcov_branch_coverage=1 coverage.info | tee -a $TDINTERNAL_COVERAGE_REPORT
genhtml -o html coverage.info
tar czf tdinternal-coverage-report-$today.tar.gz html coverage.info $TDINTERNAL_COVERAGE_REPORT
# push result to coveralls.io
# coveralls-lcov coverage.info | tee -a tdinternal-coverage-report-$today.log
}
function stopTaosd {
echo "Stop taosd"
systemctl stop taosd
PID=`ps -ef|grep -w taosd | grep -v grep | awk '{print $2}'`
while [ -n "$PID" ]
do
pkill -TERM -x taosd
sleep 1
PID=`ps -ef|grep -w taosd | grep -v grep | awk '{print $2}'`
done
}
WORK_DIR=/home/shuduo/work/taosdata
date >> $WORK_DIR/cron.log
echo "Run Coverage Test for TDinternal" | tee -a $WORK_DIR/cron.log
stopTaosd
buildTDinternal
runTest
lcovFunc
sendReport
stopTaosd
date >> $WORK_DIR/cron.log
echo "End of TDinternal Coverage Test" | tee -a $WORK_DIR/cron.log
#!/bin/bash
today=`date +"%Y%m%d"`
TDINTERNAL_DIR=/home/shuduo/work/taosdata/TDinternal
TDINTERNAL_TEST_REPORT=$TDINTERNAL_DIR/community/tests/tdinternal-report-$today.log
# Color setting
RED='\033[0;31m'
GREEN='\033[1;32m'
GREEN_DARK='\033[0;32m'
GREEN_UNDERLINE='\033[4;32m'
NC='\033[0m'
function buildTDinternal {
echo "check if TDinternal need build"
cd $TDINTERNAL_DIR
NEED_COMPILE=0
# git remote update
REMOTE_COMMIT=`git rev-parse --short remotes/origin/develop`
LOCAL_COMMIT=`git rev-parse --short @`
echo " LOCAL: $LOCAL_COMMIT"
echo "REMOTE: $REMOTE_COMMIT"
if [ "$LOCAL_COMMIT" == "$REMOTE_COMMIT" ]; then
echo "TDinternal repo is up-to-date"
else
echo "repo need to pull"
# git pull
# NEED_COMPILE=1
fi
# git submodule update --init --recursive
cd $TDINTERNAL_DIR/community
TDENGINE_REMOTE_COMMIT=`git rev-parse --short remotes/origin/develop`
TDENGINE_LOCAL_COMMIT=`git rev-parse --short @`
if [ "$TDENGINE_LOCAL_COMMIT" == "$TDENGINE_REMOTE_COMMIT" ]; then
echo "community repo is up-to-date"
else
echo "repo need to pull"
# git checkout develop
# git pull
# NEED_COMPILE=1
fi
cd $TDINTERNAL_DIR/debug
if [[ $NEED_COMPILE -eq 1 ]]; then
LOCAL_COMMIT=`git rev-parse --short @`
rm -rf *
cmake .. > /dev/null
make > /dev/null
fi
make install > /dev/null
}
function runUniqueCaseOneByOne {
while read -r line; do
if [[ $line =~ ^./test.sh* ]]; then
case=`echo $line | awk '{print $NF}'`
start_time=`date +%s`
./test.sh -f $case > /dev/null 2>&1 && \
echo -e "${GREEN}$case success${NC}" | tee -a $TDINTERNAL_TEST_REPORT || \
echo -e "${RED}$case failed${NC}" | tee -a $TDINTERNAL_TEST_REPORT
end_time=`date +%s`
echo execution time of $case was `expr $end_time - $start_time`s. | tee -a $TDINTERNAL_TEST_REPORT
fi
done < $1
}
function runPyCaseOneByOne {
while read -r line; do
if [[ $line =~ ^python.* ]]; then
if [[ $line != *sleep* ]]; then
case=`echo $line|awk '{print $NF}'`
start_time=`date +%s`
$line > /dev/null 2>&1 && ret=0 || ret=1
end_time=`date +%s`
if [[ $ret -eq 0 ]]; then
echo -e "${GREEN}$case success${NC}" | tee -a pytest-out.log
else
casename=`echo $case|sed 's/\//\-/g'`
find $TDINTERNAL_DIR/community/sim -name "*log" -exec tar czf $TDINTERNAL_DIR/fulltest-$today-$casename.log.tar.gz {} +
echo -e "${RED}$case failed and log saved${NC}" | tee -a pytest-out.log
fi
echo execution time of $case was `expr $end_time - $start_time`s. | tee -a pytest-out.log
else
$line > /dev/null 2>&1
fi
fi
done < $1
}
function runTest {
echo "Run Test"
cd $TDINTERNAL_DIR/community/tests/script
[ -d $TDINTERNAL_DIR/sim ] && rm -rf $TDINTERNAL_DIR/sim
[ -f $TDINTERNAL_TEST_REPORT ] && rm $TDINTERNAL_TEST_REPORT
runUniqueCaseOneByOne jenkins/basic.txt
totalSuccess=`grep 'success' $TDINTERNAL_TEST_REPORT | wc -l`
if [ "$totalSuccess" -gt "0" ]; then
echo -e "\n${GREEN} ### Total $totalSuccess TDinternal case(s) succeed! ### ${NC}" | tee -a $TDINTERNAL_TEST_REPORT
fi
totalFailed=`grep 'failed\|fault' $TDINTERNAL_TEST_REPORT | wc -l`
if [ "$totalFailed" -ne "0" ]; then
echo -e "${RED} ### Total $totalFailed TDinternal case(s) failed! ### ${NC}\n" | tee -a $TDINTERNAL_TEST_REPORT
# exit $totalPyFailed
fi
cd $TDINTERNAL_DIR/community/tests/pytest
[ -d $TDINTERNAL_DIR/community/sim ] && rm -rf $TDINTERNAL_DIR/community/sim
[ -f pytest-out.log ] && rm -f pytest-out.log
/usr/bin/time -f "Total spent: %e" ./test-all.sh full python | tee -a $TDINTERNAL_TEST_REPORT
runPyCaseOneByOne fulltest.sh
totalPySuccess=`grep 'success' pytest-out.log | wc -l`
totalPyFailed=`grep 'failed\|fault' pytest-out.log | wc -l`
cat pytest-out.log >> $TDINTERNAL_TEST_REPORT
if [ "$totalPySuccess" -gt "0" ]; then
echo -e "\n${GREEN} ### Total $totalPySuccess python case(s) succeed! ### ${NC}" \
| tee -a $TDINTERNAL_TEST_REPORT
fi
if [ "$totalPyFailed" -ne "0" ]; then
echo -e "\n${RED} ### Total $totalPyFailed python case(s) failed! ### ${NC}" \
| tee -a $TDINTERNAL_TEST_REPORT
fi
}
function sendReport {
echo "Send Report"
receiver="sdsang@taosdata.com, sangshuduo@gmail.com, pxiao@taosdata.com"
mimebody="MIME-Version: 1.0\nContent-Type: text/html; charset=utf-8\n"
cd $TDINTERNAL_DIR
sed -i 's/\x1b\[[0-9;]*m//g' $TDINTERNAL_TEST_REPORT
BODY_CONTENT=`cat $TDINTERNAL_TEST_REPORT`
cd $TDINTERNAL_DIR
tar czf fulltest-$today.tar.gz fulltest-$today-*.log.tar.gz
echo -e "to: ${receiver}\nsubject: TDinternal test report ${today}, commit ID: ${LOCAL_COMMIT}\n\n${today}:\n${BODY_CONTENT}" | \
(cat - && uuencode $TDINTERNAL_TEST_REPORT tdinternal-report-$today.log) | \
ssmtp "${receiver}" && echo "Report Sent!"
}
function stopTaosd {
echo "Stop taosd"
systemctl stop taosd
PID=`ps -ef|grep -w taosd | grep -v grep | awk '{print $2}'`
while [ -n "$PID" ]
do
pkill -KILL -x taosd
sleep 1
PID=`ps -ef|grep -w taosd | grep -v grep | awk '{print $2}'`
done
}
WORK_DIR=/home/shuduo/work/taosdata
date >> $WORK_DIR/cron.log
echo "Run Test for TDinternal" | tee -a $WORK_DIR/cron.log
buildTDinternal
runTest
sendReport
stopTaosd
date >> $WORK_DIR/cron.log
echo "End of TDinternal Test" | tee -a $WORK_DIR/cron.log
......@@ -162,6 +162,7 @@ python3 ./test.py -f client/client.py
# Misc
python3 testCompress.py
python3 testNoCompress.py
python3 testMinTablesPerVnode.py
# functions
python3 ./test.py -f functions/function_avg.py
......@@ -180,4 +181,4 @@ python3 ./test.py -f functions/function_spread.py
python3 ./test.py -f functions/function_stddev.py
python3 ./test.py -f functions/function_sum.py
python3 ./test.py -f functions/function_top.py
python3 ./test.py -f functions/function_twa.py
\ No newline at end of file
python3 ./test.py -f functions/function_twa.py
###################################################################
# Copyright (c) 2020 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# -*- coding: utf-8 -*-
import sys
import taos
import time
from util.log import *
from util.cases import *
from util.sql import *
from util.sub import *
class TDTestCase:
def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor(), logSql)
self.conn = conn
def run(self):
sqlstr = "select * from t0"
topic = "test"
now = int(time.time() * 1000)
tdSql.prepare()
tdLog.info("create a table and insert 10 rows.")
tdSql.execute("create table t0(ts timestamp, a int, b int);")
for i in range(0, 10):
tdSql.execute("insert into t0 values (%d, %d, %d);" % (now + i, i, i))
tdLog.info("consumption 01.")
tdSub.init(self.conn.subscribe(True, topic, sqlstr, 0))
tdSub.consume()
tdSub.checkRows(10)
tdLog.info("consumption 02: no new rows inserted")
tdSub.consume()
tdSub.checkRows(0)
tdLog.info("consumption 03: after one new rows inserted")
tdSql.execute("insert into t0 values (%d, 10, 10);" % (now + 10))
tdSub.consume()
tdSub.checkRows(1)
tdLog.info("consumption 04: keep progress and continue previous subscription")
tdSub.close(True)
tdSub.init(self.conn.subscribe(False, topic, sqlstr, 0))
tdSub.consume()
tdSub.checkRows(0)
tdLog.info("consumption 05: remove progress and continue previous subscription")
tdSub.close(False)
tdSub.init(self.conn.subscribe(False, topic, sqlstr, 0))
tdSub.consume()
tdSub.checkRows(11)
tdLog.info("consumption 06: keep progress and restart the subscription")
tdSub.close(True)
tdSub.init(self.conn.subscribe(True, topic, sqlstr, 0))
tdSub.consume()
tdSub.checkRows(11)
tdSub.close(True)
def stop(self):
tdSub.close(False)
tdSql.close()
tdLog.success("%s successfully executed" % __file__)
tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())
###################################################################
# Copyright (c) 2020 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# -*- coding: utf-8 -*-
import sys
import taos
import time
from util.log import *
from util.cases import *
from util.sql import *
from util.sub import *
class TDTestCase:
def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor(), logSql)
self.conn = conn
def run(self):
sqlstr = "select * from meters"
topic = "test"
now = int(time.time() * 1000)
tdSql.prepare()
tdLog.info("create a super table and 10 sub-tables, then insert 5 rows into each sub-table.")
tdSql.execute("create table meters(ts timestamp, a int, b int) tags(area int, loc binary(20));")
for i in range(0, 10):
for j in range(0, 5):
tdSql.execute("insert into t%d using meters tags(%d, 'area%d') values (%d, %d, %d);" % (i, i, i, now + j, j, j))
tdLog.info("consumption 01.")
tdSub.init(self.conn.subscribe(True, topic, sqlstr, 0))
tdSub.consume()
tdSub.checkRows(50)
tdLog.info("consumption 02: no new rows inserted")
tdSub.consume()
tdSub.checkRows(0)
tdLog.info("consumption 03: after one new rows inserted")
tdSql.execute("insert into t0 values (%d, 10, 10);" % (now + 10))
tdSub.consume()
tdSub.checkRows(1)
tdLog.info("consumption 04: keep progress and continue previous subscription")
tdSub.close(True)
tdSub.init(self.conn.subscribe(False, topic, sqlstr, 0))
tdSub.consume()
tdSub.checkRows(0)
tdLog.info("consumption 05: remove progress and continue previous subscription")
tdSub.close(False)
tdSub.init(self.conn.subscribe(False, topic, sqlstr, 0))
tdSub.consume()
tdSub.checkRows(51)
tdLog.info("consumption 06: keep progress and restart the subscription")
tdSub.close(True)
tdSub.init(self.conn.subscribe(True, topic, sqlstr, 0))
tdSub.consume()
tdSub.checkRows(51)
tdLog.info("consumption 07: insert one row to two table then remove one table")
tdSql.execute("insert into t0 values (%d, 11, 11);" % (now + 11))
tdSql.execute("insert into t1 values (%d, 11, 11);" % (now + 11))
tdSql.execute("drop table t0")
tdSub.consume()
tdSub.checkRows(1)
tdLog.info("consumption 08: check timestamp criteria")
tdSub.close(False)
tdSub.init(self.conn.subscribe(True, topic, sqlstr + " where ts > %d" % now, 0))
tdSub.consume()
tdSub.checkRows(37)
tdLog.info("consumption 09: insert large timestamp to t2 then insert smaller timestamp to t1")
tdSql.execute("insert into t2 values (%d, 100, 100);" % (now + 100))
tdSub.consume()
tdSub.checkRows(1)
tdSql.execute("insert into t1 values (%d, 12, 12);" % (now + 12))
tdSub.consume()
tdSub.checkRows(1)
tdLog.info("consumption 10: field criteria")
tdSub.close(True)
tdSub.init(self.conn.subscribe(False, topic, sqlstr + " where a > 100", 0))
tdSql.execute("insert into t2 values (%d, 101, 100);" % (now + 101))
tdSql.execute("insert into t2 values (%d, 100, 100);" % (now + 102))
tdSql.execute("insert into t2 values (%d, 102, 100);" % (now + 103))
tdSub.consume()
tdSub.checkRows(2)
tdLog.info("consumption 11: two vnodes")
tdSql.execute("insert into t2 values (%d, 102, 100);" % (now + 104))
tdSql.execute("insert into t9 values (%d, 102, 100);" % (now + 104))
tdSub.consume()
tdSub.checkRows(2)
def stop(self):
tdSub.close(False)
tdSql.close()
tdLog.success("%s successfully executed" % __file__)
tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())
EXEC_DIR=`dirname "$0"`
if [[ $EXEC_DIR != "." ]]
then
echo "ERROR: Please execute `basename "$0"` in its own directory (for now anyway, pardon the dust)"
exit 1
fi
CURR_DIR=`pwd`
IN_TDINTERNAL="community"
if [[ "$CURR_DIR" == *"$IN_TDINTERNAL"* ]]; then
TAOS_DIR=$CURR_DIR/../../..
else
TAOS_DIR=$CURR_DIR/../..
fi
TAOSD_DIR=`find $TAOS_DIR -name "taosd"|grep bin|head -n1`
LIB_DIR=`echo $TAOSD_DIR|rev|cut -d '/' -f 3,4,5,6|rev`/lib
export PYTHONPATH=$(pwd)/../../src/connector/python/linux/python3
export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:$LIB_DIR
if [[ "$1" == *"test.py"* ]]; then
python3 ./test.py $@
else
python3 $1 $@
fi
#!/usr/bin/python
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# install pip
# pip install src/connector/python/linux/python2/
# -*- coding: utf-8 -*-
import sys
import getopt
import subprocess
from distutils.log import warn as printf
from util.log import *
from util.dnodes import *
from util.cases import *
from util.sql import *
import taos
if __name__ == "__main__":
fileName = "all"
deployPath = ""
testCluster = False
valgrind = 0
logSql = True
stop = 0
    opts, args = getopt.gnu_getopt(sys.argv[1:], 'l:sgh', [
        'logSql=', 'stop', 'valgrind', 'help'])
for key, value in opts:
if key in ['-h', '--help']:
tdLog.printNoPrefix(
'A collection of test cases written using Python')
tdLog.printNoPrefix('-l <True:False> logSql Flag')
tdLog.printNoPrefix('-s stop All dnodes')
tdLog.printNoPrefix('-g valgrind Test Flag')
sys.exit(0)
if key in ['-l', '--logSql']:
if (value.upper() == "TRUE"):
logSql = True
elif (value.upper() == "FALSE"):
logSql = False
else:
tdLog.printNoPrefix("logSql value %s is invalid" % logSql)
sys.exit(0)
if key in ['-g', '--valgrind']:
valgrind = 1
if key in ['-s', '--stop']:
stop = 1
if (stop != 0):
if (valgrind == 0):
toBeKilled = "taosd"
else:
toBeKilled = "valgrind.bin"
killCmd = "ps -ef|grep -w %s| grep -v grep | awk '{print $2}' | xargs kill -HUP > /dev/null 2>&1" % toBeKilled
psCmd = "ps -ef|grep -w %s| grep -v grep | awk '{print $2}'" % toBeKilled
        processID = subprocess.check_output(psCmd, shell=True).decode("utf-8")
while(processID):
os.system(killCmd)
time.sleep(1)
            processID = subprocess.check_output(psCmd, shell=True).decode("utf-8")
for port in range(6030, 6041):
usePortPID = "lsof -i tcp:%d | grep LISTEn | awk '{print $2}'" % port
processID = subprocess.check_output(usePortPID, shell=True)
if processID:
killCmd = "kill -TERM %s" % processID
os.system(killCmd)
fuserCmd = "fuser -k -n tcp %d" % port
os.system(fuserCmd)
if valgrind:
time.sleep(2)
tdLog.info('stop All dnodes')
sys.exit(0)
tdDnodes.init(deployPath)
tdDnodes.setTestCluster(testCluster)
tdDnodes.setValgrind(valgrind)
tdDnodes.stopAll()
tdDnodes.addSimExtraCfg("minTablesPerVnode", "100")
tdDnodes.deploy(1)
tdDnodes.start(1)
host = '127.0.0.1'
tdLog.info("Procedures for tdengine deployed in %s" % (host))
tdCases.logSql(logSql)
conn = taos.connect(
host,
config=tdDnodes.getSimCfgPath())
tdSql.init(conn.cursor(), True)
tdSql.execute("DROP DATABASE IF EXISTS db")
tdSql.execute("CREATE DATABASE IF NOT EXISTS db")
tdSql.execute("USE db")
for i in range(0, 100):
tdSql.execute(
"CREATE TABLE IF NOT EXISTS tb%d (ts TIMESTAMP, temperature INT, humidity FLOAT)" % i)
for i in range(1, 6):
tdSql.execute("INSERT INTO tb99 values (now + %da, %d, %f)" % (i, i, i * 1.0))
tdSql.execute("DROP TABLE tb99")
tdSql.execute(
"CREATE TABLE IF NOT EXISTS tb99 (ts TIMESTAMP, temperature INT, humidity FLOAT)")
tdSql.query("SELECT * FROM tb99")
tdSql.checkRows(0)
conn.close()
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# -*- coding: utf-8 -*-
import sys
import os
import os.path
import subprocess
from util.log import *
class TDSimClient:
def __init__(self):
self.testCluster = False
self.cfgDict = {
"numOfLogLines": "100000000",
"numOfThreadsPerCore": "2.0",
"locale": "en_US.UTF-8",
"charset": "UTF-8",
"asyncLog": "0",
"minTablesPerVnode": "4",
"maxTablesPerVnode": "1000",
"tableIncStepPerVnode": "10000",
"maxVgroupsPerDb": "1000",
"sdbDebugFlag": "143",
"rpcDebugFlag": "135",
"tmrDebugFlag": "131",
"cDebugFlag": "135",
"udebugFlag": "135",
"jnidebugFlag": "135",
"qdebugFlag": "135",
}
def init(self, path):
self.__init__()
self.path = path
def getLogDir(self):
self.logDir = "%s/sim/psim/log" % (self.path)
return self.logDir
def getCfgDir(self):
self.cfgDir = "%s/sim/psim/cfg" % (self.path)
return self.cfgDir
def setTestCluster(self, value):
self.testCluster = value
def addExtraCfg(self, option, value):
self.cfgDict.update({option: value})
def cfg(self, option, value):
cmd = "echo '%s %s' >> %s" % (option, value, self.cfgPath)
if os.system(cmd) != 0:
tdLog.exit(cmd)
def deploy(self):
self.logDir = "%s/sim/psim/log" % (self.path)
self.cfgDir = "%s/sim/psim/cfg" % (self.path)
self.cfgPath = "%s/sim/psim/cfg/taos.cfg" % (self.path)
cmd = "rm -rf " + self.logDir
if os.system(cmd) != 0:
tdLog.exit(cmd)
cmd = "mkdir -p " + self.logDir
if os.system(cmd) != 0:
tdLog.exit(cmd)
cmd = "rm -rf " + self.cfgDir
if os.system(cmd) != 0:
tdLog.exit(cmd)
cmd = "mkdir -p " + self.cfgDir
if os.system(cmd) != 0:
tdLog.exit(cmd)
cmd = "touch " + self.cfgPath
if os.system(cmd) != 0:
tdLog.exit(cmd)
if self.testCluster:
self.cfg("masterIp", "192.168.0.1")
self.cfg("secondIp", "192.168.0.2")
self.cfg("logDir", self.logDir)
for key, value in self.cfgDict.items():
self.cfg(key, value)
tdLog.debug("psim is deployed and configured by %s" % (self.cfgPath))
class TDDnode:
def __init__(self, index):
self.index = index
self.running = 0
self.deployed = 0
self.testCluster = False
self.valgrind = 0
def init(self, path):
self.path = path
def setTestCluster(self, value):
self.testCluster = value
def setValgrind(self, value):
self.valgrind = value
def getDataSize(self):
totalSize = 0
if (self.deployed == 1):
for dirpath, dirnames, filenames in os.walk(self.dataDir):
for f in filenames:
fp = os.path.join(dirpath, f)
if not os.path.islink(fp):
totalSize = totalSize + os.path.getsize(fp)
return totalSize
def deploy(self):
self.logDir = "%s/sim/dnode%d/log" % (self.path, self.index)
self.dataDir = "%s/sim/dnode%d/data" % (self.path, self.index)
self.cfgDir = "%s/sim/dnode%d/cfg" % (self.path, self.index)
self.cfgPath = "%s/sim/dnode%d/cfg/taos.cfg" % (
self.path, self.index)
cmd = "rm -rf " + self.dataDir
if os.system(cmd) != 0:
tdLog.exit(cmd)
cmd = "rm -rf " + self.logDir
if os.system(cmd) != 0:
tdLog.exit(cmd)
cmd = "rm -rf " + self.cfgDir
if os.system(cmd) != 0:
tdLog.exit(cmd)
cmd = "mkdir -p " + self.dataDir
if os.system(cmd) != 0:
tdLog.exit(cmd)
cmd = "mkdir -p " + self.logDir
if os.system(cmd) != 0:
tdLog.exit(cmd)
cmd = "mkdir -p " + self.cfgDir
if os.system(cmd) != 0:
tdLog.exit(cmd)
cmd = "touch " + self.cfgPath
if os.system(cmd) != 0:
tdLog.exit(cmd)
if self.testCluster:
self.startIP()
if self.testCluster:
self.cfg("masterIp", "192.168.0.1")
self.cfg("secondIp", "192.168.0.2")
self.cfg("publicIp", "192.168.0.%d" % (self.index))
self.cfg("internalIp", "192.168.0.%d" % (self.index))
self.cfg("privateIp", "192.168.0.%d" % (self.index))
self.cfg("dataDir", self.dataDir)
self.cfg("logDir", self.logDir)
self.cfg("numOfLogLines", "100000000")
self.cfg("mnodeEqualVnodeNum", "0")
self.cfg("walLevel", "2")
self.cfg("fsync", "1000")
self.cfg("statusInterval", "1")
self.cfg("numOfMnodes", "3")
self.cfg("numOfThreadsPerCore", "2.0")
self.cfg("monitor", "0")
self.cfg("maxVnodeConnections", "30000")
self.cfg("maxMgmtConnections", "30000")
self.cfg("maxMeterConnections", "30000")
self.cfg("maxShellConns", "30000")
self.cfg("locale", "en_US.UTF-8")
self.cfg("charset", "UTF-8")
self.cfg("asyncLog", "0")
self.cfg("anyIp", "0")
self.cfg("dDebugFlag", "135")
self.cfg("mDebugFlag", "135")
self.cfg("sdbDebugFlag", "135")
self.cfg("rpcDebugFlag", "135")
self.cfg("tmrDebugFlag", "131")
self.cfg("cDebugFlag", "135")
self.cfg("httpDebugFlag", "135")
self.cfg("monitorDebugFlag", "135")
self.cfg("udebugFlag", "135")
self.cfg("jnidebugFlag", "135")
self.cfg("qdebugFlag", "135")
self.deployed = 1
tdLog.debug(
"dnode:%d is deployed and configured by %s" %
(self.index, self.cfgPath))
def getBuildPath(self):
        selfPath = os.path.dirname(os.path.realpath(__file__))
        buildPath = ""
if ("community" in selfPath):
projPath = selfPath[:selfPath.find("community")]
else:
projPath = selfPath[:selfPath.find("tests")]
for root, dirs, files in os.walk(projPath):
if ("taosd" in files):
rootRealPath = os.path.dirname(os.path.realpath(root))
if ("packaging" not in rootRealPath):
buildPath = root[:len(root)-len("/build/bin")]
break
return buildPath
def start(self):
buildPath = self.getBuildPath()
if (buildPath == ""):
tdLog.exit("taosd not found!")
else:
tdLog.info("taosd found in %s" % buildPath)
binPath = buildPath + "/build/bin/taosd"
if self.deployed == 0:
tdLog.exit("dnode:%d is not deployed" % (self.index))
if self.valgrind == 0:
cmd = "nohup %s -c %s > /dev/null 2>&1 & " % (
binPath, self.cfgDir)
else:
valgrindCmdline = "valgrind --tool=memcheck --leak-check=full --show-reachable=no --track-origins=yes --show-leak-kinds=all -v --workaround-gcc296-bugs=yes"
cmd = "nohup %s %s -c %s 2>&1 & " % (
valgrindCmdline, binPath, self.cfgDir)
print(cmd)
if os.system(cmd) != 0:
tdLog.exit(cmd)
self.running = 1
tdLog.debug("dnode:%d is running with %s " % (self.index, cmd))
tdLog.debug("wait 5 seconds for the dnode:%d to start." % (self.index))
time.sleep(5)
def stop(self):
if self.valgrind == 0:
toBeKilled = "taosd"
else:
toBeKilled = "valgrind.bin"
if self.running != 0:
psCmd = "ps -ef|grep -w %s| grep -v grep | awk '{print $2}'" % toBeKilled
processID = subprocess.check_output(
psCmd, shell=True).decode("utf-8")
while(processID):
killCmd = "kill -INT %s > /dev/null 2>&1" % processID
os.system(killCmd)
time.sleep(1)
processID = subprocess.check_output(
psCmd, shell=True).decode("utf-8")
for port in range(6030, 6041):
fuserCmd = "fuser -k -n tcp %d" % port
os.system(fuserCmd)
if self.valgrind:
time.sleep(2)
self.running = 0
tdLog.debug("dnode:%d is stopped by kill -INT" % (self.index))
def forcestop(self):
if self.valgrind == 0:
toBeKilled = "taosd"
else:
toBeKilled = "valgrind.bin"
if self.running != 0:
psCmd = "ps -ef|grep -w %s| grep -v grep | awk '{print $2}'" % toBeKilled
processID = subprocess.check_output(
psCmd, shell=True).decode("utf-8")
while(processID):
killCmd = "kill -KILL %s > /dev/null 2>&1" % processID
os.system(killCmd)
time.sleep(1)
processID = subprocess.check_output(
psCmd, shell=True).decode("utf-8")
for port in range(6030, 6041):
fuserCmd = "fuser -k -n tcp %d" % port
os.system(fuserCmd)
if self.valgrind:
time.sleep(2)
self.running = 0
tdLog.debug("dnode:%d is stopped by kill -KILL" % (self.index))
def startIP(self):
cmd = "sudo ifconfig lo:%d 192.168.0.%d up" % (self.index, self.index)
if os.system(cmd) != 0:
tdLog.exit(cmd)
def stopIP(self):
cmd = "sudo ifconfig lo:%d 192.168.0.%d down" % (
self.index, self.index)
if os.system(cmd) != 0:
tdLog.exit(cmd)
def cfg(self, option, value):
cmd = "echo '%s %s' >> %s" % (option, value, self.cfgPath)
if os.system(cmd) != 0:
tdLog.exit(cmd)
def getDnodeRootDir(self, index):
dnodeRootDir = "%s/sim/psim/dnode%d" % (self.path, index)
return dnodeRootDir
def getDnodesRootDir(self):
dnodesRootDir = "%s/sim/psim" % (self.path)
return dnodesRootDir
class TDDnodes:
def __init__(self):
self.dnodes = []
self.dnodes.append(TDDnode(1))
self.dnodes.append(TDDnode(2))
self.dnodes.append(TDDnode(3))
self.dnodes.append(TDDnode(4))
self.dnodes.append(TDDnode(5))
self.dnodes.append(TDDnode(6))
self.dnodes.append(TDDnode(7))
self.dnodes.append(TDDnode(8))
self.dnodes.append(TDDnode(9))
self.dnodes.append(TDDnode(10))
self.simDeployed = False
def init(self, path):
psCmd = "ps -ef|grep -w taosd| grep -v grep | awk '{print $2}'"
processID = subprocess.check_output(psCmd, shell=True).decode("utf-8")
while(processID):
killCmd = "kill -TERM %s > /dev/null 2>&1" % processID
os.system(killCmd)
time.sleep(1)
processID = subprocess.check_output(
psCmd, shell=True).decode("utf-8")
psCmd = "ps -ef|grep -w valgrind.bin| grep -v grep | awk '{print $2}'"
processID = subprocess.check_output(psCmd, shell=True).decode("utf-8")
while(processID):
killCmd = "kill -TERM %s > /dev/null 2>&1" % processID
os.system(killCmd)
time.sleep(1)
processID = subprocess.check_output(
psCmd, shell=True).decode("utf-8")
binPath = os.path.dirname(os.path.realpath(__file__))
binPath = binPath + "/../../../debug/"
tdLog.debug("binPath %s" % (binPath))
binPath = os.path.realpath(binPath)
tdLog.debug("binPath real path %s" % (binPath))
# cmd = "sudo cp %s/build/lib/libtaos.so /usr/local/lib/taos/" % (binPath)
# tdLog.debug(cmd)
# os.system(cmd)
# cmd = "sudo cp %s/build/bin/taos /usr/local/bin/taos/" % (binPath)
# if os.system(cmd) != 0 :
# tdLog.exit(cmd)
# tdLog.debug("execute %s" % (cmd))
# cmd = "sudo cp %s/build/bin/taosd /usr/local/bin/taos/" % (binPath)
# if os.system(cmd) != 0 :
# tdLog.exit(cmd)
# tdLog.debug("execute %s" % (cmd))
if path == "":
# self.path = os.path.expanduser('~')
self.path = os.path.abspath(os.path.join(binPath, ".."))  # project root above debug/
else:
self.path = os.path.realpath(path)
for i in range(len(self.dnodes)):
self.dnodes[i].init(self.path)
self.sim = TDSimClient()
self.sim.init(self.path)
def setTestCluster(self, value):
self.testCluster = value
def setValgrind(self, value):
self.valgrind = value
def deploy(self, index):
self.sim.setTestCluster(self.testCluster)
if (self.simDeployed == False):
self.sim.deploy()
self.simDeployed = True
self.check(index)
self.dnodes[index - 1].setTestCluster(self.testCluster)
self.dnodes[index - 1].setValgrind(self.valgrind)
self.dnodes[index - 1].deploy()
def cfg(self, index, option, value):
self.check(index)
self.dnodes[index - 1].cfg(option, value)
def start(self, index):
self.check(index)
self.dnodes[index - 1].start()
def stop(self, index):
self.check(index)
self.dnodes[index - 1].stop()
def getDataSize(self, index):
self.check(index)
return self.dnodes[index - 1].getDataSize()
def forcestop(self, index):
self.check(index)
self.dnodes[index - 1].forcestop()
def startIP(self, index):
self.check(index)
if self.testCluster:
self.dnodes[index - 1].startIP()
def stopIP(self, index):
self.check(index)
if self.dnodes[index - 1].testCluster:
self.dnodes[index - 1].stopIP()
def check(self, index):
if index < 1 or index > 10:
tdLog.exit("index:%d should on a scale of [1, 10]" % (index))
def stopAll(self):
tdLog.info("stop all dnodes")
for i in range(len(self.dnodes)):
self.dnodes[i].stop()
psCmd = "ps -ef | grep -w taosd | grep 'root' | grep -v grep | awk '{print $2}'"
processID = subprocess.check_output(psCmd, shell=True).decode("utf-8")
if processID:
cmd = "sudo systemctl stop taosd"
os.system(cmd)
# if os.system(cmd) != 0 :
# tdLog.exit(cmd)
psCmd = "ps -ef|grep -w taosd| grep -v grep | awk '{print $2}'"
processID = subprocess.check_output(psCmd, shell=True).decode("utf-8")
while(processID):
killCmd = "kill -TERM %s > /dev/null 2>&1" % processID
os.system(killCmd)
time.sleep(1)
processID = subprocess.check_output(
psCmd, shell=True).decode("utf-8")
psCmd = "ps -ef|grep -w valgrind.bin| grep -v grep | awk '{print $2}'"
processID = subprocess.check_output(psCmd, shell=True).decode("utf-8")
while(processID):
killCmd = "kill -TERM %s > /dev/null 2>&1" % processID
os.system(killCmd)
time.sleep(1)
processID = subprocess.check_output(
psCmd, shell=True).decode("utf-8")
# if os.system(cmd) != 0 :
# tdLog.exit(cmd)
def getDnodesRootDir(self):
dnodesRootDir = "%s/sim" % (self.path)
return dnodesRootDir
def getSimCfgPath(self):
return self.sim.getCfgDir()
def getSimLogPath(self):
return self.sim.getLogDir()
def addSimExtraCfg(self, option, value):
self.sim.addExtraCfg(option, value)
tdDnodes = TDDnodes()
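For reference, a test case drives this fixture roughly as follows (a minimal sketch; the calls come from the classes above, but the import path and the test body itself are assumptions):

# hypothetical test snippet
from util.dnodes import tdDnodes
tdDnodes.init("")              # kill stray taosd/valgrind processes, resolve paths
tdDnodes.setTestCluster(False)
tdDnodes.setValgrind(0)
tdDnodes.deploy(1)             # write cfg/log/data dirs for dnode 1
tdDnodes.start(1)              # launch taosd and wait for startup
# ... run SQL checks against the running dnode ...
tdDnodes.stopAll()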
......@@ -349,7 +349,7 @@ class TDDnodes:
psCmd = "ps -ef|grep -w taosd| grep -v grep | awk '{print $2}'"
processID = subprocess.check_output(psCmd, shell=True).decode("utf-8")
while(processID):
killCmd = "kill -KILL %s > /dev/null 2>&1" % processID
killCmd = "kill -TERM %s > /dev/null 2>&1" % processID
os.system(killCmd)
time.sleep(1)
processID = subprocess.check_output(
......@@ -358,7 +358,7 @@ class TDDnodes:
psCmd = "ps -ef|grep -w valgrind.bin| grep -v grep | awk '{print $2}'"
processID = subprocess.check_output(psCmd, shell=True).decode("utf-8")
while(processID):
killCmd = "kill -KILL %s > /dev/null 2>&1" % processID
killCmd = "kill -TERM %s > /dev/null 2>&1" % processID
os.system(killCmd)
time.sleep(1)
processID = subprocess.check_output(
......@@ -465,7 +465,7 @@ class TDDnodes:
psCmd = "ps -ef|grep -w taosd| grep -v grep | awk '{print $2}'"
processID = subprocess.check_output(psCmd, shell=True).decode("utf-8")
while(processID):
killCmd = "kill -KILL %s > /dev/null 2>&1" % processID
killCmd = "kill -TERM %s > /dev/null 2>&1" % processID
os.system(killCmd)
time.sleep(1)
processID = subprocess.check_output(
......@@ -474,7 +474,7 @@ class TDDnodes:
psCmd = "ps -ef|grep -w valgrind.bin| grep -v grep | awk '{print $2}'"
processID = subprocess.check_output(psCmd, shell=True).decode("utf-8")
while(processID):
killCmd = "kill -KILL %s > /dev/null 2>&1" % processID
killCmd = "kill -TERM %s > /dev/null 2>&1" % processID
os.system(killCmd)
time.sleep(1)
processID = subprocess.check_output(
......
......@@ -351,7 +351,7 @@ class TDDnodes:
psCmd = "ps -ef|grep -w taosd| grep -v grep | awk '{print $2}'"
processID = subprocess.check_output(psCmd, shell=True).decode("utf-8")
while(processID):
killCmd = "kill -KILL %s > /dev/null 2>&1" % processID
killCmd = "kill -TERM %s > /dev/null 2>&1" % processID
os.system(killCmd)
time.sleep(1)
processID = subprocess.check_output(
......@@ -360,7 +360,7 @@ class TDDnodes:
psCmd = "ps -ef|grep -w valgrind.bin| grep -v grep | awk '{print $2}'"
processID = subprocess.check_output(psCmd, shell=True).decode("utf-8")
while(processID):
killCmd = "kill -KILL %s > /dev/null 2>&1" % processID
killCmd = "kill -TERM %s > /dev/null 2>&1" % processID
os.system(killCmd)
time.sleep(1)
processID = subprocess.check_output(
......@@ -467,7 +467,7 @@ class TDDnodes:
psCmd = "ps -ef|grep -w taosd| grep -v grep | awk '{print $2}'"
processID = subprocess.check_output(psCmd, shell=True).decode("utf-8")
while(processID):
killCmd = "kill -KILL %s > /dev/null 2>&1" % processID
killCmd = "kill -TERM %s > /dev/null 2>&1" % processID
os.system(killCmd)
time.sleep(1)
processID = subprocess.check_output(
......@@ -476,7 +476,7 @@ class TDDnodes:
psCmd = "ps -ef|grep -w valgrind.bin| grep -v grep | awk '{print $2}'"
processID = subprocess.check_output(psCmd, shell=True).decode("utf-8")
while(processID):
killCmd = "kill -KILL %s > /dev/null 2>&1" % processID
killCmd = "kill -TERM %s > /dev/null 2>&1" % processID
os.system(killCmd)
time.sleep(1)
processID = subprocess.check_output(
......
###################################################################
# Copyright (c) 2020 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# -*- coding: utf-8 -*-
import sys
import os
import time
import datetime
from util.log import *
class TDSub:
def __init__(self):
self.consumedRows = 0
self.consumedCols = 0
def init(self, sub):
self.sub = sub
def close(self, keepProgress):
self.sub.close(keepProgress)
def consume(self):
self.data = self.sub.consume()
self.consumedRows = len(self.data)
self.consumedCols = len(self.sub.fields)
return self.consumedRows
def checkRows(self, expectRows):
if self.consumedRows != expectRows:
tdLog.exit("consumed rows:%d != expect:%d" % (self.consumedRows, expectRows))
tdLog.info("consumed rows:%d == expect:%d" % (self.consumedRows, expectRows))
tdSub = TDSub()
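A subscription test would use this wrapper roughly as follows (a sketch; the subscribe() call assumes the Python connector's restart/topic/sql/interval signature):

# hypothetical usage
import taos
conn = taos.connect()          # defaults to the local dnode
sub = conn.subscribe(True, "topic1", "select * from db.t1", 1000)
tdSub.init(sub)
tdSub.consume()
tdSub.checkRows(5)
tdSub.close(False)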
......@@ -249,6 +249,7 @@ cd ../../../debug; make
./test.sh -f unique/big/maxvnodes.sim
./test.sh -f unique/big/tcp.sim
./test.sh -f unique/cluster/alter.sim
./test.sh -f unique/cluster/balance1.sim
./test.sh -f unique/cluster/balance2.sim
./test.sh -f unique/cluster/balance3.sim
......@@ -325,7 +326,7 @@ cd ../../../debug; make
#./test.sh -f unique/arbitrator/dn2_mn1_cache_file_sync.sim
./test.sh -f unique/arbitrator/dn3_mn1_full_createTableFail.sim
./test.sh -f unique/arbitrator/dn3_mn1_multiCreateDropTable.sim
./test.sh -f unique/arbitrator/dn3_mn1_nw_disable_timeout_autoDropDnode.sim
#./test.sh -f unique/arbitrator/dn3_mn1_nw_disable_timeout_autoDropDnode.sim
#./test.sh -f unique/arbitrator/dn3_mn1_replica2_wal1_AddDelDnode.sim
./test.sh -f unique/arbitrator/dn3_mn1_replica_change_dropDnod.sim
./test.sh -f unique/arbitrator/dn3_mn1_replica_change.sim
......
#!/bin/bash
# Usage: exec.sh -n <nodeName> -s <start|stop> [-c] [-t] [-u <users>] [-x <signal>]
#   -n  node name (e.g. dnode1)
#   -s  start or stop the node
#   -c  clear the node's mgmt/tsdb data directories
#   -t  run taosd under valgrind
#   -u  users (parsed but currently unused)
#   -x  signal used to stop taosd (e.g. SIGKILL; default SIGINT)
NODE_NAME=
EXEC_OPTION=
CLEAR_OPTION="false"
SHELL_OPTION="false"
while getopts "n:s:u:x:ct" arg
do
case $arg in
n)
NODE_NAME=$OPTARG
;;
s)
EXEC_OPTION=$OPTARG
;;
c)
CLEAR_OPTION="clear"
;;
t)
SHELL_OPTION="true"
;;
u)
USERS=$OPTARG
;;
x)
SIGNAL=$OPTARG
;;
?)
echo "unkown argument"
;;
esac
done
SCRIPT_DIR=`dirname $0`
cd $SCRIPT_DIR/../
SCRIPT_DIR=`pwd`
IN_TDINTERNAL="community"
if [[ "$SCRIPT_DIR" == *"$IN_TDINTERNAL"* ]]; then
cd ../../..
else
cd ../../
fi
TAOS_DIR=`pwd`
TAOSD_DIR=`find . -name "taosd"|grep bin|head -n1`
if [[ "$TAOSD_DIR" == *"$IN_TDINTERNAL"* ]]; then
BIN_DIR=`find . -name "taosd"|grep bin|head -n1|cut -d '/' --fields=2,3`
else
BIN_DIR=`find . -name "taosd"|grep bin|head -n1|cut -d '/' --fields=2`
fi
BUILD_DIR=$TAOS_DIR/$BIN_DIR/build
SIM_DIR=$TAOS_DIR/sim
NODE_DIR=$SIM_DIR/$NODE_NAME
EXE_DIR=$BUILD_DIR/bin
CFG_DIR=$NODE_DIR/cfg
LOG_DIR=$NODE_DIR/log
DATA_DIR=$NODE_DIR/data
MGMT_DIR=$NODE_DIR/data/mgmt
TSDB_DIR=$NODE_DIR/data/tsdb
TAOS_CFG=$NODE_DIR/cfg/taos.cfg
echo ------------ $EXEC_OPTION $NODE_NAME
TAOS_FLAG=$SIM_DIR/tsim/flag
if [ -f "$TAOS_FLAG" ]; then
EXE_DIR=/usr/local/bin/taos
fi
if [ "$CLEAR_OPTION" = "clear" ]; then
echo rm -rf $MGMT_DIR $TSDB_DIR
rm -rf $TSDB_DIR
rm -rf $MGMT_DIR
fi
if [ "$EXEC_OPTON" = "start" ]; then
echo "ExcuteCmd:" $EXE_DIR/taosd -c $CFG_DIR
if [ "$SHELL_OPTION" = "true" ]; then
TT=`date +%s`
mkdir ${LOG_DIR}/${TT}
nohup valgrind --log-file=${LOG_DIR}/${TT}/valgrind.log --tool=memcheck --leak-check=full --show-reachable=no --track-origins=yes --show-leak-kinds=all -v --workaround-gcc296-bugs=yes $EXE_DIR/taosd -c $CFG_DIR > /dev/null 2>&1 &
else
nohup $EXE_DIR/taosd -c $CFG_DIR > /dev/null 2>&1 &
fi
else
#relative path
RCFG_DIR=sim/$NODE_NAME/cfg
PID=`ps -ef|grep taosd | grep $RCFG_DIR | grep -v grep | awk '{print $2}'`
while [ -n "$PID" ]
do
if [ "$SIGNAL" = "SIGKILL" ]; then
echo try to kill by signal SIGKILL
kill -9 $PID
else
echo try to kill by signal SIGINT
kill -SIGINT $PID
fi
sleep 1
PID=`ps -ef|grep taosd | grep $RCFG_DIR | grep -v grep | awk '{print $2}'`
done
fi
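# Typical invocations, as issued by the .sim scripts below:
#   system sh/exec.sh -n dnode1 -s start
#   system sh/exec.sh -n dnode1 -s stop -x SIGINT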
......@@ -4,10 +4,6 @@ system sh/deploy.sh -n dnode1 -i 1
system sh/deploy.sh -n dnode2 -i 2
system sh/deploy.sh -n dnode3 -i 3
system sh/cfg.sh -n dnode1 -c walLevel -v 2
system sh/cfg.sh -n dnode2 -c walLevel -v 2
system sh/cfg.sh -n dnode3 -c walLevel -v 2
system sh/cfg.sh -n dnode1 -c numOfMnodes -v 3
system sh/cfg.sh -n dnode2 -c numOfMnodes -v 3
system sh/cfg.sh -n dnode3 -c numOfMnodes -v 3
......@@ -16,64 +12,28 @@ system sh/cfg.sh -n dnode1 -c mnodeEqualVnodeNum -v 4
system sh/cfg.sh -n dnode2 -c mnodeEqualVnodeNum -v 4
system sh/cfg.sh -n dnode3 -c mnodeEqualVnodeNum -v 4
system sh/cfg.sh -n dnode1 -c http -v 1
system sh/cfg.sh -n dnode2 -c http -v 1
system sh/cfg.sh -n dnode3 -c http -v 1
system sh/cfg.sh -n dnode1 -c mDebugFlag -v 143
system sh/cfg.sh -n dnode2 -c mDebugFlag -v 143
system sh/cfg.sh -n dnode3 -c mDebugFlag -v 143
system sh/cfg.sh -n dnode1 -c sdbDebugFlag -v 143
system sh/cfg.sh -n dnode2 -c sdbDebugFlag -v 143
system sh/cfg.sh -n dnode3 -c sdbDebugFlag -v 143
system sh/cfg.sh -n dnode1 -c sdebugFlag -v 143
system sh/cfg.sh -n dnode2 -c sdebugFlag -v 143
system sh/cfg.sh -n dnode3 -c sdebugFlag -v 143
system sh/cfg.sh -n dnode1 -c rpcDebugFlag -v 135
system sh/cfg.sh -n dnode2 -c rpcDebugFlag -v 135
system sh/cfg.sh -n dnode3 -c rpcDebugFlag -v 135
system sh/cfg.sh -n dnode1 -c tsdbDebugFlag -v 131
system sh/cfg.sh -n dnode2 -c tsdbDebugFlag -v 131
system sh/cfg.sh -n dnode3 -c tsdbDebugFlag -v 131
system sh/cfg.sh -n dnode1 -c mqttDebugFlag -v 131
system sh/cfg.sh -n dnode2 -c mqttDebugFlag -v 131
system sh/cfg.sh -n dnode3 -c mqttDebugFlag -v 131
system sh/cfg.sh -n dnode1 -c qdebugFlag -v 131
system sh/cfg.sh -n dnode2 -c qdebugFlag -v 131
system sh/cfg.sh -n dnode3 -c qdebugFlag -v 131
system sh/cfg.sh -n dnode1 -c cDebugFlag -v 135
system sh/cfg.sh -n dnode2 -c cDebugFlag -v 135
system sh/cfg.sh -n dnode3 -c cDebugFlag -v 135
system sh/cfg.sh -n dnode1 -c udebugFlag -v 131
system sh/cfg.sh -n dnode2 -c udebugFlag -v 131
system sh/cfg.sh -n dnode3 -c udebugFlag -v 131
system sh/cfg.sh -n dnode1 -c http -v 0
system sh/cfg.sh -n dnode2 -c http -v 0
system sh/cfg.sh -n dnode3 -c http -v 0
system sh/cfg.sh -n dnode1 -c wdebugFlag -v 131
system sh/cfg.sh -n dnode2 -c wdebugFlag -v 131
system sh/cfg.sh -n dnode3 -c wdebugFlag -v 131
system sh/cfg.sh -n dnode1 -c maxTablesPerVnode -v 20000
system sh/cfg.sh -n dnode2 -c maxTablesPerVnode -v 20000
system sh/cfg.sh -n dnode3 -c maxTablesPerVnode -v 20000
system sh/cfg.sh -n dnode1 -c maxTablesPerVnode -v 1000000
system sh/cfg.sh -n dnode2 -c maxTablesPerVnode -v 1000000
system sh/cfg.sh -n dnode3 -c maxTablesPerVnode -v 1000000
system sh/cfg.sh -n dnode1 -c replica -v 3
system sh/cfg.sh -n dnode2 -c replica -v 3
system sh/cfg.sh -n dnode3 -c replica -v 3
print ============== deploy
system sh/exec.sh -n dnode1 -s start
sleep 2001
system sh/exec.sh -n dnode1 -s start
sleep 5001
sql connect
sql create dnode $hostname2
sql create dnode $hostname3
system sh/exec.sh -n dnode2 -s start
system sh/exec.sh -n dnode3 -s start
system sh/exec.sh -n dnode2 -s start
system sh/exec.sh -n dnode3 -s start
print =============== step1
$x = 0
......@@ -112,8 +72,8 @@ print $data0_3 $data2_3
$x = $x + 1
sleep 2000
if $x == 1000 then
sleep 5000
if $x == 100000 then
return -1
endi
......
system sh/stop_dnodes.sh
system sh/deploy.sh -n dnode1 -i 1
system sh/deploy.sh -n dnode2 -i 2
system sh/deploy.sh -n dnode3 -i 3
system sh/deploy.sh -n dnode4 -i 4
system sh/cfg.sh -n dnode1 -c role -v 1
system sh/cfg.sh -n dnode2 -c role -v 2
system sh/cfg.sh -n dnode3 -c role -v 2
system sh/cfg.sh -n dnode4 -c role -v 2
system sh/cfg.sh -n dnode1 -c balanceInterval -v 10
system sh/cfg.sh -n dnode2 -c balanceInterval -v 10
system sh/cfg.sh -n dnode3 -c balanceInterval -v 10
system sh/cfg.sh -n dnode4 -c balanceInterval -v 10
system sh/cfg.sh -n dnode1 -c walLevel -v 2
system sh/cfg.sh -n dnode2 -c walLevel -v 2
system sh/cfg.sh -n dnode3 -c walLevel -v 2
system sh/cfg.sh -n dnode4 -c walLevel -v 2
system sh/cfg.sh -n dnode1 -c balance -v 0
system sh/cfg.sh -n dnode2 -c balance -v 0
system sh/cfg.sh -n dnode3 -c balance -v 0
system sh/cfg.sh -n dnode4 -c balance -v 0
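# Automatic load balancing is disabled (balance 0) so vnodes only move when
# the test issues an explicit: alter dnode <src> balance "vnode:<v>-dnode:<dst>"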
print ========== step1
system sh/exec.sh -n dnode1 -s start
sql connect
sleep 3000
sql create dnode $hostname2
system sh/exec.sh -n dnode2 -s start
sleep 3000
print ========== step2
sql create database d1
sql create table d1.t1 (t timestamp, i int)
sql insert into d1.t1 values(now+1s, 15)
sql insert into d1.t1 values(now+2s, 14)
sql insert into d1.t1 values(now+3s, 13)
sql insert into d1.t1 values(now+4s, 12)
sql insert into d1.t1 values(now+5s, 11)
print ========== step3
sleep 2000
sql create dnode $hostname3
system sh/exec.sh -n dnode3 -s start
sql create dnode $hostname4
system sh/exec.sh -n dnode4 -s start
sql show dnodes
print dnode1 openVnodes $data2_1
print dnode2 openVnodes $data2_2
print dnode3 openVnodes $data2_3
print dnode4 openVnodes $data2_4
if $data2_1 != 0 then
return -1
endi
if $data2_2 != 1 then
return -1
endi
if $data2_3 != 0 then
return -1
endi
if $data2_4 != 0 then
return -1
endi
print ========== step4
sql alter dnode 2 balance "vnode:2-dnode:3"
$x = 0
show4:
$x = $x + 1
sleep 2000
if $x == 10 then
return -1
endi
sql show dnodes
print dnode1 openVnodes $data2_1
print dnode2 openVnodes $data2_2
print dnode3 openVnodes $data2_3
print dnode4 openVnodes $data2_4
if $data2_2 != 0 then
goto show4
endi
if $data2_3 != 1 then
goto show4
endi
if $data2_4 != 0 then
goto show4
endi
print ========== step5
sql alter dnode 3 balance "vnode:2-dnode:4"
$x = 0
show5:
$x = $x + 1
sleep 2000
if $x == 10 then
return -1
endi
sql show dnodes
print dnode1 openVnodes $data2_1
print dnode2 openVnodes $data2_2
print dnode3 openVnodes $data2_3
print dnode4 openVnodes $data2_4
if $data2_2 != 0 then
goto show5
endi
if $data2_3 != 0 then
goto show5
endi
if $data2_4 != 1 then
goto show5
endi
print ========== step6
sql alter dnode 4 balance "vnode:2-dnode:2"
$x = 0
show6:
$x = $x + 1
sleep 2000
if $x == 10 then
return -1
endi
sql show dnodes
print dnode1 openVnodes $data2_1
print dnode2 openVnodes $data2_2
print dnode3 openVnodes $data2_3
print dnode4 openVnodes $data2_4
if $data2_2 != 1 then
goto show6
endi
if $data2_3 != 0 then
goto show6
endi
if $data2_4 != 0 then
goto show6
endi
print ========== step7
sql select * from d1.t1 order by t desc
print $data01 $data11 $data21 $data31 $data41
if $data01 != 11 then
return -1
endi
if $data11 != 12 then
return -1
endi
if $data21 != 13 then
return -1
endi
if $data31 != 14 then
return -1
endi
if $data41 != 15 then
return -1
endi
print ========== step8
sql_error alter dnode 4 balance "vnode:2-dnode:5"
system sh/exec.sh -n dnode1 -s stop -x SIGINT
system sh/exec.sh -n dnode2 -s stop -x SIGINT
system sh/exec.sh -n dnode3 -s stop -x SIGINT
system sh/exec.sh -n dnode4 -s stop -x SIGINT
\ No newline at end of file
......@@ -10,8 +10,8 @@ IF ((TD_LINUX_64) OR (TD_LINUX_32 AND TD_ARM))
#add_executable(insertPerTable insertPerTable.c)
#target_link_libraries(insertPerTable taos_static pthread)
#add_executable(insertPerRow insertPerRow.c)
#target_link_libraries(insertPerRow taos_static pthread)
add_executable(insertPerRow insertPerRow.c)
target_link_libraries(insertPerRow taos_static pthread)
#add_executable(importOneRow importOneRow.c)
#target_link_libraries(importOneRow taos_static pthread)
......
......@@ -44,14 +44,16 @@ void createDbAndTable();
void insertData();
int32_t randomData[MAX_RANDOM_POINTS];
int64_t rowsPerTable = 10000;
int64_t rowsPerTable = 1000000000;
int64_t pointsPerTable = 1;
int64_t numOfThreads = 1;
int64_t numOfTablesPerThread = 200;
int64_t numOfThreads = 10;
int64_t numOfTablesPerThread = 100;
char dbName[32] = "db";
char stableName[64] = "st";
int32_t cache = 16;
int32_t tables = 5000;
int32_t cache = 1;
int32_t replica = 3;
int32_t days = 10;
int32_t interval = 1000;
int main(int argc, char *argv[]) {
shellParseArgument(argc, argv);
......@@ -77,7 +79,7 @@ void createDbAndTable() {
exit(1);
}
sprintf(qstr, "create database if not exists %s cache %d maxtables %d", dbName, cache, tables);
sprintf(qstr, "create database if not exists %s cache %d replica %d days %d", dbName, cache, replica, days);
pSql = taos_query(con, qstr);
int32_t code = taos_errno(pSql);
if (code != 0) {
......@@ -239,7 +241,7 @@ void *syncTest(void *param) {
st = systemTime.tv_sec * 1000000 + systemTime.tv_usec;
int64_t start = 1430000000000;
int64_t interval = 1000; // 1000 ms
interval = 1000; // 1000 ms
char *sql = qstr;
char inserStr[] = "insert into";
......@@ -309,10 +311,14 @@ void printHelp() {
printf("%s%s%s%" PRId64 "\n", indent, indent, "Number of threads to be used, default is ", numOfThreads);
printf("%s%s\n", indent, "-n");
printf("%s%s%s%" PRId64 "\n", indent, indent, "Number of tables per thread, default is ", numOfTablesPerThread);
printf("%s%s\n", indent, "-tables");
printf("%s%s%s%d\n", indent, indent, "Database parameters tables, default is ", tables);
printf("%s%s\n", indent, "-replica");
printf("%s%s%s%d\n", indent, indent, "Database parameters replica, default is ", replica);
printf("%s%s\n", indent, "-cache");
printf("%s%s%s%d\n", indent, indent, "Database parameters cache, default is ", cache);
printf("%s%s%s%d\n", indent, indent, "Database parameters replica, default is ", cache);
printf("%s%s\n", indent, "-days");
printf("%s%s%s%d\n", indent, indent, "Database parameters days, default is ", days);
printf("%s%s\n", indent, "-interval");
printf("%s%s%s%d\n", indent, indent, "Interval of each rows in ms, default is ", interval);
exit(EXIT_SUCCESS);
}
......@@ -336,10 +342,14 @@ void shellParseArgument(int argc, char *argv[]) {
numOfThreads = atoi(argv[++i]);
} else if (strcmp(argv[i], "-n") == 0) {
numOfTablesPerThread = atoi(argv[++i]);
} else if (strcmp(argv[i], "-tables") == 0) {
tables = atoi(argv[++i]);
} else if (strcmp(argv[i], "-replica") == 0) {
replica = atoi(argv[++i]);
} else if (strcmp(argv[i], "-cache") == 0) {
cache = atoi(argv[++i]);
} else if (strcmp(argv[i], "-days") == 0) {
days = atoi(argv[++i]);
} else if (strcmp(argv[i], "-interval") == 0) {
interval = atoi(argv[++i]);
} else {
}
}
......@@ -349,7 +359,7 @@ void shellParseArgument(int argc, char *argv[]) {
pPrint("%snumOfThreads:%" PRId64 "%s", GREEN, numOfThreads, NC);
pPrint("%snumOfTablesPerThread:%" PRId64 "%s", GREEN, numOfTablesPerThread, NC);
pPrint("%scache:%" PRId32 "%s", GREEN, cache, NC);
pPrint("%stables:%" PRId32 "%s", GREEN, tables, NC);
pPrint("%stables:%" PRId32 "%s", GREEN, replica, NC);
pPrint("%sdbName:%s%s", GREEN, dbName, NC);
pPrint("%stableName:%s%s", GREEN, stableName, NC);
pPrint("%sstart to run%s", GREEN, NC);
......
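For reference, the new defaults above can still be overridden at run time (a sketch using only the flags visible in shellParseArgument; the binary name comes from the CMake target):

./insertPerRow -replica 3 -cache 1 -days 10 -interval 1000 -n 100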
......@@ -670,12 +670,12 @@ bool simExecuteNativeSqlCommand(SScript *script, char *rest, bool isSlow) {
ret = taos_errno(pSql);
if (ret == TSDB_CODE_MND_TABLE_ALREADY_EXIST || ret == TSDB_CODE_MND_DB_ALREADY_EXIST) {
simDebug("script:%s, taos:%p, %s success, ret:%d:%s", script->fileName, script->taos, rest, ret, tstrerror(ret));
simDebug("script:%s, taos:%p, %s success, ret:%d:%s", script->fileName, script->taos, rest, ret & 0XFFFF, tstrerror(ret));
ret = 0;
break;
} else if (ret != 0) {
simDebug("script:%s, taos:%p, %s failed, ret:%d:%s, error:%s",
script->fileName, script->taos, rest, ret, tstrerror(ret), taos_errstr(pSql));
script->fileName, script->taos, rest, ret & 0XFFFF, tstrerror(ret), taos_errstr(pSql));
if (line->errorJump == SQL_JUMP_TRUE) {
script->linePos = line->jump;
......@@ -691,7 +691,7 @@ bool simExecuteNativeSqlCommand(SScript *script, char *rest, bool isSlow) {
}
if (ret) {
sprintf(script->error, "lineNum:%d. sql:%s failed, ret:%d:%s", line->lineNum, rest, ret, tstrerror(ret));
sprintf(script->error, "lineNum:%d. sql:%s failed, ret:%d:%s", line->lineNum, rest, ret & 0XFFFF, tstrerror(ret));
return false;
}
......@@ -821,7 +821,7 @@ bool simExecuteRestFulSqlCommand(SScript *script, char *rest) {
ret = simExecuteRestFulCommand(script, command);
if (ret == TSDB_CODE_MND_TABLE_ALREADY_EXIST ||
ret == TSDB_CODE_MND_DB_ALREADY_EXIST) {
simDebug("script:%s, taos:%p, %s success, ret:%d:%s", script->fileName, script->taos, rest, ret, tstrerror(ret));
simDebug("script:%s, taos:%p, %s success, ret:%d:%s", script->fileName, script->taos, rest, ret & 0XFFFF, tstrerror(ret));
ret = 0;
break;
} else if (ret != 0) {
......@@ -957,12 +957,12 @@ bool simExecuteSqlErrorCmd(SScript *script, char *rest) {
if (ret != TSDB_CODE_SUCCESS) {
simDebug("script:%s, taos:%p, %s execute, expect failed, so success, ret:%d:%s",
script->fileName, script->taos, rest, ret, tstrerror(ret));
script->fileName, script->taos, rest, ret & 0XFFFF, tstrerror(ret));
script->linePos++;
return true;
}
sprintf(script->error, "lineNum:%d. sql:%s expect failed, but success, ret:%d:%s", line->lineNum, rest, ret, tstrerror(ret));
sprintf(script->error, "lineNum:%d. sql:%s expect failed, but success, ret:%d:%s", line->lineNum, rest, ret & 0XFFFF, tstrerror(ret));
return false;
}
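The recurring ret & 0XFFFF in these log calls appears to mask off the high bits of a TDengine error code (the error flag and module bits), so only the low 16-bit error number is printed; a minimal illustration under that assumption:

/* hypothetical example of the masking */
#include <stdint.h>
#include <stdio.h>

int main() {
  int32_t ret = (int32_t)0x80000360;   /* full code with the error flag set */
  printf("ret:%d\n", ret & 0xFFFF);    /* prints 864 (0x0360) */
  return 0;
}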