Unverified commit e3f4c903, authored by wu champion, committed by GitHub

Merge branch 'develop' into feature/TD-1380

......@@ -17,6 +17,7 @@ SET(TD_MQTT FALSE)
SET(TD_TSDB_PLUGINS FALSE)
SET(TD_STORAGE FALSE)
SET(TD_TOPIC FALSE)
SET(TD_MODULE FALSE)
SET(TD_COVER FALSE)
SET(TD_MEM_CHECK FALSE)
......
......@@ -6,6 +6,7 @@ node {
}
def skipstage=0
def abortPreviousBuilds() {
def currentJobName = env.JOB_NAME
def currentBuildNumber = env.BUILD_NUMBER.toInteger()
......@@ -24,7 +25,7 @@ def abortPreviousBuilds() {
build.doKill() //doTerm(),doKill(),doTerm()
}
}
//abort previous build
// abort previous build
abortPreviousBuilds()
def abort_previous(){
def buildNumber = env.BUILD_NUMBER as int
......@@ -32,7 +33,8 @@ def abort_previous(){
milestone(buildNumber)
}
def pre_test(){
sh '''
sudo rmtaos || echo "taosd is not installed"
'''
......@@ -79,6 +81,10 @@ pipeline {
changeRequest()
}
steps {
script{
abort_previous()
abortPreviousBuilds()
}
sh'''
cp -r ${WORKSPACE} ${WORKSPACE}.tes
cd ${WORKSPACE}.tes
......
......@@ -258,10 +258,16 @@ TDengine 社区生态中也有一些非常友好的第三方连接器,可以
TDengine 的测试框架和所有测试例全部开源。
点击[这里](tests/How-To-Run-Test-And-How-To-Add-New-Test-Case.md),了解如何运行测试例和添加新的测试例。
点击 [这里](tests/How-To-Run-Test-And-How-To-Add-New-Test-Case.md),了解如何运行测试例和添加新的测试例。
# 成为社区贡献者
点击[这里](https://www.taosdata.com/cn/contributor/),了解如何成为 TDengine 的贡献者。
#加入技术交流群
TDengine官方社群「物联网大数据群」对外开放,欢迎您加入讨论。搜索微信号 "tdengine",加小T为好友,即可入群。
点击 [这里](https://www.taosdata.com/cn/contributor/),了解如何成为 TDengine 的贡献者。
# 加入技术交流群
TDengine 官方社群「物联网大数据群」对外开放,欢迎您加入讨论。搜索微信号 "tdengine",加小T为好友,即可入群。
# [谁在使用TDengine](https://github.com/taosdata/TDengine/issues/2432)
欢迎所有 TDengine 用户及贡献者在 [这里](https://github.com/taosdata/TDengine/issues/2432) 分享您在当前工作中开发/使用 TDengine 的故事。
......@@ -250,3 +250,6 @@ Please follow the [contribution guidelines](CONTRIBUTING.md) to contribute to th
Add WeChat “tdengine” to join the group, and you can communicate with other users.
# [User List](https://github.com/taosdata/TDengine/issues/2432)
If you are using TDengine and feel it helps or you'd like to do some contributions, please add your company to [user list](https://github.com/taosdata/TDengine/issues/2432) and let us know your needs.
......@@ -29,6 +29,10 @@ IF (TD_TOPIC)
ADD_DEFINITIONS(-D_TOPIC)
ENDIF ()
IF (TD_MODULE)
ADD_DEFINITIONS(-D_MODULE)
ENDIF ()
IF (TD_GODLL)
ADD_DEFINITIONS(-D_TD_GO_DLL_)
ENDIF ()
......
......@@ -17,6 +17,14 @@ ELSEIF (${TOPIC} MATCHES "false")
MESSAGE(STATUS "Build without topic plugins")
ENDIF ()
IF (${TD_MODULE} MATCHES "true")
SET(TD_MODULE TRUE)
MESSAGE(STATUS "Build with module plugins")
ELSEIF (${TD_MODULE} MATCHES "false")
SET(TD_MODULE FALSE)
MESSAGE(STATUS "Build without module plugins")
ENDIF ()
IF (${COVER} MATCHES "true")
SET(TD_COVER TRUE)
MESSAGE(STATUS "Build with test coverage")
......
......@@ -6,19 +6,27 @@
### 内存需求
每个 DB 可以创建固定数目的 vgroup,默认与 CPU 核数相同,可通过 maxVgroupsPerDb 配置;vgroup 中的每个副本会是一个 vnode;每个 vnode 会占用固定大小的内存(大小与数据库的配置参数 blocks 和 cache 有关);每个 Table 会占用与标签总长度有关的内存;此外,系统会有一些固定的内存开销。因此,每个 DB 需要的系统内存可通过如下公式计算:
每个 Database 可以创建固定数目的 vgroup,默认与 CPU 核数相同,可通过 maxVgroupsPerDb 配置;vgroup 中的每个副本会是一个 vnode;每个 vnode 会占用固定大小的内存(大小与数据库的配置参数 blocks 和 cache 有关);每个 Table 会占用与标签总长度有关的内存;此外,系统会有一些固定的内存开销。因此,每个 DB 需要的系统内存可通过如下公式计算:
```
Memory Size = maxVgroupsPerDb * (blocks * cache + 10MB) + numOfTables * (tagSizePerTable + 0.5KB)
Database Memory Size = maxVgroupsPerDb * (blocks * cache + 10MB) + numOfTables * (tagSizePerTable + 0.5KB)
```
示例:假设是 4 核机器,cache 是缺省大小 16M, blocks 是缺省值 6,假设有 10 万张表,标签总长度是 256 字节,则总的内存需求为:4 \* (16 \* 6 + 10) + 100000 \* (0.25 + 0.5) / 1000 = 499M。
示例:假设是 4 核机器,cache 是缺省大小 16M, blocks 是缺省值 6,并且一个 DB 中有 10 万张表,标签总长度是 256 字节,则这个 DB 总的内存需求为:4 \* (16 \* 6 + 10) + 100000 \* (0.25 + 0.5) / 1000 = 499M。
注意:从这个公式计算得到的内存容量,应理解为系统的“必要需求”,而不是“内存总数”。在实际运行的生产系统中,由于操作系统缓存、资源管理调度等方面的需要,内存规划应当在计算结果的基础上保留一定冗余,以维持系统状态和系统性能的稳定性。
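The arithmetic in the example above can be reproduced with a small standalone sketch (illustrative only, not part of this commit); the constants mirror the documented example: 4 vgroups, cache = 16 MB, blocks = 6, 100000 tables with 256-byte tags.
```
/* Illustrative sketch of the "Database Memory Size" formula above.
 * The constants are the documented example values, not TDengine APIs. */
#include <stdio.h>

int main(void) {
    double maxVgroupsPerDb = 4;      /* defaults to the number of CPU cores */
    double blocksPerVnode  = 6;      /* config parameter "blocks" */
    double cacheMB         = 16;     /* config parameter "cache", in MB */
    double numOfTables     = 100000;
    double tagSizeKB       = 0.25;   /* 256-byte tag area per table */

    double vnodeMB = maxVgroupsPerDb * (blocksPerVnode * cacheMB + 10);
    double tableMB = numOfTables * (tagSizeKB + 0.5) / 1000;   /* KB -> MB */

    printf("Database Memory Size ~= %.0f MB\n", vnodeMB + tableMB);  /* prints 499 */
    return 0;
}
```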
在实际的系统运维中,我们通常会更关心 TDengine 服务进程(taosd)会占用的内存量。
```
taosd 内存总量 = vnode 内存 + mnode 内存 + 查询内存
```
其中:
1. “vnode 内存”指的是集群中所有的 Database 存储分摊到当前 taosd 节点上所占用的内存资源。可以按上文“Database Memory Size”计算公式估算每个 DB 的内存占用量进行加总,再按集群中总共的 TDengine 节点数做平均(如果设置为多副本,则还需要乘以对应的副本倍数)。
2. “mnode 内存”指的是集群中管理节点所占用的资源。如果一个 taosd 节点上分布有 mnode 管理节点,则内存消耗还需要增加“0.2KB * 集群中数据表总数”。
3. “查询内存”指的是服务端处理查询请求时所需要占用的内存。单条查询语句至少会占用“0.2KB * 查询涉及的数据表总数”的内存量。
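Putting the three components together, a rough per-node estimator could look like the following hedged sketch; the function name and parameters are illustrative assumptions, not TDengine code.
```
/* Hedged sketch of the "taosd total memory" estimate described above.
 * All names and parameters here are illustrative, not TDengine APIs. */
double estimateTaosdMemoryMB(double sumOfDbMemoryMB,  /* sum of per-DB "Database Memory Size" */
                             int    numOfDnodes,      /* TDengine nodes in the cluster */
                             int    replica,          /* replica count of the databases */
                             double tablesInCluster,  /* total number of tables in the cluster */
                             double tablesPerQuery) { /* tables touched by a single query */
    double vnodeMB = sumOfDbMemoryMB * replica / numOfDnodes;  /* per-node share of all DBs */
    double mnodeMB = 0.2 * tablesInCluster / 1024;             /* 0.2 KB per table on mnode hosts */
    double queryMB = 0.2 * tablesPerQuery / 1024;              /* at least 0.2 KB per involved table */
    return vnodeMB + mnodeMB + queryMB;
}
```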
实际运行的系统往往会根据数据特点的不同,将数据存放在不同的 DB 里。因此做规划时,也需要考虑
注意:以上内存估算方法,主要讲解了系统的“必须内存需求”,而不是“内存总数上限”。在实际运行的生产环境中,由于操作系统缓存、资源管理调度等方面的原因,内存规划应当在估算结果的基础上保留一定冗余,以维持系统状态和系统性能的稳定性。并且,生产环境通常会配置系统资源的监控工具,以便及时发现硬件资源的紧缺情况
如果内存充裕,可以加大 Blocks 的配置,这样更多数据将保存在内存里,提高查询速度。
最后,如果内存充裕,可以考虑加大 Blocks 的配置,这样更多数据将保存在内存里,提高查询速度。
### CPU 需求
......
......@@ -367,7 +367,7 @@ typedef struct SSqlObj {
int64_t svgroupRid;
int64_t squeryLock;
int32_t retryReason; // previous error code
struct SSqlObj *prev, *next;
int64_t self;
} SSqlObj;
......
......@@ -310,9 +310,50 @@ void tscAsyncResultOnError(SSqlObj* pSql) {
taosScheduleTask(tscQhandle, &schedMsg);
}
int tscSendMsgToServer(SSqlObj *pSql);
static int32_t updateMetaBeforeRetryQuery(SSqlObj* pSql, STableMetaInfo* pTableMetaInfo, SQueryInfo* pQueryInfo) {
// handle the invalid table error code for super table.
// update the pExpr info, colList info, number of table columns
// TODO Re-parse this sql and issue the corresponding subquery as an alternative for this case.
if (pSql->retryReason == TSDB_CODE_TDB_INVALID_TABLE_ID) {
int32_t numOfExprs = (int32_t) tscSqlExprNumOfExprs(pQueryInfo);
int32_t numOfCols = tscGetNumOfColumns(pTableMetaInfo->pTableMeta);
int32_t numOfTags = tscGetNumOfTags(pTableMetaInfo->pTableMeta);
SSchema *pSchema = tscGetTableSchema(pTableMetaInfo->pTableMeta);
for (int32_t i = 0; i < numOfExprs; ++i) {
SSqlExpr *pExpr = tscSqlExprGet(pQueryInfo, i);
pExpr->uid = pTableMetaInfo->pTableMeta->id.uid;
if (pExpr->colInfo.colIndex >= 0) {
int32_t index = pExpr->colInfo.colIndex;
if ((TSDB_COL_IS_NORMAL_COL(pExpr->colInfo.flag) && index >= numOfCols) ||
(TSDB_COL_IS_TAG(pExpr->colInfo.flag) && (index < numOfCols || index >= (numOfCols + numOfTags)))) {
return pSql->retryReason;
}
if ((pSchema[pExpr->colInfo.colIndex].colId != pExpr->colInfo.colId) &&
strcasecmp(pExpr->colInfo.name, pSchema[pExpr->colInfo.colIndex].name) != 0) {
return pSql->retryReason;
}
}
}
// validate the table columns information
for (int32_t i = 0; i < taosArrayGetSize(pQueryInfo->colList); ++i) {
SColumn *pCol = taosArrayGetP(pQueryInfo->colList, i);
if (pCol->colIndex.columnIndex >= numOfCols) {
return pSql->retryReason;
}
}
} else {
// do nothing
}
return TSDB_CODE_SUCCESS;
}
void tscTableMetaCallBack(void *param, TAOS_RES *res, int code) {
SSqlObj* pSql = (SSqlObj*)taosAcquireRef(tscObjRef, (int64_t)param);
......@@ -339,7 +380,8 @@ void tscTableMetaCallBack(void *param, TAOS_RES *res, int code) {
if (TSDB_QUERY_HAS_TYPE(pQueryInfo->type, (TSDB_QUERY_TYPE_STABLE_SUBQUERY|TSDB_QUERY_TYPE_SUBQUERY|TSDB_QUERY_TYPE_TAG_FILTER_QUERY))) {
tscDebug("%p update local table meta, continue to process sql and send the corresponding query", pSql);
STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
STableMetaInfo *pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
code = tscGetTableMeta(pSql, pTableMetaInfo);
assert(code == TSDB_CODE_TSC_ACTION_IN_PROGRESS || code == TSDB_CODE_SUCCESS);
......@@ -349,6 +391,10 @@ void tscTableMetaCallBack(void *param, TAOS_RES *res, int code) {
}
assert((tscGetNumOfTags(pTableMetaInfo->pTableMeta) != 0));
code = updateMetaBeforeRetryQuery(pSql, pTableMetaInfo, pQueryInfo);
if (code != TSDB_CODE_SUCCESS) {
goto _error;
}
// tscProcessSql can add error into async res
tscProcessSql(pSql);
......
......@@ -670,7 +670,18 @@ int32_t tscToSQLCmd(SSqlObj* pSql, struct SSqlInfo* pInfo) {
if ((code = setKillInfo(pSql, pInfo, pInfo->type)) != TSDB_CODE_SUCCESS) {
return code;
}
break;
}
case TSDB_SQL_SYNC_DB_REPLICA: {
const char* msg1 = "invalid db name";
SStrToken* pzName = taosArrayGet(pInfo->pMiscInfo->a, 0);
assert(taosArrayGetSize(pInfo->pMiscInfo->a) == 1);
code = tNameSetDbName(&pTableMetaInfo->name, getAccountId(pSql), pzName);
if (code != TSDB_CODE_SUCCESS) {
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1);
}
break;
}
......
......@@ -350,8 +350,8 @@ void tscProcessMsgFromServer(SRpcMsg *rpcMsg, SRpcEpSet *pEpSet) {
taosMsleep(duration);
}
pSql->retryReason = rpcMsg->code;
rpcMsg->code = tscRenewTableMeta(pSql, 0);
// if there is an error occurring, proceed to the following error handling procedure.
if (rpcMsg->code == TSDB_CODE_TSC_ACTION_IN_PROGRESS) {
taosReleaseRef(tscObjRef, handle);
......@@ -1284,6 +1284,23 @@ int32_t tscBuildUseDbMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
return TSDB_CODE_SUCCESS;
}
int32_t tscBuildSyncDbReplicaMsg(SSqlObj* pSql, SSqlInfo *pInfo) {
SSqlCmd *pCmd = &pSql->cmd;
pCmd->payloadLen = sizeof(SSyncDbMsg);
if (TSDB_CODE_SUCCESS != tscAllocPayload(pCmd, pCmd->payloadLen)) {
tscError("%p failed to malloc for query msg", pSql);
return TSDB_CODE_TSC_OUT_OF_MEMORY;
}
SSyncDbMsg *pSyncMsg = (SSyncDbMsg *)pCmd->payload;
STableMetaInfo *pTableMetaInfo = tscGetTableMetaInfoFromCmd(pCmd, pCmd->clauseIndex, 0);
tNameExtractFullName(&pTableMetaInfo->name, pSyncMsg->db);
pCmd->msgType = TSDB_MSG_TYPE_CM_SYNC_DB;
return TSDB_CODE_SUCCESS;
}
int32_t tscBuildShowMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
STscObj *pObj = pSql->pTscObj;
SSqlCmd *pCmd = &pSql->cmd;
......@@ -2559,6 +2576,7 @@ void tscInitMsgsFp() {
tscBuildMsg[TSDB_SQL_DROP_USER] = tscBuildDropUserAcctMsg;
tscBuildMsg[TSDB_SQL_DROP_ACCT] = tscBuildDropUserAcctMsg;
tscBuildMsg[TSDB_SQL_DROP_DB] = tscBuildDropDbMsg;
tscBuildMsg[TSDB_SQL_SYNC_DB_REPLICA] = tscBuildSyncDbReplicaMsg;
tscBuildMsg[TSDB_SQL_DROP_TABLE] = tscBuildDropTableMsg;
tscBuildMsg[TSDB_SQL_ALTER_USER] = tscBuildUserMsg;
tscBuildMsg[TSDB_SQL_CREATE_DNODE] = tscBuildCreateDnodeMsg;
......
......@@ -51,6 +51,7 @@ enum {
TSDB_DEFINE_SQL_TYPE( TSDB_SQL_ALTER_ACCT, "alter-acct" )
TSDB_DEFINE_SQL_TYPE( TSDB_SQL_ALTER_TABLE, "alter-table" )
TSDB_DEFINE_SQL_TYPE( TSDB_SQL_ALTER_DB, "alter-db" )
TSDB_DEFINE_SQL_TYPE(TSDB_SQL_SYNC_DB_REPLICA, "sync db-replica")
TSDB_DEFINE_SQL_TYPE( TSDB_SQL_CREATE_MNODE, "create-mnode" )
TSDB_DEFINE_SQL_TYPE( TSDB_SQL_DROP_MNODE, "drop-mnode" )
TSDB_DEFINE_SQL_TYPE( TSDB_SQL_CREATE_DNODE, "create-dnode" )
......@@ -87,13 +88,13 @@ enum {
*/
TSDB_DEFINE_SQL_TYPE( TSDB_SQL_RETRIEVE_EMPTY_RESULT, "retrieve-empty-result" )
TSDB_DEFINE_SQL_TYPE( TSDB_SQL_RESET_CACHE, "reset-cache" )
TSDB_DEFINE_SQL_TYPE( TSDB_SQL_SERV_STATUS, "serv-status" )
TSDB_DEFINE_SQL_TYPE( TSDB_SQL_CURRENT_DB, "current-db" )
TSDB_DEFINE_SQL_TYPE( TSDB_SQL_RESET_CACHE, "reset-cache" )
TSDB_DEFINE_SQL_TYPE( TSDB_SQL_SERV_STATUS, "serv-status" )
TSDB_DEFINE_SQL_TYPE( TSDB_SQL_CURRENT_DB, "current-db" )
TSDB_DEFINE_SQL_TYPE( TSDB_SQL_SERV_VERSION, "serv-version" )
TSDB_DEFINE_SQL_TYPE( TSDB_SQL_CLI_VERSION, "cli-version" )
TSDB_DEFINE_SQL_TYPE( TSDB_SQL_CLI_VERSION, "cli-version" )
TSDB_DEFINE_SQL_TYPE( TSDB_SQL_CURRENT_USER, "current-user ")
TSDB_DEFINE_SQL_TYPE( TSDB_SQL_CFG_LOCAL, "cfg-local" )
TSDB_DEFINE_SQL_TYPE( TSDB_SQL_CFG_LOCAL, "cfg-local" )
TSDB_DEFINE_SQL_TYPE( TSDB_SQL_MAX, "max" )
};
......
......@@ -35,6 +35,10 @@ IF (TD_TOPIC)
TARGET_LINK_LIBRARIES(taosd topic)
ENDIF ()
IF (TD_MODULE AND TD_LINUX)
TARGET_LINK_LIBRARIES(taosd module dl)
ENDIF ()
SET(PREPARE_ENV_CMD "prepare_env_cmd")
SET(PREPARE_ENV_TARGET "prepare_env_target")
ADD_CUSTOM_COMMAND(OUTPUT ${PREPARE_ENV_CMD}
......
......@@ -39,6 +39,13 @@
#include "dnodeMPeer.h"
#include "dnodeShell.h"
#include "dnodeTelemetry.h"
#include "module.h"
#if !defined(_MODULE) || !defined(_TD_LINUX)
int32_t moduleStart() { return 0; }
void moduleStop() {}
#endif
void *tsDnodeTmr = NULL;
static SRunStatus tsRunStatus = TSDB_RUN_STATUS_STOPPED;
......@@ -146,6 +153,7 @@ int32_t dnodeInitSystem() {
}
dnodeSetRunStatus(TSDB_RUN_STATUS_RUNING);
moduleStart();
dnodeReportStep("TDengine", "initialized successfully", 1);
dInfo("TDengine is initialized successfully");
......@@ -155,6 +163,7 @@ int32_t dnodeInitSystem() {
void dnodeCleanUpSystem() {
if (dnodeGetRunStatus() != TSDB_RUN_STATUS_STOPPED) {
moduleStop();
dnodeSetRunStatus(TSDB_RUN_STATUS_STOPPED);
dnodeCleanupTmr();
dnodeCleanupComponents();
......
......@@ -49,6 +49,7 @@ int32_t dnodeInitShell() {
dnodeProcessShellMsgFp[TSDB_MSG_TYPE_CM_CREATE_DB] = dnodeDispatchToMWriteQueue;
dnodeProcessShellMsgFp[TSDB_MSG_TYPE_CM_CREATE_TP] = dnodeDispatchToMWriteQueue;
dnodeProcessShellMsgFp[TSDB_MSG_TYPE_CM_DROP_DB] = dnodeDispatchToMWriteQueue;
dnodeProcessShellMsgFp[TSDB_MSG_TYPE_CM_SYNC_DB] = dnodeDispatchToMWriteQueue;
dnodeProcessShellMsgFp[TSDB_MSG_TYPE_CM_DROP_TP] = dnodeDispatchToMWriteQueue;
dnodeProcessShellMsgFp[TSDB_MSG_TYPE_CM_ALTER_DB] = dnodeDispatchToMWriteQueue;
dnodeProcessShellMsgFp[TSDB_MSG_TYPE_CM_ALTER_TP] = dnodeDispatchToMWriteQueue;
......
/*
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
*
* This program is free software: you can use, redistribute, and/or modify
* it under the terms of the GNU Affero General Public License, version 3
* or later ("AGPL"), as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef TDENGINE_MODULE
#define TDENGINE_MODULE
#ifdef __cplusplus
extern "C" {
#endif
int32_t moduleStart();
void moduleStop();
#ifdef __cplusplus
}
#endif
#endif
......@@ -77,6 +77,7 @@ TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_CM_CREATE_DB, "create-db" )
TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_CM_DROP_DB, "drop-db" )
TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_CM_USE_DB, "use-db" )
TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_CM_ALTER_DB, "alter-db" )
TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_CM_SYNC_DB, "sync-db-replica" )
TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_CM_CREATE_TABLE, "create-table" )
TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_CM_DROP_TABLE, "drop-table" )
TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_CM_ALTER_TABLE, "alter-table" )
......@@ -574,7 +575,7 @@ typedef struct {
typedef struct {
char db[TSDB_TABLE_FNAME_LEN];
uint8_t ignoreNotExists;
} SDropDbMsg, SUseDbMsg;
} SDropDbMsg, SUseDbMsg, SSyncDbMsg;
// IMPORTANT: sizeof(SVnodeStatisticInfo) should not exceed
// TSDB_FILE_HEADER_LEN/4 - TSDB_FILE_HEADER_VERSION_SIZE
......
......@@ -152,56 +152,57 @@
#define TK_NOW 133
#define TK_RESET 134
#define TK_QUERY 135
#define TK_ADD 136
#define TK_COLUMN 137
#define TK_TAG 138
#define TK_CHANGE 139
#define TK_SET 140
#define TK_KILL 141
#define TK_CONNECTION 142
#define TK_STREAM 143
#define TK_COLON 144
#define TK_ABORT 145
#define TK_AFTER 146
#define TK_ATTACH 147
#define TK_BEFORE 148
#define TK_BEGIN 149
#define TK_CASCADE 150
#define TK_CLUSTER 151
#define TK_CONFLICT 152
#define TK_COPY 153
#define TK_DEFERRED 154
#define TK_DELIMITERS 155
#define TK_DETACH 156
#define TK_EACH 157
#define TK_END 158
#define TK_EXPLAIN 159
#define TK_FAIL 160
#define TK_FOR 161
#define TK_IGNORE 162
#define TK_IMMEDIATE 163
#define TK_INITIALLY 164
#define TK_INSTEAD 165
#define TK_MATCH 166
#define TK_KEY 167
#define TK_OF 168
#define TK_RAISE 169
#define TK_REPLACE 170
#define TK_RESTRICT 171
#define TK_ROW 172
#define TK_STATEMENT 173
#define TK_TRIGGER 174
#define TK_VIEW 175
#define TK_SEMI 176
#define TK_NONE 177
#define TK_PREV 178
#define TK_LINEAR 179
#define TK_IMPORT 180
#define TK_TBNAME 181
#define TK_JOIN 182
#define TK_INSERT 183
#define TK_INTO 184
#define TK_VALUES 185
#define TK_SYNCDB 136
#define TK_ADD 137
#define TK_COLUMN 138
#define TK_TAG 139
#define TK_CHANGE 140
#define TK_SET 141
#define TK_KILL 142
#define TK_CONNECTION 143
#define TK_STREAM 144
#define TK_COLON 145
#define TK_ABORT 146
#define TK_AFTER 147
#define TK_ATTACH 148
#define TK_BEFORE 149
#define TK_BEGIN 150
#define TK_CASCADE 151
#define TK_CLUSTER 152
#define TK_CONFLICT 153
#define TK_COPY 154
#define TK_DEFERRED 155
#define TK_DELIMITERS 156
#define TK_DETACH 157
#define TK_EACH 158
#define TK_END 159
#define TK_EXPLAIN 160
#define TK_FAIL 161
#define TK_FOR 162
#define TK_IGNORE 163
#define TK_IMMEDIATE 164
#define TK_INITIALLY 165
#define TK_INSTEAD 166
#define TK_MATCH 167
#define TK_KEY 168
#define TK_OF 169
#define TK_RAISE 170
#define TK_REPLACE 171
#define TK_RESTRICT 172
#define TK_ROW 173
#define TK_STATEMENT 174
#define TK_TRIGGER 175
#define TK_VIEW 176
#define TK_SEMI 177
#define TK_NONE 178
#define TK_PREV 179
#define TK_LINEAR 180
#define TK_IMPORT 181
#define TK_TBNAME 182
#define TK_JOIN 183
#define TK_INSERT 184
#define TK_INTO 185
#define TK_VALUES 186
#define TK_SPACE 300
......
......@@ -41,7 +41,7 @@
"insert_mode": "taosc",
"insert_rows": 1000,
"multi_thread_write_one_tbl": "no",
"rows_per_tbl": 20,
"interlace_rows": 20,
"max_sql_len": 1024000,
"disorder_ratio": 0,
"disorder_range": 1000,
......
......@@ -41,7 +41,7 @@
"insert_mode": "taosc",
"insert_rows": 100000,
"multi_thread_write_one_tbl": "no",
"rows_per_tbl": 0,
"interlace_rows": 0,
"max_sql_len": 1024000,
"disorder_ratio": 0,
"disorder_range": 1000,
......
This diff is collapsed.
......@@ -50,6 +50,7 @@ static int32_t mnodeGetDbMeta(STableMetaMsg *pMeta, SShowObj *pShow, void *pConn
static int32_t mnodeRetrieveDbs(SShowObj *pShow, char *data, int32_t rows, void *pConn);
static int32_t mnodeProcessCreateDbMsg(SMnodeMsg *pMsg);
static int32_t mnodeProcessDropDbMsg(SMnodeMsg *pMsg);
static int32_t mnodeProcessSyncDbMsg(SMnodeMsg *pMsg);
int32_t mnodeProcessAlterDbMsg(SMnodeMsg *pMsg);
#ifndef _TOPIC
......@@ -178,6 +179,7 @@ int32_t mnodeInitDbs() {
mnodeAddWriteMsgHandle(TSDB_MSG_TYPE_CM_CREATE_DB, mnodeProcessCreateDbMsg);
mnodeAddWriteMsgHandle(TSDB_MSG_TYPE_CM_ALTER_DB, mnodeProcessAlterDbMsg);
mnodeAddWriteMsgHandle(TSDB_MSG_TYPE_CM_DROP_DB, mnodeProcessDropDbMsg);
mnodeAddWriteMsgHandle(TSDB_MSG_TYPE_CM_SYNC_DB, mnodeProcessSyncDbMsg);
mnodeAddShowMetaHandle(TSDB_MGMT_TABLE_DB, mnodeGetDbMeta);
mnodeAddShowRetrieveHandle(TSDB_MGMT_TABLE_DB, mnodeRetrieveDbs);
mnodeAddShowFreeIterHandle(TSDB_MGMT_TABLE_DB, mnodeCancelGetNextDb);
......@@ -1184,6 +1186,10 @@ static int32_t mnodeProcessDropDbMsg(SMnodeMsg *pMsg) {
return mnodeDropDb(pMsg);
}
static int32_t mnodeProcessSyncDbMsg(SMnodeMsg *pMsg) {
return 0;
}
void mnodeDropAllDbs(SAcctObj *pAcct) {
int32_t numOfDbs = 0;
SDbObj *pDb = NULL;
......
......@@ -726,6 +726,9 @@ expritem(A) ::= . {A = 0;}
///////////////////////////////////reset query cache//////////////////////////////////////
cmd ::= RESET QUERY CACHE. { setDCLSqlElems(pInfo, TSDB_SQL_RESET_CACHE, 0);}
///////////////////////////////////sync replica database//////////////////////////////////
cmd ::= SYNCDB ids(X) REPLICA.{ setDCLSqlElems(pInfo, TSDB_SQL_SYNC_DB_REPLICA, 1, &X);}
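For reference, a statement accepted by this new rule could be issued from the C client roughly as follows (a hedged fragment: it assumes `#include <taos.h>` and `<stdio.h>`, an already connected `TAOS *taos` handle, and `mydb` as a placeholder database name).
```
/* Illustrative fragment only; `taos` is assumed to be a connected handle
 * and "mydb" a placeholder database name. */
TAOS_RES *res = taos_query(taos, "SYNCDB mydb REPLICA");
if (taos_errno(res) != 0) {
    printf("SYNCDB failed: %s\n", taos_errstr(res));
}
taos_free_result(res);
```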
///////////////////////////////////ALTER TABLE statement//////////////////////////////////
cmd ::= ALTER TABLE ids(X) cpxName(F) ADD COLUMN columnlist(A). {
X.n += F.n;
......
......@@ -911,6 +911,7 @@ void setDCLSqlElems(SSqlInfo *pInfo, int32_t type, int32_t nParam, ...) {
SStrToken *pToken = va_arg(va, SStrToken *);
taosArrayPush(pInfo->pMiscInfo->a, pToken);
}
va_end(va);
}
......
......@@ -100,6 +100,7 @@ static SKeyword keywordTable[] = {
{"ACCOUNT", TK_ACCOUNT},
{"USE", TK_USE},
{"DESCRIBE", TK_DESCRIBE},
{"SYNCDB", TK_SYNCDB},
{"ALTER", TK_ALTER},
{"PASS", TK_PASS},
{"PRIVILEGE", TK_PRIVILEGE},
......
This diff is collapsed.
......@@ -38,7 +38,7 @@
"insert_rows": 100,
"multi_thread_write_one_tbl": "no",
"number_of_tbl_in_one_sql": 0,
"rows_per_tbl": 3,
"interlace_rows": 3,
"max_sql_len": 1024,
"disorder_ratio": 0,
"disorder_range": 1000,
......
......@@ -68,20 +68,14 @@ function prepareBuild {
rm -rf $CURR_DIR/../../../../release/*
fi
if [ ! -e $DOCKER_DIR/TDengine-server-$VERSION-Linux-x64.tar.gz ] || [ ! -e $DOCKER_DIR/TDengine-arbitrator-$VERSION-Linux-x64.tar.gz ]; then
cd $CURR_DIR/../../../../packaging
echo $CURR_DIR
echo $IN_TDINTERNAL
echo "generating TDeninger packages"
if [[ "$CURR_DIR" == *"$IN_TDINTERNAL"* ]]; then
pwd
./release.sh -v cluster -n $VERSION >> /dev/null 2>&1
else
pwd
./release.sh -v edge -n $VERSION >> /dev/null 2>&1
fi
cd $CURR_DIR/../../../../packaging
if [[ "$CURR_DIR" == *"$IN_TDINTERNAL"* ]]; then
if [[ "$CURR_DIR" == *"$IN_TDINTERNAL"* ]]; then
if [ ! -e $DOCKER_DIR/TDengine-enterprise-server-$VERSION-Linux-x64.tar.gz ] || [ ! -e $DOCKER_DIR/TDengine-enterprise-arbitrator-$VERSION-Linux-x64.tar.gz ]; then
echo "generating TDeninge enterprise packages"
./release.sh -v cluster -n $VERSION >> /dev/null 2>&1
if [ ! -e $CURR_DIR/../../../../release/TDengine-enterprise-server-$VERSION-Linux-x64.tar.gz ]; then
echo "no TDengine install package found"
exit 1
......@@ -91,7 +85,17 @@ function prepareBuild {
echo "no arbitrator install package found"
exit 1
fi
else
cd $CURR_DIR/../../../../release
mv TDengine-enterprise-server-$VERSION-Linux-x64.tar.gz $DOCKER_DIR
mv TDengine-enterprise-arbitrator-$VERSION-Linux-x64.tar.gz $DOCKER_DIR
fi
else
if [ ! -e $DOCKER_DIR/TDengine-server-$VERSION-Linux-x64.tar.gz ] || [ ! -e $DOCKER_DIR/TDengine-arbitrator-$VERSION-Linux-x64.tar.gz ]; then
echo "generating TDeninge community packages"
./release.sh -v edge -n $VERSION >> /dev/null 2>&1
if [ ! -e $CURR_DIR/../../../../release/TDengine-server-$VERSION-Linux-x64.tar.gz ]; then
echo "no TDengine install package found"
exit 1
......@@ -101,16 +105,11 @@ function prepareBuild {
echo "no arbitrator install package found"
exit 1
fi
fi
cd $CURR_DIR/../../../../release
if [[ "$CURR_DIR" == *"$IN_TDINTERNAL"* ]]; then
mv TDengine-enterprise-server-$VERSION-Linux-x64.tar.gz $DOCKER_DIR
mv TDengine-enterprise-arbitrator-$VERSION-Linux-x64.tar.gz $DOCKER_DIR
else
cd $CURR_DIR/../../../../release
mv TDengine-server-$VERSION-Linux-x64.tar.gz $DOCKER_DIR
mv TDengine-arbitrator-$VERSION-Linux-x64.tar.gz $DOCKER_DIR
fi
fi
fi
rm -rf $DOCKER_DIR/*.yml
......
......@@ -39,7 +39,7 @@
"insert_rows": 100000,
"multi_thread_write_one_tbl": "no",
"number_of_tbl_in_one_sql": 1,
"rows_per_tbl": 100,
"interlace_rows": 100,
"max_sql_len": 1024000,
"disorder_ratio": 0,
"disorder_range": 1000,
......
......@@ -198,6 +198,7 @@ python3 ./test.py -f query/queryWithTaosdKilled.py
python3 ./test.py -f query/floatCompare.py
python3 ./test.py -f query/query1970YearsAf.py
python3 ./test.py -f query/bug3351.py
python3 ./test.py -f query/bug3375.py
......@@ -234,16 +235,6 @@ python3 client/twoClients.py
python3 test.py -f query/queryInterval.py
python3 test.py -f query/queryFillTest.py
# tools
python3 test.py -f tools/taosdemoTest.py
python3 test.py -f tools/taosdemoTestWithoutMetric.py
python3 test.py -f tools/taosdemoTestWithJson.py
python3 test.py -f tools/taosdemoTestLimitOffset.py
python3 test.py -f tools/taosdumpTest.py
python3 test.py -f tools/taosdemoTest2.py
python3 test.py -f tools/taosdemoTestSampleData.py
python3 test.py -f tools/taosdemoTestInterlace.py
# subscribe
python3 test.py -f subscribe/singlemeter.py
#python3 test.py -f subscribe/stability.py
......@@ -253,6 +244,18 @@ python3 test.py -f subscribe/supertable.py
#======================p3-end===============
#======================p4-start===============
# tools
python3 test.py -f tools/taosdumpTest.py
python3 test.py -f tools/taosdemoTest.py
python3 test.py -f tools/taosdemoTestWithoutMetric.py
python3 test.py -f tools/taosdemoTestWithJson.py
python3 test.py -f tools/taosdemoTestLimitOffset.py
python3 test.py -f tools/taosdemoTest2.py
python3 test.py -f tools/taosdemoTestSampleData.py
python3 test.py -f tools/taosdemoTestInterlace.py
python3 test.py -f tools/taosdemoTestQuery.py
python3 ./test.py -f update/merge_commit_data-0.py
# wal
python3 ./test.py -f wal/addOldWalTest.py
......
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# -*- coding: utf-8 -*-
import sys
from util.log import *
from util.cases import *
from util.sql import *
from util.dnodes import *
class TDTestCase:
def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor(), logSql)
def run(self):
tdSql.prepare()
tdSql.execute("drop database if exists db")
tdSql.execute("create database if not exists db keep 36500")
tdSql.execute("use db")
tdLog.printNoPrefix("==========step1:create table && insert data")
tdSql.execute(
"create table stb1 (ts timestamp, c11 int) TAGS(t11 int, t12 int )"
)
tdSql.execute(
"create table stb2 (ts timestamp, c21 int) TAGS(t21 int, t22 int )"
)
tdSql.execute("create table t10 using stb1 tags(1, 10)")
tdSql.execute("create table t20 using stb2 tags(1, 12)")
tdSql.execute("insert into t10 values (1600000000000, 1)")
tdSql.execute("insert into t10 values (1610000000000, 2)")
tdSql.execute("insert into t20 values (1600000000000, 3)")
tdSql.execute("insert into t20 values (1610000000000, 4)")
tdLog.printNoPrefix("==========step2:query crash test")
tdSql.query("select stb1.c11, stb1.t11, stb1.t12 from stb2,stb1 where stb2.t21 = stb1.t11 and stb1.ts = stb2.ts")
tdSql.checkRows(2)
tdSql.query("select stb2.c21, stb2.t21, stb2.t21 from stb1, stb2 where stb2.t21 = stb1.t11 and stb1.ts = stb2.ts")
tdSql.checkRows(2)
tdSql.query("select top(stb2.c21,2) from stb1, stb2 where stb2.t21 = stb1.t11 and stb1.ts = stb2.ts")
tdSql.checkRows(2)
tdSql.query("select last(stb2.c21) from stb1, stb2 where stb2.t21 = stb1.t11 and stb1.ts = stb2.ts")
tdSql.checkRows(1)
def stop(self):
tdSql.close()
tdLog.success("%s successfully executed" % __file__)
tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())
\ No newline at end of file
......@@ -18,6 +18,7 @@ import json
import subprocess
import datetime
from util.log import *
from util.sql import *
from util.cases import *
......@@ -39,6 +40,7 @@ class TDTestCase:
tdLog.debug(f"binPath {binPath}")
binPath = os.path.realpath(binPath)
tdLog.debug(f"binPath real path {binPath}")
if path == "":
self.path = os.path.abspath(binPath + "../../")
else:
......@@ -187,12 +189,12 @@ class TDTestCase:
"select * from t9 where t9.ts > '1969-12-31 22:00:00.000' and t9.ts <'1970-01-01 02:00:00.000' "
)
tdSql.checkRows(719)
tdSql.query(
"select * from t0,t1 where t0.ts=t1.ts and t1.ts >= '1970-01-01 00:00:00.000' "
)
tdSql.checkRows(680)
tdSql.query(
"select diff(col1) from t0 where t0.ts >= '1970-01-01 00:00:00.000' "
)
......@@ -251,5 +253,6 @@ class TDTestCase:
tdSql.close()
tdLog.success(f"{__file__} successfully executed")
tdCases.addLinux(__file__, TDTestCase())
tdCases.addWindows(__file__, TDTestCase())
tdCases.addWindows(__file__, TDTestCase())
\ No newline at end of file
......@@ -10,7 +10,7 @@
"result_file": "./insert_res.txt",
"confirm_parameter_prompt": "no",
"insert_interval": 5000,
"rows_per_tbl": 50,
"interlace_rows": 50,
"num_of_records_per_req": 100,
"max_sql_len": 1024000,
"databases": [{
......@@ -42,7 +42,7 @@
"insert_mode": "taosc",
"insert_rows": 250,
"multi_thread_write_one_tbl": "no",
"rows_per_tbl": 80,
"interlace_rows": 80,
"max_sql_len": 1024000,
"disorder_ratio": 0,
"disorder_range": 1000,
......
{
"filetype": "query",
"cfgdir": "/etc/taos",
"host": "127.0.0.1",
"port": 6030,
"user": "root",
"password": "taosdata",
"confirm_parameter_prompt": "no",
"databases": "test",
"query_times": 1,
"super_table_query": {
"stblname": "meters",
"query_interval": 10,
"threads": 8,
"sqls": [
{
"sql": "select last_row(ts) from xxxx",
"result": ""
}
]
}
}
###################################################################
##################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
......@@ -25,9 +25,6 @@ class TDTestCase:
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor(), logSql)
self.numberOfTables = 10000
self.numberOfRecords = 100
def getBuildPath(self):
selfPath = os.path.dirname(os.path.realpath(__file__))
......
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# -*- coding: utf-8 -*-
import sys
import os
import time
from util.log import *
from util.cases import *
from util.sql import *
from util.dnodes import *
import subprocess
class TDTestCase:
def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor(), logSql)
self.numberOfTables = 1000
self.numberOfRecords = 100
def getBuildPath(self):
selfPath = os.path.dirname(os.path.realpath(__file__))
if ("community" in selfPath):
projPath = selfPath[:selfPath.find("community")]
else:
projPath = selfPath[:selfPath.find("tests")]
for root, dirs, files in os.walk(projPath):
if ("taosd" in files):
rootRealPath = os.path.dirname(os.path.realpath(root))
if ("packaging" not in rootRealPath):
buildPath = root[:len(root) - len("/build/bin")]
break
return buildPath
def run(self):
tdSql.prepare()
buildPath = self.getBuildPath()
if (buildPath == ""):
tdLog.exit("taosd not found!")
else:
tdLog.info("taosd found in %s" % buildPath)
binPath = buildPath + "/build/bin/"
os.system("%staosdemo -y -t %d -n %d" %
(binPath, self.numberOfTables, self.numberOfRecords))
print("Sleep 2 seconds..")
time.sleep(2)
os.system('%staosdemo -f tools/query.json ' % binPath)
# taosdemoCmd = '%staosdemo -f tools/query.json ' % binPath
# threads = subprocess.check_output(
# taosdemoCmd, shell=True).decode("utf-8")
# print("threads: %d" % int(threads))
# if (int(threads) != 8):
# caller = inspect.getframeinfo(inspect.stack()[0][0])
# tdLog.exit(
# "%s(%d) failed: expected threads 8, actual %d" %
# (caller.filename, caller.lineno, int(threads)))
#
def stop(self):
tdSql.close()
tdLog.success("%s successfully executed" % __file__)
tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())
......@@ -100,7 +100,10 @@ function runSimCaseOneByOnefq {
cp -r ../../sim ~/sim_`date "+%Y_%m_%d_%H:%M:%S" `
rm -rf ../../sim/case.log
fi
exit 8
dohavecore $2
if [[ $2 == 1 ]];then
exit 8
fi
fi
end_time=`date +%s`
echo execution time of $case was `expr $end_time - $start_time`s. | tee -a out.log
......@@ -169,10 +172,13 @@ function runPyCaseOneByOnefq() {
out_log=`tail -1 pytest-out.log `
if [[ $out_log =~ 'failed' ]];then
cp -r ../../sim ~/sim_`date "+%Y_%m_%d_%H:%M:%S" `
echo '=====================log====================='
echo '=====================log===================== '
cat ../../sim/case.log
rm -rf ../../sim/case.log
exit 8
dohavecore $2
if [[ $2 == 1 ]];then
exit 8
fi
fi
echo execution time of $case was `expr $end_time - $start_time`s. | tee -a pytest-out.log
else
......