diff --git a/Jenkinsfile b/Jenkinsfile index 9544343bec6ef964fb15cf94c7a1a7c93d98810f..ebac32cb241af1a35556262690544f84ca94d9fc 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -104,6 +104,10 @@ pipeline { find pytest -name '*'sql|xargs rm -rf ./test-all.sh p2 date''' + sh ''' + cd ${WKC}/tests + ./test-all.sh b4fq + ''' } } stage('test_b1') { diff --git a/src/client/src/tscAsync.c b/src/client/src/tscAsync.c index 96aeb9d60de1ab6fbaeebcb54e2da1ab316179f8..b7b3441bd18606d3b6444e508587f6552efc08ff 100644 --- a/src/client/src/tscAsync.c +++ b/src/client/src/tscAsync.c @@ -421,7 +421,7 @@ void tscTableMetaCallBack(void *param, TAOS_RES *res, int code) { SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex); // check if it is a sub-query of super table query first, if true, enter another routine - if (TSDB_QUERY_HAS_TYPE(pQueryInfo->type, (TSDB_QUERY_TYPE_STABLE_SUBQUERY|TSDB_QUERY_TYPE_TAG_FILTER_QUERY))) { + if (TSDB_QUERY_HAS_TYPE(pQueryInfo->type, (TSDB_QUERY_TYPE_STABLE_SUBQUERY|TSDB_QUERY_TYPE_SUBQUERY|TSDB_QUERY_TYPE_TAG_FILTER_QUERY))) { tscDebug("%p update local table meta, continue to process sql and send the corresponding query", pSql); STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0); diff --git a/src/client/src/tscSQLParser.c b/src/client/src/tscSQLParser.c index 6479a7ecba505c90025b66c41f8740651132cb66..280c8b7ec2b8887d161fce04e5c22efecc0c4e46 100644 --- a/src/client/src/tscSQLParser.c +++ b/src/client/src/tscSQLParser.c @@ -4599,7 +4599,7 @@ int32_t parseOrderbyClause(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SQuerySQL* pQu const char* msg0 = "only support order by primary timestamp"; const char* msg1 = "invalid column name"; const char* msg2 = "only support order by primary timestamp or queried column"; - const char* msg3 = "only support order by primary timestamp or first tag in groupby clause"; + const char* msg3 = "invalid column in order by clause, only primary timestamp or first tag in groupby clause allowed"; 
setDefaultOrderInfo(pQueryInfo); STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0); @@ -5279,8 +5279,7 @@ int32_t parseLimitClause(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t clauseIn const char* msg0 = "soffset/offset can not be less than 0"; const char* msg1 = "slimit/soffset only available for STable query"; - const char* msg2 = "functions mixed up in table query"; - const char* msg3 = "slimit/soffset can not apply to projection query"; + const char* msg2 = "slimit/soffset can not apply to projection query"; // handle the limit offset value, validate the limit pQueryInfo->limit = pQuerySql->limit; @@ -5305,7 +5304,7 @@ int32_t parseLimitClause(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t clauseIn if (!tscQueryTags(pQueryInfo)) { // local handle the super table tag query if (tscIsProjectionQueryOnSTable(pQueryInfo, 0)) { if (pQueryInfo->slimit.limit > 0 || pQueryInfo->slimit.offset > 0) { - return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3); + return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2); } // for projection query on super table, all queries are subqueries @@ -5363,24 +5362,6 @@ int32_t parseLimitClause(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t clauseIn if (pQueryInfo->slimit.limit != -1 || pQueryInfo->slimit.offset != 0) { return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1); } - - size_t size = taosArrayGetSize(pQueryInfo->exprList); - - bool hasTags = false; - bool hasOtherFunc = false; - // filter the query functions operating on "tbname" column that are not supported by normal columns. 
- for (int32_t i = 0; i < size; ++i) { - SSqlExpr* pExpr = tscSqlExprGet(pQueryInfo, i); - if (TSDB_COL_IS_TAG(pExpr->colInfo.flag)) { - hasTags = true; - } else { - hasOtherFunc = true; - } - } - - if (hasTags && hasOtherFunc) { - return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2); - } } return TSDB_CODE_SUCCESS; @@ -6282,10 +6263,12 @@ int32_t doCheckForCreateFromStable(SSqlObj* pSql, SSqlInfo* pInfo) { int32_t doCheckForStream(SSqlObj* pSql, SSqlInfo* pInfo) { const char* msg1 = "invalid table name"; + const char* msg2 = "functions not allowed in CQ"; const char* msg3 = "fill only available for interval query"; const char* msg4 = "fill option not supported in stream computing"; const char* msg5 = "sql too long"; // todo ADD support const char* msg6 = "from missing in subclause"; + const char* msg7 = "time interval is required"; SSqlCmd* pCmd = &pSql->cmd; SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, 0); @@ -6295,10 +6278,10 @@ int32_t doCheckForStream(SSqlObj* pSql, SSqlInfo* pInfo) { STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0); // if sql specifies db, use it, otherwise use default db - SStrToken* pzTableName = &(pCreateTable->name); + SStrToken* pName = &(pCreateTable->name); SQuerySQL* pQuerySql = pCreateTable->pSelect; - if (tscValidateName(pzTableName) != TSDB_CODE_SUCCESS) { + if (tscValidateName(pName) != TSDB_CODE_SUCCESS) { return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1); } @@ -6337,15 +6320,19 @@ int32_t doCheckForStream(SSqlObj* pSql, SSqlInfo* pInfo) { // set interval value if (parseIntervalClause(pSql, pQueryInfo, pQuerySql) != TSDB_CODE_SUCCESS) { return TSDB_CODE_TSC_INVALID_SQL; - } else { - if ((pQueryInfo->interval.interval > 0) && - (validateFunctionsInIntervalOrGroupbyQuery(pCmd, pQueryInfo) != TSDB_CODE_SUCCESS)) { - return TSDB_CODE_TSC_INVALID_SQL; - } + } + + if ((pQueryInfo->interval.interval > 0) && + (validateFunctionsInIntervalOrGroupbyQuery(pCmd, pQueryInfo) != TSDB_CODE_SUCCESS)) { + 
return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2); + } + + if (!tscIsProjectionQuery(pQueryInfo) && pQueryInfo->interval.interval == 0) { + return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg7); } // set the created table[stream] name - code = tscSetTableFullName(pTableMetaInfo, pzTableName, pSql); + code = tscSetTableFullName(pTableMetaInfo, pName, pSql); if (code != TSDB_CODE_SUCCESS) { return code; } diff --git a/src/client/src/tscStream.c b/src/client/src/tscStream.c index c1ed9b0ba09f956207eeeccb2c7b8123538a2151..d1004fff62db5d58c0ae0ef45bd4571ee7368b20 100644 --- a/src/client/src/tscStream.c +++ b/src/client/src/tscStream.c @@ -65,44 +65,51 @@ static int64_t tscGetRetryDelayTime(SSqlStream* pStream, int64_t slidingTime, in return retryDelta; } -static void tscProcessStreamLaunchQuery(SSchedMsg *pMsg) { - SSqlStream *pStream = (SSqlStream *)pMsg->ahandle; - SSqlObj * pSql = pStream->pSql; +static void doLaunchQuery(void* param, TAOS_RES* tres, int32_t code) { + SSqlStream *pStream = (SSqlStream *)param; + assert(pStream->pSql == tres && code == TSDB_CODE_SUCCESS); - pSql->fp = tscProcessStreamQueryCallback; - pSql->fetchFp = tscProcessStreamQueryCallback; - pSql->param = pStream; + SSqlObj* pSql = (SSqlObj*) tres; + pSql->fp = doLaunchQuery; + pSql->fetchFp = doLaunchQuery; pSql->res.completed = false; - + SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0); STableMetaInfo *pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0); - int code = tscGetTableMeta(pSql, pTableMetaInfo); - pSql->res.code = code; - + code = tscGetTableMeta(pSql, pTableMetaInfo); if (code == 0 && UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo)) { code = tscGetSTableVgroupInfo(pSql, 0); - pSql->res.code = code; } - // failed to get meter/metric meta, retry in 10sec. 
- if (code != TSDB_CODE_SUCCESS) { - int64_t retryDelayTime = tscGetRetryDelayTime(pStream, pStream->interval.sliding, pStream->precision); - tscDebug("%p stream:%p,get metermeta failed, retry in %" PRId64 "ms", pStream->pSql, pStream, retryDelayTime); - tscSetRetryTimer(pStream, pSql, retryDelayTime); - - } else { + // failed to get table Meta or vgroup list, retry in 10sec. + if (code == TSDB_CODE_SUCCESS) { tscTansformSQLFuncForSTableQuery(pQueryInfo); - tscDebug("%p stream:%p start stream query on:%s", pSql, pStream, pTableMetaInfo->name); - tscDoQuery(pStream->pSql); + tscDebug("%p stream:%p, start stream query on:%s", pSql, pStream, pTableMetaInfo->name); + + pSql->fp = tscProcessStreamQueryCallback; + pSql->fetchFp = tscProcessStreamQueryCallback; + tscDoQuery(pSql); tscIncStreamExecutionCount(pStream); + } else if (code != TSDB_CODE_TSC_ACTION_IN_PROGRESS) { + pSql->res.code = code; + int64_t retryDelayTime = tscGetRetryDelayTime(pStream, pStream->interval.sliding, pStream->precision); + tscDebug("%p stream:%p, get table Meta failed, retry in %" PRId64 "ms", pSql, pStream, retryDelayTime); + tscSetRetryTimer(pStream, pSql, retryDelayTime); } } +static void tscProcessStreamLaunchQuery(SSchedMsg *pMsg) { + SSqlStream *pStream = (SSqlStream *)pMsg->ahandle; + doLaunchQuery(pStream, pStream->pSql, 0); +} + static void tscProcessStreamTimer(void *handle, void *tmrId) { SSqlStream *pStream = (SSqlStream *)handle; - if (pStream == NULL) return; - if (pStream->pTimer != tmrId) return; + if (pStream == NULL || pStream->pTimer != tmrId) { + return; + } + pStream->pTimer = NULL; pStream->numOfRes = 0; // reset the numOfRes. 
@@ -392,11 +399,16 @@ static void tscSetNextLaunchTimer(SSqlStream *pStream, SSqlObj *pSql) { tscSetRetryTimer(pStream, pSql, timer); } -static void tscSetSlidingWindowInfo(SSqlObj *pSql, SSqlStream *pStream) { +static int32_t tscSetSlidingWindowInfo(SSqlObj *pSql, SSqlStream *pStream) { int64_t minIntervalTime = (pStream->precision == TSDB_TIME_PRECISION_MICRO) ? tsMinIntervalTime * 1000L : tsMinIntervalTime; SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0); + + if (!pStream->isProject && pQueryInfo->interval.interval == 0) { + sprintf(pSql->cmd.payload, "the interval value is 0"); + return -1; + } if (pQueryInfo->interval.intervalUnit != 'n' && pQueryInfo->interval.intervalUnit!= 'y' && pQueryInfo->interval.interval < minIntervalTime) { tscWarn("%p stream:%p, original sample interval:%" PRId64 " too small, reset to:%" PRId64, pSql, pStream, @@ -436,6 +448,8 @@ static void tscSetSlidingWindowInfo(SSqlObj *pSql, SSqlStream *pStream) { pQueryInfo->interval.interval = 0; // clear the interval value to avoid the force time window split by query processor pQueryInfo->interval.sliding = 0; } + + return TSDB_CODE_SUCCESS; } static int64_t tscGetStreamStartTimestamp(SSqlObj *pSql, SSqlStream *pStream, int64_t stime) { @@ -485,34 +499,19 @@ static int64_t tscGetLaunchTimestamp(const SSqlStream *pStream) { return (pStream->precision == TSDB_TIME_PRECISION_MICRO) ? 
timer / 1000L : timer; } -static void setErrorInfo(SSqlObj* pSql, int32_t code, char* info) { - if (pSql == NULL) { - return; - } - - SSqlCmd* pCmd = &pSql->cmd; - - pSql->res.code = code; - - if (info != NULL) { - strncpy(pCmd->payload, info, pCmd->payloadLen); - } -} - static void tscCreateStream(void *param, TAOS_RES *res, int code) { SSqlStream* pStream = (SSqlStream*)param; SSqlObj* pSql = pStream->pSql; SSqlCmd* pCmd = &pSql->cmd; if (code != TSDB_CODE_SUCCESS) { - setErrorInfo(pSql, code, pCmd->payload); - tscError("%p open stream failed, sql:%s, reason:%s, code:0x%08x", pSql, pSql->sqlstr, pCmd->payload, code); + pSql->res.code = code; + tscError("%p open stream failed, sql:%s, reason:%s, code:%s", pSql, pSql->sqlstr, pCmd->payload, tstrerror(code)); + pStream->fp(pStream->param, NULL, NULL); return; } - registerSqlObj(pSql); - SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, 0); STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0); STableComInfo tinfo = tscGetTableInfo(pTableMetaInfo->pTableMeta); @@ -523,13 +522,22 @@ static void tscCreateStream(void *param, TAOS_RES *res, int code) { pStream->ctime = taosGetTimestamp(pStream->precision); pStream->etime = pQueryInfo->window.ekey; - tscAddIntoStreamList(pStream); + if (tscSetSlidingWindowInfo(pSql, pStream) != TSDB_CODE_SUCCESS) { + pSql->res.code = code; + + tscError("%p stream %p open failed, since the interval value is incorrect", pSql, pStream); + pStream->fp(pStream->param, NULL, NULL); + return; + } - tscSetSlidingWindowInfo(pSql, pStream); pStream->stime = tscGetStreamStartTimestamp(pSql, pStream, pStream->stime); int64_t starttime = tscGetLaunchTimestamp(pStream); pCmd->command = TSDB_SQL_SELECT; + + registerSqlObj(pSql); + tscAddIntoStreamList(pStream); + taosTmrReset(tscProcessStreamTimer, (int32_t)starttime, pStream, tscTmr, &pStream->pTimer); tscDebug("%p stream:%p is opened, query on:%s, interval:%" PRId64 ", sliding:%" PRId64 ", first launched in:%" PRId64 ", sql:%s", pSql, diff 
--git a/src/client/src/tscSubquery.c b/src/client/src/tscSubquery.c index 681291d0db7039af09147d86f6db1fa48ccb233c..063b6af0e621a2e0609577b7a0a1ac75324ab047 100644 --- a/src/client/src/tscSubquery.c +++ b/src/client/src/tscSubquery.c @@ -2094,6 +2094,13 @@ static SSqlObj *tscCreateSTableSubquery(SSqlObj *pSql, SRetrieveSupport *trsuppo } void tscRetrieveDataRes(void *param, TAOS_RES *tres, int code) { + // the param may be null, since it may be done by other query threads. and the asyncOnError may enter in this + // function while kill query by a user. + if (param == NULL) { + assert(code != TSDB_CODE_SUCCESS); + return; + } + SRetrieveSupport *trsupport = (SRetrieveSupport *) param; SSqlObj* pParentSql = trsupport->pParentSql; diff --git a/src/connector/C#/TDengineDriver.cs b/src/connector/C#/TDengineDriver.cs index b6f143e1813d60c1ac4ae8356efdca4929c51345..205269501d376a4753b3aedbfa8d512b2df31600 100644 --- a/src/connector/C#/TDengineDriver.cs +++ b/src/connector/C#/TDengineDriver.cs @@ -19,136 +19,137 @@ using System.Runtime.InteropServices; namespace TDengineDriver { - enum TDengineDataType { - TSDB_DATA_TYPE_NULL = 0, // 1 bytes - TSDB_DATA_TYPE_BOOL = 1, // 1 bytes - TSDB_DATA_TYPE_TINYINT = 2, // 1 bytes - TSDB_DATA_TYPE_SMALLINT = 3, // 2 bytes - TSDB_DATA_TYPE_INT = 4, // 4 bytes - TSDB_DATA_TYPE_BIGINT = 5, // 8 bytes - TSDB_DATA_TYPE_FLOAT = 6, // 4 bytes - TSDB_DATA_TYPE_DOUBLE = 7, // 8 bytes - TSDB_DATA_TYPE_BINARY = 8, // string - TSDB_DATA_TYPE_TIMESTAMP = 9,// 8 bytes - TSDB_DATA_TYPE_NCHAR = 10 // unicode string - } - - enum TDengineInitOption - { - TSDB_OPTION_LOCALE = 0, - TSDB_OPTION_CHARSET = 1, - TSDB_OPTION_TIMEZONE = 2, - TDDB_OPTION_CONFIGDIR = 3, - TDDB_OPTION_SHELL_ACTIVITY_TIMER = 4 - } - - class TDengineMeta - { - public string name; - public short size; - public byte type; - public string TypeName() + enum TDengineDataType { - switch ((TDengineDataType)type) - { - case TDengineDataType.TSDB_DATA_TYPE_BOOL: - return "BOOLEAN"; - case 
TDengineDataType.TSDB_DATA_TYPE_TINYINT: - return "BYTE"; - case TDengineDataType.TSDB_DATA_TYPE_SMALLINT: - return "SHORT"; - case TDengineDataType.TSDB_DATA_TYPE_INT: - return "INT"; - case TDengineDataType.TSDB_DATA_TYPE_BIGINT: - return "LONG"; - case TDengineDataType.TSDB_DATA_TYPE_FLOAT: - return "FLOAT"; - case TDengineDataType.TSDB_DATA_TYPE_DOUBLE: - return "DOUBLE"; - case TDengineDataType.TSDB_DATA_TYPE_BINARY: - return "STRING"; - case TDengineDataType.TSDB_DATA_TYPE_TIMESTAMP: - return "TIMESTAMP"; - case TDengineDataType.TSDB_DATA_TYPE_NCHAR: - return "NCHAR"; - default: - return "undefine"; - } + TSDB_DATA_TYPE_NULL = 0, // 1 bytes + TSDB_DATA_TYPE_BOOL = 1, // 1 bytes + TSDB_DATA_TYPE_TINYINT = 2, // 1 bytes + TSDB_DATA_TYPE_SMALLINT = 3, // 2 bytes + TSDB_DATA_TYPE_INT = 4, // 4 bytes + TSDB_DATA_TYPE_BIGINT = 5, // 8 bytes + TSDB_DATA_TYPE_FLOAT = 6, // 4 bytes + TSDB_DATA_TYPE_DOUBLE = 7, // 8 bytes + TSDB_DATA_TYPE_BINARY = 8, // string + TSDB_DATA_TYPE_TIMESTAMP = 9,// 8 bytes + TSDB_DATA_TYPE_NCHAR = 10 // unicode string } - } - class TDengine - { - public const int TSDB_CODE_SUCCESS = 0; + enum TDengineInitOption + { + TSDB_OPTION_LOCALE = 0, + TSDB_OPTION_CHARSET = 1, + TSDB_OPTION_TIMEZONE = 2, + TDDB_OPTION_CONFIGDIR = 3, + TDDB_OPTION_SHELL_ACTIVITY_TIMER = 4 + } - [DllImport("taos.dll", EntryPoint = "taos_init", CallingConvention = CallingConvention.Cdecl)] - static extern public void Init(); + class TDengineMeta + { + public string name; + public short size; + public byte type; + public string TypeName() + { + switch ((TDengineDataType)type) + { + case TDengineDataType.TSDB_DATA_TYPE_BOOL: + return "BOOLEAN"; + case TDengineDataType.TSDB_DATA_TYPE_TINYINT: + return "BYTE"; + case TDengineDataType.TSDB_DATA_TYPE_SMALLINT: + return "SHORT"; + case TDengineDataType.TSDB_DATA_TYPE_INT: + return "INT"; + case TDengineDataType.TSDB_DATA_TYPE_BIGINT: + return "LONG"; + case TDengineDataType.TSDB_DATA_TYPE_FLOAT: + return "FLOAT"; + case 
TDengineDataType.TSDB_DATA_TYPE_DOUBLE: + return "DOUBLE"; + case TDengineDataType.TSDB_DATA_TYPE_BINARY: + return "STRING"; + case TDengineDataType.TSDB_DATA_TYPE_TIMESTAMP: + return "TIMESTAMP"; + case TDengineDataType.TSDB_DATA_TYPE_NCHAR: + return "NCHAR"; + default: + return "undefine"; + } + } + } - [DllImport("taos.dll", EntryPoint = "taos_cleanup", CallingConvention = CallingConvention.Cdecl)] - static extern public void Cleanup(); + class TDengine + { + public const int TSDB_CODE_SUCCESS = 0; - [DllImport("taos.dll", EntryPoint = "taos_options", CallingConvention = CallingConvention.Cdecl)] - static extern public void Options(int option, string value); + [DllImport("taos", EntryPoint = "taos_init", CallingConvention = CallingConvention.Cdecl)] + static extern public void Init(); - [DllImport("taos.dll", EntryPoint = "taos_connect", CallingConvention = CallingConvention.Cdecl)] - static extern public IntPtr Connect(string ip, string user, string password, string db, short port); + [DllImport("taos", EntryPoint = "taos_cleanup", CallingConvention = CallingConvention.Cdecl)] + static extern public void Cleanup(); - [DllImport("taos.dll", EntryPoint = "taos_errstr", CallingConvention = CallingConvention.Cdecl)] - static extern private IntPtr taos_errstr(IntPtr res); - static public string Error(IntPtr res) - { - IntPtr errPtr = taos_errstr(res); - return Marshal.PtrToStringAnsi(errPtr); - } + [DllImport("taos", EntryPoint = "taos_options", CallingConvention = CallingConvention.Cdecl)] + static extern public void Options(int option, string value); - [DllImport("taos.dll", EntryPoint = "taos_errno", CallingConvention = CallingConvention.Cdecl)] - static extern public int ErrorNo(IntPtr res); + [DllImport("taos", EntryPoint = "taos_connect", CallingConvention = CallingConvention.Cdecl)] + static extern public IntPtr Connect(string ip, string user, string password, string db, short port); - [DllImport("taos.dll", EntryPoint = "taos_query", CallingConvention = 
CallingConvention.Cdecl)] - static extern public IntPtr Query(IntPtr conn, string sqlstr); + [DllImport("taos", EntryPoint = "taos_errstr", CallingConvention = CallingConvention.Cdecl)] + static extern private IntPtr taos_errstr(IntPtr res); + static public string Error(IntPtr res) + { + IntPtr errPtr = taos_errstr(res); + return Marshal.PtrToStringAnsi(errPtr); + } - [DllImport("taos.dll", EntryPoint = "taos_affected_rows", CallingConvention = CallingConvention.Cdecl)] - static extern public int AffectRows(IntPtr res); + [DllImport("taos", EntryPoint = "taos_errno", CallingConvention = CallingConvention.Cdecl)] + static extern public int ErrorNo(IntPtr res); - [DllImport("taos.dll", EntryPoint = "taos_field_count", CallingConvention = CallingConvention.Cdecl)] - static extern public int FieldCount(IntPtr res); + [DllImport("taos", EntryPoint = "taos_query", CallingConvention = CallingConvention.Cdecl)] + static extern public IntPtr Query(IntPtr conn, string sqlstr); - [DllImport("taos.dll", EntryPoint = "taos_fetch_fields", CallingConvention = CallingConvention.Cdecl)] - static extern private IntPtr taos_fetch_fields(IntPtr res); - static public List FetchFields(IntPtr res) - { - const int fieldSize = 68; - - List metas = new List(); - if (res == IntPtr.Zero) - { - return metas; - } - - int fieldCount = FieldCount(res); - IntPtr fieldsPtr = taos_fetch_fields(res); - - for (int i = 0; i < fieldCount; ++i) - { - int offset = i * fieldSize; - - TDengineMeta meta = new TDengineMeta(); - meta.name = Marshal.PtrToStringAnsi(fieldsPtr + offset); - meta.type = Marshal.ReadByte(fieldsPtr + offset + 65); - meta.size = Marshal.ReadInt16(fieldsPtr + offset + 66); - metas.Add(meta); - } - - return metas; - } + [DllImport("taos", EntryPoint = "taos_affected_rows", CallingConvention = CallingConvention.Cdecl)] + static extern public int AffectRows(IntPtr res); - [DllImport("taos.dll", EntryPoint = "taos_fetch_row", CallingConvention = CallingConvention.Cdecl)] - static extern 
public IntPtr FetchRows(IntPtr res); + [DllImport("taos", EntryPoint = "taos_field_count", CallingConvention = CallingConvention.Cdecl)] + static extern public int FieldCount(IntPtr res); - [DllImport("taos.dll", EntryPoint = "taos_free_result", CallingConvention = CallingConvention.Cdecl)] - static extern public IntPtr FreeResult(IntPtr res); + [DllImport("taos", EntryPoint = "taos_fetch_fields", CallingConvention = CallingConvention.Cdecl)] + static extern private IntPtr taos_fetch_fields(IntPtr res); + static public List FetchFields(IntPtr res) + { + const int fieldSize = 68; - [DllImport("taos.dll", EntryPoint = "taos_close", CallingConvention = CallingConvention.Cdecl)] - static extern public int Close(IntPtr taos); - } -} \ No newline at end of file + List metas = new List(); + if (res == IntPtr.Zero) + { + return metas; + } + + int fieldCount = FieldCount(res); + IntPtr fieldsPtr = taos_fetch_fields(res); + + for (int i = 0; i < fieldCount; ++i) + { + int offset = i * fieldSize; + + TDengineMeta meta = new TDengineMeta(); + meta.name = Marshal.PtrToStringAnsi(fieldsPtr + offset); + meta.type = Marshal.ReadByte(fieldsPtr + offset + 65); + meta.size = Marshal.ReadInt16(fieldsPtr + offset + 66); + metas.Add(meta); + } + + return metas; + } + + [DllImport("taos", EntryPoint = "taos_fetch_row", CallingConvention = CallingConvention.Cdecl)] + static extern public IntPtr FetchRows(IntPtr res); + + [DllImport("taos", EntryPoint = "taos_free_result", CallingConvention = CallingConvention.Cdecl)] + static extern public IntPtr FreeResult(IntPtr res); + + [DllImport("taos", EntryPoint = "taos_close", CallingConvention = CallingConvention.Cdecl)] + static extern public int Close(IntPtr taos); + } +} diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/DatabaseMetaDataTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/DatabaseMetaDataTest.java index 284af3dfe75f7c436ec2ce875714afc235c525d8..19dabe07462b6481b2ab5460aad7ac5712e21b7c 100644 --- 
a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/DatabaseMetaDataTest.java +++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/DatabaseMetaDataTest.java @@ -7,7 +7,7 @@ import org.junit.Test; import java.sql.*; import java.util.Properties; -public class DatabaseMetaDataTest extends BaseTest { +public class DatabaseMetaDataTest { static Connection connection = null; static PreparedStatement statement = null; static String dbName = "test"; @@ -23,20 +23,21 @@ public class DatabaseMetaDataTest extends BaseTest { } Properties properties = new Properties(); properties.setProperty(TSDBDriver.PROPERTY_KEY_HOST, host); + properties.setProperty(TSDBDriver.PROPERTY_KEY_USER, "root"); + properties.setProperty(TSDBDriver.PROPERTY_KEY_PASSWORD, "taosdata"); properties.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8"); properties.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8"); properties.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8"); connection = DriverManager.getConnection("jdbc:TAOS://" + host + ":0/", properties); String sql = "drop database if exists " + dbName; - statement = (TSDBPreparedStatement) connection.prepareStatement(sql); + statement = connection.prepareStatement(sql); statement.executeUpdate("create database if not exists " + dbName); statement.executeUpdate("create table if not exists " + dbName + "." 
+ tName + " (ts timestamp, k int, v int)"); } @Test public void testMetaDataTest() throws SQLException { - DatabaseMetaData databaseMetaData = connection.getMetaData(); ResultSet resultSet = databaseMetaData.getTables(dbName, "t*", "t*", new String[]{"t"}); while (resultSet.next()) { @@ -180,7 +181,7 @@ public class DatabaseMetaDataTest extends BaseTest { databaseMetaData.getCatalogs(); // databaseMetaData.getTableTypes(); - databaseMetaData.getColumns("", "", "", ""); + databaseMetaData.getColumns(dbName, "", tName, ""); databaseMetaData.getColumnPrivileges("", "", "", ""); databaseMetaData.getTablePrivileges("", "", ""); databaseMetaData.getBestRowIdentifier("", "", "", 0, false); diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/AppMemoryLeakTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/AppMemoryLeakTest.java index 8de2e3b442b9b2ea1b84687878ba3bb4faf5e4d6..19bc5f713f9b406a943fc640fd03bb0503ed2967 100644 --- a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/AppMemoryLeakTest.java +++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/AppMemoryLeakTest.java @@ -19,7 +19,7 @@ public class AppMemoryLeakTest { } } - @Test + @Test(expected = Exception.class) public void testCreateTooManyStatement() throws ClassNotFoundException, SQLException { Class.forName("com.taosdata.jdbc.TSDBDriver"); int stmtCnt = 0; @@ -30,15 +30,4 @@ public class AppMemoryLeakTest { } } - public static void main(String[] args) throws ClassNotFoundException, SQLException { - Class.forName("com.taosdata.jdbc.TSDBDriver"); - int stmtCnt = 0; - Connection conn = DriverManager.getConnection("jdbc:TAOS://localhost:6030/?user=root&password=taosdata"); - while (true) { - Statement stmt = conn.createStatement(); - System.out.println(++stmtCnt + " : " + stmt); - } - } - - } diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/ConnectWrongDatabaseTest.java 
b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/ConnectWrongDatabaseTest.java new file mode 100644 index 0000000000000000000000000000000000000000..e4d2d7598d6a6bd5610abcb1bd576f523cb22740 --- /dev/null +++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/ConnectWrongDatabaseTest.java @@ -0,0 +1,24 @@ +package com.taosdata.jdbc.cases; + +import org.junit.Assert; +import org.junit.Test; + +import java.sql.DriverManager; +import java.sql.SQLException; + +public class ConnectWrongDatabaseTest { + + @Test + public void connect() { + try { + Class.forName("com.taosdata.jdbc.TSDBDriver"); + DriverManager.getConnection("jdbc:TAOS://localhost:6030/wrong_db?user=root&password=taosdata"); + } catch (ClassNotFoundException e) { + e.printStackTrace(); + } catch (SQLException e) { + System.out.println(e.getMessage()); + Assert.assertEquals("TDengine Error: Invalid database name", e.getMessage()); + } + } + +} diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/utils/SqlSyntaxValidatorTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/utils/SqlSyntaxValidatorTest.java index fb570e16c4e0ef8036ccd2c29cdc51e1938b2139..ce84f967d09e2217bac90cc7a3f59bf4f50b2a15 100644 --- a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/utils/SqlSyntaxValidatorTest.java +++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/utils/SqlSyntaxValidatorTest.java @@ -5,10 +5,6 @@ import org.junit.Test; public class SqlSyntaxValidatorTest { - @Test - public void validateSqlSyntax() { - } - @Test public void isSelectSQL() { Assert.assertTrue(SqlSyntaxValidator.isSelectSql("select * from test.weather")); diff --git a/src/connector/nodejs/nodetaos/cinterface.js b/src/connector/nodejs/nodetaos/cinterface.js index 995babdb2b7827267d6a4b68cd9d380671823543..a246256f15f313b61c0b7e990e63848fee2431bf 100644 --- a/src/connector/nodejs/nodetaos/cinterface.js +++ b/src/connector/nodejs/nodetaos/cinterface.js @@ -4,6 +4,7 @@ */ const ref = require('ref'); +const os = 
require('os'); const ffi = require('ffi'); const ArrayType = require('ref-array'); const Struct = require('ref-struct'); @@ -188,7 +189,13 @@ function CTaosInterface (config = null, pass = false) { ref.types.void_ptr2 = ref.refType(ref.types.void_ptr); /*Declare a bunch of functions first*/ /* Note, pointers to TAOS_RES, TAOS, are ref.types.void_ptr. The connection._conn buffer is supplied for pointers to TAOS * */ - this.libtaos = ffi.Library('libtaos', { + + if ('win32' == os.platform()) { + taoslibname = 'taos'; + } else { + taoslibname = 'libtaos'; + } + this.libtaos = ffi.Library(taoslibname, { 'taos_options': [ ref.types.int, [ ref.types.int , ref.types.void_ptr ] ], 'taos_init': [ ref.types.void, [ ] ], //TAOS *taos_connect(char *ip, char *user, char *pass, char *db, int port) diff --git a/src/connector/nodejs/package.json b/src/connector/nodejs/package.json index 2d5cf45e1db9a65ce86d998afea6cd689bf63317..5587a69e01e6743a7eb7ef234d111c86db246841 100644 --- a/src/connector/nodejs/package.json +++ b/src/connector/nodejs/package.json @@ -1,6 +1,6 @@ { "name": "td2.0-connector", - "version": "2.0.4", + "version": "2.0.5", "description": "A Node.js connector for TDengine.", "main": "tdengine.js", "scripts": { diff --git a/src/dnode/inc/dnodeModule.h b/src/dnode/inc/dnodeModule.h index e645784c8fd7aced259185f45402f9839842b9aa..863ea433c46c175ee7cb6ad46e6dd38f164d36b1 100644 --- a/src/dnode/inc/dnodeModule.h +++ b/src/dnode/inc/dnodeModule.h @@ -23,8 +23,7 @@ extern "C" { int32_t dnodeInitModules(); void dnodeCleanupModules(); -bool dnodeStartMnode(SMInfos *pMinfos); -void dnodeProcessModuleStatus(uint32_t moduleStatus); +int32_t dnodeStartMnode(SMInfos *pMinfos); #ifdef __cplusplus } diff --git a/src/dnode/src/dnodeModule.c b/src/dnode/src/dnodeModule.c index 62de85445c2e6756094dea1ac6e7e6a5880f7e18..a661585b3b39df986ac7866a255472e47e789fe6 100644 --- a/src/dnode/src/dnodeModule.c +++ b/src/dnode/src/dnodeModule.c @@ -127,14 +127,16 @@ int32_t dnodeInitModules() { 
return dnodeStartModules(); } -void dnodeProcessModuleStatus(uint32_t moduleStatus) { +int32_t dnodeProcessModuleStatus(uint32_t moduleStatus) { + int32_t code = 0; + for (int32_t module = TSDB_MOD_MNODE; module < TSDB_MOD_HTTP; ++module) { bool enableModule = moduleStatus & (1 << module); if (!tsModule[module].enable && enableModule) { dInfo("module status:%u is set, start %s module", moduleStatus, tsModule[module].name); tsModule[module].enable = true; dnodeSetModuleStatus(module); - (*tsModule[module].startFp)(); + code = (*tsModule[module].startFp)(); } if (tsModule[module].enable && !enableModule) { @@ -144,21 +146,29 @@ void dnodeProcessModuleStatus(uint32_t moduleStatus) { (*tsModule[module].stopFp)(); } } -} -bool dnodeStartMnode(SMInfos *pMinfos) { - SMInfos *pMnodes = pMinfos; + return code; +} +int32_t dnodeStartMnode(SMInfos *pMinfos) { if (tsModuleStatus & (1 << TSDB_MOD_MNODE)) { dDebug("mnode module is already started, module status:%d", tsModuleStatus); - return false; + return 0; } uint32_t moduleStatus = tsModuleStatus | (1 << TSDB_MOD_MNODE); dInfo("start mnode module, module status:%d, new status:%d", tsModuleStatus, moduleStatus); - dnodeProcessModuleStatus(moduleStatus); - sdbUpdateSync(pMnodes); + int32_t code = dnodeProcessModuleStatus(moduleStatus); + if (code == 0) { + code = sdbUpdateSync(pMinfos); + } + + if (code != 0) { + dError("failed to start mnode module since %s", tstrerror(code)); + moduleStatus = tsModuleStatus & ~(1 << TSDB_MOD_MNODE); + dnodeProcessModuleStatus(moduleStatus); + } - return true; + return code; } diff --git a/src/dnode/src/dnodeVMgmt.c b/src/dnode/src/dnodeVMgmt.c index bc24d1bf623ec014dd4a4ad35442218549aaf335..4a3d6d9a84f7918c8cbbc40cd80b074ff164cf85 100644 --- a/src/dnode/src/dnodeVMgmt.c +++ b/src/dnode/src/dnodeVMgmt.c @@ -214,7 +214,5 @@ static int32_t dnodeProcessCreateMnodeMsg(SRpcMsg *pMsg) { dDebug("mnode index:%d, mnode:%d:%s", i, pCfg->mnodes.mnodeInfos[i].mnodeId, pCfg->mnodes.mnodeInfos[i].mnodeEp); 
} - dnodeStartMnode(&pCfg->mnodes); - - return TSDB_CODE_SUCCESS; + return dnodeStartMnode(&pCfg->mnodes); } diff --git a/src/inc/dnode.h b/src/inc/dnode.h index 877738778b022c9c7d38a3801beb5cdc86ff9f4d..5ecaf19f61a022bae849c2f946acb0ee693aeb59 100644 --- a/src/inc/dnode.h +++ b/src/inc/dnode.h @@ -40,7 +40,7 @@ void dnodeGetClusterId(char *clusterId); void dnodeUpdateEp(int32_t dnodeId, char *ep, char *fqdn, uint16_t *port); bool dnodeCheckEpChanged(int32_t dnodeId, char *epstr); -bool dnodeStartMnode(SMInfos *pMinfos); +int32_t dnodeStartMnode(SMInfos *pMinfos); void dnodeAddClientRspHandle(uint8_t msgType, void (*fp)(SRpcMsg *rpcMsg)); void dnodeSendMsgToDnode(SRpcEpSet *epSet, SRpcMsg *rpcMsg); diff --git a/src/inc/mnode.h b/src/inc/mnode.h index 800d767eedac3d4680f74564db00e7645917d13b..2495a42ba2e5d23cb361e2d64de04d1f710764ea 100644 --- a/src/inc/mnode.h +++ b/src/inc/mnode.h @@ -65,7 +65,7 @@ int32_t mnodeStartSystem(); void mnodeCleanupSystem(); void mnodeStopSystem(); void sdbUpdateAsync(); -void sdbUpdateSync(void *pMnodes); +int32_t sdbUpdateSync(void *pMnodes); bool mnodeIsRunning(); int32_t mnodeProcessRead(SMnodeMsg *pMsg); int32_t mnodeProcessWrite(SMnodeMsg *pMsg); diff --git a/src/inc/taoserror.h b/src/inc/taoserror.h index 641b65749937438f14744355136b9b6f0aecef3f..ed88bc15ee240ae7f87eeb2c5f0f665f21128e75 100644 --- a/src/inc/taoserror.h +++ b/src/inc/taoserror.h @@ -126,6 +126,11 @@ TAOS_DEFINE_ERROR(TSDB_CODE_MND_INVALID_SHOWOBJ, 0, 0x030B, "Data expir TAOS_DEFINE_ERROR(TSDB_CODE_MND_INVALID_QUERY_ID, 0, 0x030C, "Invalid query id") TAOS_DEFINE_ERROR(TSDB_CODE_MND_INVALID_STREAM_ID, 0, 0x030D, "Invalid stream id") TAOS_DEFINE_ERROR(TSDB_CODE_MND_INVALID_CONN_ID, 0, 0x030E, "Invalid connection id") +TAOS_DEFINE_ERROR(TSDB_CODE_MND_MNODE_IS_RUNNING, 0, 0x0310, "mnode is already running") +TAOS_DEFINE_ERROR(TSDB_CODE_MND_FAILED_TO_CONFIG_SYNC, 0, 0x0311, "failed to config sync") +TAOS_DEFINE_ERROR(TSDB_CODE_MND_FAILED_TO_START_SYNC, 0, 0x0312, "failed
to start sync") +TAOS_DEFINE_ERROR(TSDB_CODE_MND_FAILED_TO_CREATE_DIR, 0, 0x0313, "failed to create mnode dir") +TAOS_DEFINE_ERROR(TSDB_CODE_MND_FAILED_TO_INIT_STEP, 0, 0x0314, "failed to init components") TAOS_DEFINE_ERROR(TSDB_CODE_MND_SDB_OBJ_ALREADY_THERE, 0, 0x0320, "Object already there") TAOS_DEFINE_ERROR(TSDB_CODE_MND_SDB_ERROR, 0, 0x0321, "Unexpected generic error in sdb") diff --git a/src/mnode/src/mnodeMain.c b/src/mnode/src/mnodeMain.c index 6e001f4dfbb64c9d73581fef624b4b763b39d50c..7ef0488c420dd470c3afc5d7ca8ac7a518ccdc73 100644 --- a/src/mnode/src/mnodeMain.c +++ b/src/mnode/src/mnodeMain.c @@ -74,13 +74,13 @@ static int32_t mnodeInitComponents() { int32_t mnodeStartSystem() { if (tsMgmtIsRunning) { mInfo("mnode module already started..."); - return 0; + return TSDB_CODE_SUCCESS; } mInfo("starting to initialize mnode ..."); if (mkdir(tsMnodeDir, 0755) != 0 && errno != EEXIST) { mError("failed to init mnode dir:%s, reason:%s", tsMnodeDir, strerror(errno)); - return -1; + return TSDB_CODE_MND_FAILED_TO_CREATE_DIR; } dnodeAllocMWritequeue(); @@ -88,7 +88,7 @@ int32_t mnodeStartSystem() { dnodeAllocateMPeerQueue(); if (mnodeInitComponents() != 0) { - return -1; + return TSDB_CODE_MND_FAILED_TO_INIT_STEP; } dnodeReportStep("mnode-grant", "start to set grant infomation", 0); @@ -99,7 +99,7 @@ int32_t mnodeStartSystem() { sdbUpdateSync(NULL); - return 0; + return TSDB_CODE_SUCCESS; } int32_t mnodeInitSystem() { diff --git a/src/mnode/src/mnodeSdb.c b/src/mnode/src/mnodeSdb.c index 6573de2987ef0c134be9a29563fda602f01f6e00..9d2bfe0ce15f2579bdffd95095ca235bdadff286 100644 --- a/src/mnode/src/mnodeSdb.c +++ b/src/mnode/src/mnodeSdb.c @@ -318,11 +318,11 @@ void sdbUpdateAsync() { taosTmrReset(sdbUpdateSyncTmrFp, 200, NULL, tsMnodeTmr, &tsSdbTmr); } -void sdbUpdateSync(void *pMnodes) { +int32_t sdbUpdateSync(void *pMnodes) { SMInfos *pMinfos = pMnodes; if (!mnodeIsRunning()) { mDebug("vgId:1, mnode not start yet, update sync config later"); - return; + return 
TSDB_CODE_MND_MNODE_IS_RUNNING; } mDebug("vgId:1, update sync config, pMnodes:%p", pMnodes); @@ -377,12 +377,12 @@ void sdbUpdateSync(void *pMnodes) { if (!hasThisDnode) { sdbDebug("vgId:1, update sync config, this dnode not exist"); - return; + return TSDB_CODE_MND_FAILED_TO_CONFIG_SYNC; } if (memcmp(&syncCfg, &tsSdbMgmt.cfg, sizeof(SSyncCfg)) == 0) { sdbDebug("vgId:1, update sync config, info not changed"); - return; + return TSDB_CODE_SUCCESS; } sdbInfo("vgId:1, work as mnode, replica:%d", syncCfg.replica); @@ -407,12 +407,15 @@ void sdbUpdateSync(void *pMnodes) { tsSdbMgmt.cfg = syncCfg; if (tsSdbMgmt.sync) { - syncReconfig(tsSdbMgmt.sync, &syncCfg); + int32_t code = syncReconfig(tsSdbMgmt.sync, &syncCfg); + if (code != 0) return code; } else { tsSdbMgmt.sync = syncStart(&syncInfo); + if (tsSdbMgmt.sync <= 0) return TSDB_CODE_MND_FAILED_TO_START_SYNC; } sdbUpdateMnodeRoles(); + return TSDB_CODE_SUCCESS; } int32_t sdbInitRef() { diff --git a/src/query/inc/qExecutor.h b/src/query/inc/qExecutor.h index 32a68549fabd73104c56ea89a705f7cf5a6a30d1..c3d60e21dc59ccbbf9940c4a155ee29d57d2f8a2 100644 --- a/src/query/inc/qExecutor.h +++ b/src/query/inc/qExecutor.h @@ -152,7 +152,7 @@ typedef struct SQuery { int16_t precision; int16_t numOfOutput; int16_t fillType; - int16_t checkBuffer; // check if the buffer is full during scan each block + int16_t checkResultBuf; // check if the buffer is full during scan each block SLimitVal limit; int32_t rowSize; SSqlGroupbyExpr* pGroupbyExpr; diff --git a/src/query/inc/qTsbuf.h b/src/query/inc/qTsbuf.h index 90bd64336fdeed91deb68b9b490224a7fb29bc80..5d055782c9b82a1444c97a62d429cc2ba9a53986 100644 --- a/src/query/inc/qTsbuf.h +++ b/src/query/inc/qTsbuf.h @@ -88,6 +88,7 @@ typedef struct STSBuf { STSList tsData; // uncompressed raw ts data uint64_t numOfTotal; bool autoDelete; + bool remainOpen; int32_t tsOrder; // order of timestamp in ts comp buffer STSCursor cur; } STSBuf; diff --git a/src/query/src/qAggMain.c 
b/src/query/src/qAggMain.c index 2997d56326b664488a1942df0150343bb5b0f6f9..4b4c8b1426e027b508af846fbfe3914989ce7c0d 100644 --- a/src/query/src/qAggMain.c +++ b/src/query/src/qAggMain.c @@ -3836,8 +3836,10 @@ static void ts_comp_finalize(SQLFunctionCtx *pCtx) { STSBuf * pTSbuf = pInfo->pTSBuf; tsBufFlush(pTSbuf); - strcpy(pCtx->aOutputBuf, pTSbuf->path); + *(FILE **)pCtx->aOutputBuf = pTSbuf->f; + + pTSbuf->remainOpen = true; tsBufDestroy(pTSbuf); doFinalizer(pCtx); } diff --git a/src/query/src/qExecutor.c b/src/query/src/qExecutor.c index 05487d435d933d019fda5fbe1551a5a2118fad22..fc7b3bdd6d115920a8ab693b38b6ff56af54e326 100644 --- a/src/query/src/qExecutor.c +++ b/src/query/src/qExecutor.c @@ -758,7 +758,12 @@ static void doUpdateResultRowIndex(SResultRowInfo*pResultRowInfo, TSKEY lastKey, } } - pResultRowInfo->curIndex = i + 1; // current not closed result object + if (i == pResultRowInfo->size - 1) { + pResultRowInfo->curIndex = i; + } else { + pResultRowInfo->curIndex = i + 1; // current not closed result object + } + pResultRowInfo->prevSKey = pResultRowInfo->pResult[pResultRowInfo->curIndex]->win.skey; } } @@ -1708,7 +1713,7 @@ static int32_t tableApplyFunctionsOnBlock(SQueryRuntimeEnv *pRuntimeEnv, SDataBl numOfRes = (int32_t) getNumOfResult(pRuntimeEnv); // update the number of output result - if (numOfRes > 0 && pQuery->checkBuffer == 1) { + if (numOfRes > 0 && pQuery->checkResultBuf == 1) { assert(numOfRes >= pQuery->rec.rows); pQuery->rec.rows = numOfRes; @@ -2005,6 +2010,7 @@ static void doFreeQueryHandle(SQInfo* pQInfo) { assert(pMemRef->ref == 0 && pMemRef->imem == NULL && pMemRef->mem == NULL); } + static void teardownQueryRuntimeEnv(SQueryRuntimeEnv *pRuntimeEnv) { if (pRuntimeEnv->pQuery == NULL) { return; @@ -2016,6 +2022,16 @@ static void teardownQueryRuntimeEnv(SQueryRuntimeEnv *pRuntimeEnv) { qDebug("QInfo:%p teardown runtime env", pQInfo); cleanupResultRowInfo(&pRuntimeEnv->windowResInfo); + if (isTSCompQuery(pQuery)) { + FILE *f = *(FILE 
**)pQuery->sdata[0]->data; + + if (f) { + fclose(f); + *(FILE **)pQuery->sdata[0]->data = NULL; + } + } + + if (pRuntimeEnv->pCtx != NULL) { for (int32_t i = 0; i < pQuery->numOfOutput; ++i) { SQLFunctionCtx *pCtx = &pRuntimeEnv->pCtx[i]; @@ -2222,9 +2238,9 @@ void getAlignQueryTimeWindow(SQuery *pQuery, int64_t key, int64_t keyFirst, int6 static void setScanLimitationByResultBuffer(SQuery *pQuery) { if (isTopBottomQuery(pQuery)) { - pQuery->checkBuffer = 0; + pQuery->checkResultBuf = 0; } else if (isGroupbyNormalCol(pQuery->pGroupbyExpr)) { - pQuery->checkBuffer = 0; + pQuery->checkResultBuf = 0; } else { bool hasMultioutput = false; for (int32_t i = 0; i < pQuery->numOfOutput; ++i) { @@ -2239,7 +2255,7 @@ static void setScanLimitationByResultBuffer(SQuery *pQuery) { } } - pQuery->checkBuffer = hasMultioutput ? 1 : 0; + pQuery->checkResultBuf = hasMultioutput ? 1 : 0; } } @@ -2911,7 +2927,7 @@ void setTagVal(SQueryRuntimeEnv *pRuntimeEnv, void *pTable, void *tsdb) { SQInfo* pQInfo = GET_QINFO_ADDR(pRuntimeEnv); SExprInfo *pExprInfo = &pQuery->pExpr1[0]; - if (pQuery->numOfOutput == 1 && pExprInfo->base.functionId == TSDB_FUNC_TS_COMP) { + if (pQuery->numOfOutput == 1 && pExprInfo->base.functionId == TSDB_FUNC_TS_COMP && pRuntimeEnv->stableQuery) { assert(pExprInfo->base.numOfParams == 1); int16_t tagColId = (int16_t)pExprInfo->base.arg->argValue.i64; @@ -3674,6 +3690,10 @@ void scanOneTableDataBlocks(SQueryRuntimeEnv *pRuntimeEnv, TSKEY start) { SQueryStatusInfo qstatus = getQueryStatusInfo(pRuntimeEnv, start); SET_MASTER_SCAN_FLAG(pRuntimeEnv); + if (!pRuntimeEnv->groupbyColumn && pRuntimeEnv->hasTagResults) { + setTagVal(pRuntimeEnv, pTableQueryInfo->pTable, pQInfo->tsdb); + } + while (1) { doScanAllDataBlocks(pRuntimeEnv); @@ -4757,20 +4777,21 @@ static void enableExecutionForNextTable(SQueryRuntimeEnv *pRuntimeEnv) { } } +// TODO refactor: setAdditionalInfo static FORCE_INLINE void setEnvForEachBlock(SQInfo* pQInfo, STableQueryInfo* pTableQueryInfo, 
SDataBlockInfo* pBlockInfo) { SQueryRuntimeEnv* pRuntimeEnv = &pQInfo->runtimeEnv; SQuery* pQuery = pQInfo->runtimeEnv.pQuery; int32_t step = GET_FORWARD_DIRECTION_FACTOR(pQuery->order.order); - if (!QUERY_IS_INTERVAL_QUERY(pQuery)) { - setExecutionContext(pQInfo, pTableQueryInfo->groupIndex, pBlockInfo->window.ekey + step); - } else { // interval query + if (QUERY_IS_INTERVAL_QUERY(pQuery)) { TSKEY nextKey = pBlockInfo->window.skey; setIntervalQueryRange(pQInfo, nextKey); if (pRuntimeEnv->hasTagResults || pRuntimeEnv->pTsBuf != NULL) { setAdditionalInfo(pQInfo, pTableQueryInfo->pTable, pTableQueryInfo); } + } else { // non-interval query + setExecutionContext(pQInfo, pTableQueryInfo->groupIndex, pBlockInfo->window.ekey + step); } } @@ -5626,8 +5647,6 @@ static void tableAggregationProcess(SQInfo *pQInfo, STableQueryInfo* pTableInfo) return; } - pQuery->current = pTableInfo; // set current query table info - scanOneTableDataBlocks(pRuntimeEnv, pTableInfo->lastKey); finalizeQueryResult(pRuntimeEnv); @@ -5646,10 +5665,8 @@ static void tableAggregationProcess(SQInfo *pQInfo, STableQueryInfo* pTableInfo) static void tableProjectionProcess(SQInfo *pQInfo, STableQueryInfo* pTableInfo) { SQueryRuntimeEnv *pRuntimeEnv = &pQInfo->runtimeEnv; - SQuery *pQuery = pRuntimeEnv->pQuery; - pQuery->current = pTableInfo; - // for ts_comp query, re-initialized is not allowed + SQuery *pQuery = pRuntimeEnv->pQuery; if (!isTSCompQuery(pQuery)) { resetDefaultResInfoOutputBuf(pRuntimeEnv); } @@ -5701,9 +5718,7 @@ static void tableProjectionProcess(SQInfo *pQInfo, STableQueryInfo* pTableInfo) // handle time interval query on table static void tableIntervalProcess(SQInfo *pQInfo, STableQueryInfo* pTableInfo) { SQueryRuntimeEnv *pRuntimeEnv = &(pQInfo->runtimeEnv); - SQuery *pQuery = pRuntimeEnv->pQuery; - pQuery->current = pTableInfo; TSKEY newStartKey = QUERY_IS_ASC_QUERY(pQuery)? 
INT64_MIN:INT64_MAX; @@ -5773,7 +5788,6 @@ static void tableQueryImpl(SQInfo *pQInfo) { } qDebug("QInfo:%p current:%" PRId64 " returned, total:%" PRId64, pQInfo, pQuery->rec.rows, pQuery->rec.total); - return; } else { pQuery->rec.rows = 0; assert(pRuntimeEnv->windowResInfo.size > 0); @@ -5791,9 +5805,9 @@ static void tableQueryImpl(SQInfo *pQInfo) { if (pQuery->rec.rows <= 0 || pRuntimeEnv->windowResInfo.size <= pQInfo->groupIndex) { qDebug("QInfo:%p query over, %" PRId64 " rows are returned", pQInfo, pQuery->rec.total); } - - return; } + + return; } // number of points returned during this query @@ -5802,7 +5816,9 @@ static void tableQueryImpl(SQInfo *pQInfo) { assert(pQInfo->tableqinfoGroupInfo.numOfTables == 1); SArray* g = GET_TABLEGROUP(pQInfo, 0); + STableQueryInfo* item = taosArrayGetP(g, 0); + pQuery->current = item; // group by normal column, sliding window query, interval query are handled by interval query processor if (QUERY_IS_INTERVAL_QUERY(pQuery) || pRuntimeEnv->groupbyColumn) { // interval (down sampling operation) @@ -5810,7 +5826,7 @@ static void tableQueryImpl(SQInfo *pQInfo) { } else if (isFixedOutputQuery(pRuntimeEnv)) { tableAggregationProcess(pQInfo, item); } else { // diff/add/multiply/subtract/division - assert(pQuery->checkBuffer == 1); + assert(pQuery->checkResultBuf == 1); tableProjectionProcess(pQInfo, item); } @@ -5830,7 +5846,7 @@ static void stableQueryImpl(SQInfo *pQInfo) { (isFixedOutputQuery(pRuntimeEnv) && (!isPointInterpoQuery(pQuery)) && (!pRuntimeEnv->groupbyColumn))) { multiTableQueryProcess(pQInfo); } else { - assert((pQuery->checkBuffer == 1 && pQuery->interval.interval == 0) || isPointInterpoQuery(pQuery) || + assert((pQuery->checkResultBuf == 1 && pQuery->interval.interval == 0) || isPointInterpoQuery(pQuery) || pRuntimeEnv->groupbyColumn); sequentialTableProcess(pQInfo); @@ -6944,10 +6960,10 @@ static size_t getResultSize(SQInfo *pQInfo, int64_t *numOfRows) { * TODO handle the case that the file is too large to send 
back one time */ if (isTSCompQuery(pQuery) && (*numOfRows) > 0) { - struct stat fstat; - if (stat(pQuery->sdata[0]->data, &fstat) == 0) { - *numOfRows = fstat.st_size; - return fstat.st_size; + struct stat fStat; + if (fstat(fileno(*(FILE **)pQuery->sdata[0]->data), &fStat) == 0) { + *numOfRows = fStat.st_size; + return fStat.st_size; } else { qError("QInfo:%p failed to get file info, path:%s, reason:%s", pQInfo, pQuery->sdata[0]->data, strerror(errno)); return 0; @@ -6963,15 +6979,16 @@ static int32_t doDumpQueryResult(SQInfo *pQInfo, char *data) { // load data from file to msg buffer if (isTSCompQuery(pQuery)) { - int32_t fd = open(pQuery->sdata[0]->data, O_RDONLY, 0666); + + FILE *f = *(FILE **)pQuery->sdata[0]->data; // make sure file exist - if (FD_VALID(fd)) { - uint64_t s = lseek(fd, 0, SEEK_END); + if (f) { + off_t s = lseek(fileno(f), 0, SEEK_END); - qDebug("QInfo:%p ts comp data return, file:%s, size:%"PRId64, pQInfo, pQuery->sdata[0]->data, s); - if (lseek(fd, 0, SEEK_SET) >= 0) { - size_t sz = read(fd, data, (uint32_t) s); + qDebug("QInfo:%p ts comp data return, file:%p, size:%"PRId64, pQInfo, f, s); + if (fseek(f, 0, SEEK_SET) >= 0) { + size_t sz = fread(data, 1, s, f); if(sz < s) { // todo handle error assert(0); } @@ -6979,15 +6996,8 @@ static int32_t doDumpQueryResult(SQInfo *pQInfo, char *data) { UNUSED(s); } - close(fd); - unlink(pQuery->sdata[0]->data); - } else { - // todo return the error code to client and handle invalid fd - qError("QInfo:%p failed to open tmp file to send ts-comp data to client, path:%s, reason:%s", pQInfo, - pQuery->sdata[0]->data, strerror(errno)); - if (fd != -1) { - close(fd); - } + fclose(f); + *(FILE **)pQuery->sdata[0]->data = NULL; } // all data returned, set query over diff --git a/src/query/src/qTsbuf.c b/src/query/src/qTsbuf.c index d0c59fe5efe08aa1c9dbe3bbe65db139b074ebee..a5d4690a8e101ccd87985ace36b2abbe1412a2e2 100644 --- a/src/query/src/qTsbuf.c +++ b/src/query/src/qTsbuf.c @@ -19,6 +19,8 @@ STSBuf* 
tsBufCreate(bool autoDelete, int32_t order) { if (pTSBuf == NULL) { return NULL; } + + pTSBuf->autoDelete = autoDelete; taosGetTmpfilePath("join", pTSBuf->path); pTSBuf->f = fopen(pTSBuf->path, "w+"); @@ -26,6 +28,10 @@ STSBuf* tsBufCreate(bool autoDelete, int32_t order) { free(pTSBuf); return NULL; } + + if (!autoDelete) { + unlink(pTSBuf->path); + } if (NULL == allocResForTSBuf(pTSBuf)) { return NULL; @@ -37,8 +43,7 @@ STSBuf* tsBufCreate(bool autoDelete, int32_t order) { tsBufResetPos(pTSBuf); pTSBuf->cur.order = TSDB_ORDER_ASC; - - pTSBuf->autoDelete = autoDelete; + pTSBuf->tsOrder = order; return pTSBuf; @@ -49,6 +54,8 @@ STSBuf* tsBufCreateFromFile(const char* path, bool autoDelete) { if (pTSBuf == NULL) { return NULL; } + + pTSBuf->autoDelete = autoDelete; tstrncpy(pTSBuf->path, path, sizeof(pTSBuf->path)); @@ -129,7 +136,6 @@ STSBuf* tsBufCreateFromFile(const char* path, bool autoDelete) { // ascending by default pTSBuf->cur.order = TSDB_ORDER_ASC; - pTSBuf->autoDelete = autoDelete; // tscDebug("create tsBuf from file:%s, fd:%d, size:%d, numOfGroups:%d, autoDelete:%d", pTSBuf->path, fileno(pTSBuf->f), // pTSBuf->fileSize, pTSBuf->numOfGroups, pTSBuf->autoDelete); @@ -147,8 +153,10 @@ void* tsBufDestroy(STSBuf* pTSBuf) { tfree(pTSBuf->pData); tfree(pTSBuf->block.payload); - - fclose(pTSBuf->f); + + if (!pTSBuf->remainOpen) { + fclose(pTSBuf->f); + } if (pTSBuf->autoDelete) { // ("tsBuf %p destroyed, delete tmp file:%s", pTSBuf, pTSBuf->path); diff --git a/src/sync/src/syncMain.c b/src/sync/src/syncMain.c index 436f4de0988aa87836172b8c0284ca767d27c5d8..db100250a8570ec130f89856ce8fa55d493aeb13 100644 --- a/src/sync/src/syncMain.c +++ b/src/sync/src/syncMain.c @@ -375,6 +375,8 @@ int32_t syncReconfig(int64_t rid, const SSyncCfg *pNewCfg) { } int32_t syncForwardToPeer(int64_t rid, void *data, void *mhandle, int32_t qtype) { + if (rid <= 0) return 0; + SSyncNode *pNode = syncAcquireNode(rid); if (pNode == NULL) return 0; diff --git 
a/tests/examples/C#/taosdemo/README.md b/tests/examples/C#/taosdemo/README.md index 09e1d659b74f22670c6bfe1e6fa85c7a457d6b13..82a8dc674af9ecd83d6e777a0e2739cabf598536 100644 --- a/tests/examples/C#/taosdemo/README.md +++ b/tests/examples/C#/taosdemo/README.md @@ -10,12 +10,14 @@ run C# version taosdemo === Usage: mono taosdemo.exe [OPTION...] + --help Show usage. + -h host, The host to connect to TDengine. Default is localhost. -p port, The TCP/IP port number to use for the connection. Default is 0. -u user, The user name to use when connecting to the server. Default is 'root'. -P password, The password to use when connecting to the server. Default is 'taosdata'. -d database, Destination database. Default is 'test'. - -a replica, Set the replica parameters of the database, Default 1, min: 1, max: 3. + -a replica, Set the replica parameters of the database, Default 1, min: 1, max: 5. -m table_prefix, Table prefix name. Default is 't'. -s sql file, The select sql file. -M stable, Use super table. @@ -26,8 +28,8 @@ Usage: mono taosdemo.exe [OPTION...] -l num_of_cols_per_record, The number of columns per record. Default is 3. -T num_of_threads, The number of threads. Default is 10. -r num_of_records_per_req, The number of records per request. Default is 1000. - -t num_of_tables, The number of tables. Default is 10000. - -n num_of_records_per_table, The number of records per table. Default is 10000. + -t num_of_tables, The number of tables. Default is 1. + -n num_of_records_per_table, The number of records per table. Default is 1. -c config_directory, Configuration directory. Default is '/etc/taos/'. -x flag, Insert only flag. -O order, Insert mode--0: In order, 1: Out of order. Default is in order. 
diff --git a/tests/examples/C#/taosdemo/TDengineDriver.cs b/tests/examples/C#/taosdemo/TDengineDriver.cs deleted file mode 120000 index 9bee9fb271a995d8ecec6a07fac2861b2a6f200f..0000000000000000000000000000000000000000 --- a/tests/examples/C#/taosdemo/TDengineDriver.cs +++ /dev/null @@ -1 +0,0 @@ -../../../../src/connector/C#/TDengineDriver.cs \ No newline at end of file diff --git a/tests/examples/C#/taosdemo/TDengineDriver.cs b/tests/examples/C#/taosdemo/TDengineDriver.cs new file mode 100644 index 0000000000000000000000000000000000000000..205269501d376a4753b3aedbfa8d512b2df31600 --- /dev/null +++ b/tests/examples/C#/taosdemo/TDengineDriver.cs @@ -0,0 +1,155 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . 
+ */ + +using System; +using System.Collections.Generic; +using System.Runtime.InteropServices; + +namespace TDengineDriver +{ + enum TDengineDataType + { + TSDB_DATA_TYPE_NULL = 0, // 1 bytes + TSDB_DATA_TYPE_BOOL = 1, // 1 bytes + TSDB_DATA_TYPE_TINYINT = 2, // 1 bytes + TSDB_DATA_TYPE_SMALLINT = 3, // 2 bytes + TSDB_DATA_TYPE_INT = 4, // 4 bytes + TSDB_DATA_TYPE_BIGINT = 5, // 8 bytes + TSDB_DATA_TYPE_FLOAT = 6, // 4 bytes + TSDB_DATA_TYPE_DOUBLE = 7, // 8 bytes + TSDB_DATA_TYPE_BINARY = 8, // string + TSDB_DATA_TYPE_TIMESTAMP = 9,// 8 bytes + TSDB_DATA_TYPE_NCHAR = 10 // unicode string + } + + enum TDengineInitOption + { + TSDB_OPTION_LOCALE = 0, + TSDB_OPTION_CHARSET = 1, + TSDB_OPTION_TIMEZONE = 2, + TDDB_OPTION_CONFIGDIR = 3, + TDDB_OPTION_SHELL_ACTIVITY_TIMER = 4 + } + + class TDengineMeta + { + public string name; + public short size; + public byte type; + public string TypeName() + { + switch ((TDengineDataType)type) + { + case TDengineDataType.TSDB_DATA_TYPE_BOOL: + return "BOOLEAN"; + case TDengineDataType.TSDB_DATA_TYPE_TINYINT: + return "BYTE"; + case TDengineDataType.TSDB_DATA_TYPE_SMALLINT: + return "SHORT"; + case TDengineDataType.TSDB_DATA_TYPE_INT: + return "INT"; + case TDengineDataType.TSDB_DATA_TYPE_BIGINT: + return "LONG"; + case TDengineDataType.TSDB_DATA_TYPE_FLOAT: + return "FLOAT"; + case TDengineDataType.TSDB_DATA_TYPE_DOUBLE: + return "DOUBLE"; + case TDengineDataType.TSDB_DATA_TYPE_BINARY: + return "STRING"; + case TDengineDataType.TSDB_DATA_TYPE_TIMESTAMP: + return "TIMESTAMP"; + case TDengineDataType.TSDB_DATA_TYPE_NCHAR: + return "NCHAR"; + default: + return "undefine"; + } + } + } + + class TDengine + { + public const int TSDB_CODE_SUCCESS = 0; + + [DllImport("taos", EntryPoint = "taos_init", CallingConvention = CallingConvention.Cdecl)] + static extern public void Init(); + + [DllImport("taos", EntryPoint = "taos_cleanup", CallingConvention = CallingConvention.Cdecl)] + static extern public void Cleanup(); + + [DllImport("taos", 
EntryPoint = "taos_options", CallingConvention = CallingConvention.Cdecl)] + static extern public void Options(int option, string value); + + [DllImport("taos", EntryPoint = "taos_connect", CallingConvention = CallingConvention.Cdecl)] + static extern public IntPtr Connect(string ip, string user, string password, string db, short port); + + [DllImport("taos", EntryPoint = "taos_errstr", CallingConvention = CallingConvention.Cdecl)] + static extern private IntPtr taos_errstr(IntPtr res); + static public string Error(IntPtr res) + { + IntPtr errPtr = taos_errstr(res); + return Marshal.PtrToStringAnsi(errPtr); + } + + [DllImport("taos", EntryPoint = "taos_errno", CallingConvention = CallingConvention.Cdecl)] + static extern public int ErrorNo(IntPtr res); + + [DllImport("taos", EntryPoint = "taos_query", CallingConvention = CallingConvention.Cdecl)] + static extern public IntPtr Query(IntPtr conn, string sqlstr); + + [DllImport("taos", EntryPoint = "taos_affected_rows", CallingConvention = CallingConvention.Cdecl)] + static extern public int AffectRows(IntPtr res); + + [DllImport("taos", EntryPoint = "taos_field_count", CallingConvention = CallingConvention.Cdecl)] + static extern public int FieldCount(IntPtr res); + + [DllImport("taos", EntryPoint = "taos_fetch_fields", CallingConvention = CallingConvention.Cdecl)] + static extern private IntPtr taos_fetch_fields(IntPtr res); + static public List FetchFields(IntPtr res) + { + const int fieldSize = 68; + + List metas = new List(); + if (res == IntPtr.Zero) + { + return metas; + } + + int fieldCount = FieldCount(res); + IntPtr fieldsPtr = taos_fetch_fields(res); + + for (int i = 0; i < fieldCount; ++i) + { + int offset = i * fieldSize; + + TDengineMeta meta = new TDengineMeta(); + meta.name = Marshal.PtrToStringAnsi(fieldsPtr + offset); + meta.type = Marshal.ReadByte(fieldsPtr + offset + 65); + meta.size = Marshal.ReadInt16(fieldsPtr + offset + 66); + metas.Add(meta); + } + + return metas; + } + + [DllImport("taos", 
EntryPoint = "taos_fetch_row", CallingConvention = CallingConvention.Cdecl)] + static extern public IntPtr FetchRows(IntPtr res); + + [DllImport("taos", EntryPoint = "taos_free_result", CallingConvention = CallingConvention.Cdecl)] + static extern public IntPtr FreeResult(IntPtr res); + + [DllImport("taos", EntryPoint = "taos_close", CallingConvention = CallingConvention.Cdecl)] + static extern public int Close(IntPtr taos); + } +} diff --git a/tests/examples/C#/taosdemo/taosdemo.cs b/tests/examples/C#/taosdemo/taosdemo.cs index 8e48fa2c8f2d238b7aebf8cba6bf455cb6b890be..7e7c18db26c49c6c7a349fe497d6c9c792756a0c 100644 --- a/tests/examples/C#/taosdemo/taosdemo.cs +++ b/tests/examples/C#/taosdemo/taosdemo.cs @@ -26,10 +26,10 @@ namespace TDengineDriver class TDengineTest { //connect parameters - private string host; - private string configDir; - private string user; - private string password; + private string host = "127.0.0.1"; + private string configDir = "C:/TDengine/cfg"; + private string user = "root"; + private string password = "taosdata"; private short port = 0; //sql parameters @@ -40,11 +40,12 @@ namespace TDengineDriver private bool isInsertOnly = false; private int queryMode = 1; - private long recordsPerTable = 10000; + private long recordsPerTable = 1; private int recordsPerRequest = 1; private int colsPerRecord = 3; private long batchRows = 1000; - private long numOfTables = 10000; + private long numOfTables = 1; + private short replica = 1; private IntPtr conn = IntPtr.Zero; // private long rowsInserted = 0; @@ -66,6 +67,8 @@ namespace TDengineDriver Console.WriteLine("Usage: mono taosdemo.exe [OPTION...]"); Console.WriteLine(""); string indent = " "; + Console.WriteLine("{0}{1}", indent, "--help Show usage."); + Console.WriteLine(""); Console.Write("{0}{1}", indent, "-h"); Console.Write("{0}{1}{2}\n", indent, indent, "host, The host to connect to TDengine. 
Default is localhost."); Console.Write("{0}{1}", indent, "-p"); @@ -77,7 +80,7 @@ namespace TDengineDriver Console.Write("{0}{1}", indent, "-d"); Console.Write("{0}{1}{2}\n", indent, indent, "database, Destination database. Default is 'test'."); Console.Write("{0}{1}", indent, "-a"); - Console.Write("{0}{1}{2}\n", indent, indent, "replica, Set the replica parameters of the database, Default 1, min: 1, max: 3."); + Console.Write("{0}{1}{2}\n", indent, indent, "replica, Set the replica parameters of the database, Default 1, min: 1, max: 5."); Console.Write("{0}{1}", indent, "-m"); Console.Write("{0}{1}{2}\n", indent, indent, "table_prefix, Table prefix name. Default is 't'."); Console.Write("{0}{1}", indent, "-s"); @@ -99,9 +102,9 @@ namespace TDengineDriver Console.Write("{0}{1}", indent, "-r"); Console.Write("{0}{1}{2}\n", indent, indent, "num_of_records_per_req, The number of records per request. Default is 1000."); Console.Write("{0}{1}", indent, "-t"); - Console.Write("{0}{1}{2}\n", indent, indent, "num_of_tables, The number of tables. Default is 10000."); + Console.Write("{0}{1}{2}\n", indent, indent, "num_of_tables, The number of tables. Default is 1."); Console.Write("{0}{1}", indent, "-n"); - Console.Write("{0}{1}{2}\n", indent, indent, "num_of_records_per_table, The number of records per table. Default is 10000."); + Console.Write("{0}{1}{2}\n", indent, indent, "num_of_records_per_table, The number of records per table. Default is 1."); Console.Write("{0}{1}", indent, "-c"); Console.Write("{0}{1}{2}\n", indent, indent, "config_directory, Configuration directory. 
Default is '/etc/taos/'."); Console.Write("{0}{1}", indent, "-x"); @@ -133,14 +136,15 @@ namespace TDengineDriver tablePrefix = this.GetArgumentAsString(argv, "-m", "t"); isInsertOnly = this.GetArgumentAsFlag(argv, "-x"); queryMode = (int)this.GetArgumentAsLong(argv, "-q", 0, 1, 0); - numOfTables = this.GetArgumentAsLong(argv, "-t", 1, 1000000000, 10000); + numOfTables = this.GetArgumentAsLong(argv, "-t", 1, 1000000000, 1); batchRows = this.GetArgumentAsLong(argv, "-r", 1, 10000, 1000); - recordsPerTable = this.GetArgumentAsLong(argv, "-n", 1, 100000000000, 10000); + recordsPerTable = this.GetArgumentAsLong(argv, "-n", 1, 100000000000, 1); recordsPerRequest = (int)this.GetArgumentAsLong(argv, "-r", 1, 10000, 1); colsPerRecord = (int)this.GetArgumentAsLong(argv, "-l", 1, 1024, 3); configDir = this.GetArgumentAsString(argv, "-c", "C:/TDengine/cfg"); useStable = this.GetArgumentAsFlag(argv, "-M"); + replica = (short)this.GetArgumentAsLong(argv, "-a", 1, 5, 1); methodOfDelete = (short)this.GetArgumentAsLong(argv, "-D", 0, 3, 0); numOfThreads = (short)this.GetArgumentAsLong(argv, "-T", 1, 10000, 1); order = this.GetArgumentAsFlag(argv, "-O"); @@ -153,13 +157,14 @@ namespace TDengineDriver Console.Write("# Server IP: {0}\n", host); Console.Write("# User: {0}\n", user); Console.Write("# Password: {0}\n", password); - Console.Write("# Use super table: {0}\n", useStable); Console.Write("# Number of Columns per record: {0}\n", colsPerRecord); Console.Write("# Number of Threads: {0}\n", numOfThreads); Console.Write("# Number of Tables: {0}\n", numOfTables); Console.Write("# Number of Data per Table: {0}\n", recordsPerTable); Console.Write("# Records/Request: {0}\n", recordsPerRequest); Console.Write("# Database name: {0}\n", dbName); + Console.Write("# Replica: {0}\n", replica); + Console.Write("# Use STable: {0}\n", useStable); Console.Write("# Table prefix: {0}\n", tablePrefix); Console.Write("# Data order: {0}\n", order); Console.Write("# Data out of order rate: {0}\n", 
rateOfOutorder); @@ -280,7 +285,7 @@ namespace TDengineDriver public void ConnectTDengine() { string db = ""; - DebugPrintFormat("host:{0} user:{1}, pass:{2}; db:{3}, port:{4}", + DebugPrintFormat("host:{0} user:{1}, pass:{2}; db:{3}, port:{4}\n", this.host, this.user, this.password, db, this.port); this.conn = TDengine.Connect(this.host, this.user, this.password, db, this.port); if (this.conn == IntPtr.Zero) @@ -320,6 +325,7 @@ namespace TDengineDriver createTableThread.verbose = verbose; createTableThread.dbName = this.dbName; createTableThread.tablePrefix = this.tablePrefix; + createTableThread.useStable = useStable; if (useStable) { createTableThread.stableName = stableName; @@ -363,7 +369,7 @@ namespace TDengineDriver public void CreateDb() { StringBuilder sql = new StringBuilder(); - sql.Append("CREATE DATABASE IF NOT EXISTS ").Append(this.dbName); + sql.Append("CREATE DATABASE IF NOT EXISTS ").Append(this.dbName).Append(" replica ").Append(this.replica); IntPtr res = TDengine.Query(this.conn, sql.ToString()); if (res != IntPtr.Zero) { @@ -429,7 +435,7 @@ namespace TDengineDriver insertThread.tablePrefix = this.tablePrefix; if (useStable) { - insertThread.stableName = stableName; + // insertThread.stableName = stableName; } insertThread.conn = conn; @@ -584,15 +590,20 @@ namespace TDengineDriver tester.InitTDengine(); tester.ConnectTDengine(); - tester.dropDatabase(); - tester.CreateDb(); - if (tester.useStable == true) + if (tester.isInsertOnly == false) { - tester.CreateStable(); - } + tester.dropDatabase(); + tester.CreateDb(); + - tester.CreateTablesByThreads(); + if (tester.useStable == true) + { + tester.CreateStable(); + } + + tester.CreateTablesByThreads(); + } Stopwatch watch = Stopwatch.StartNew(); tester.InsertByThreads(); @@ -619,7 +630,7 @@ namespace TDengineDriver public string dbName { set; get; } public IntPtr conn { set; get; } public string tablePrefix { set; get; } - public string stableName { set; get; } + // public string stableName { 
set; get; } public long recordsPerTable { set; get; } public long batchRows { set; get; } public long numOfTables { set; get; } @@ -643,9 +654,18 @@ namespace TDengineDriver public void ThreadMain() { - DebugPrintFormat("InsertDataThread {0} from {1} to {2}", id, start, end); + DebugPrintFormat("InsertDataThread {0} from {1} to {2}\n", id, start, end); StringBuilder sql = new StringBuilder(); - long beginTimestamp = 1551369600000L; + + DateTime now = DateTime.Now; + int h = now.Hour; + int m = now.Minute; + int s = now.Second; + + long baseTimestamp = 1609430400000; // 2021/01/01 0:0:0 + DebugPrintFormat("beginTime is {0} + {1}h:{2}m:{3}s\n", baseTimestamp, h, m, s); + long beginTimestamp = baseTimestamp + ((h*60 + m) * 60 + s) * 1000; + long rowsInserted = 0; // System.DateTime startTime = new System.DateTime(); @@ -660,7 +680,11 @@ namespace TDengineDriver sql.Append("INSERT INTO "). Append(this.dbName).Append(".").Append(this.tablePrefix).Append(table). Append(" VALUES"); - for (int batch = 0; batch < this.batchRows; ++batch) + if (recordsPerTable < batchRows) + { + batchRows = recordsPerTable; + } + for (int batch = 0; batch < batchRows; ++batch) { sql.Append("(") .Append(beginTimestamp + i + batch) @@ -701,6 +725,7 @@ namespace TDengineDriver public string tablePrefix { set; get; } public string stableName { set; get; } public bool verbose { set; get; } + public bool useStable { set; get; } private void DebugPrintFormat(string format, params object[] parameters) { @@ -720,7 +745,7 @@ namespace TDengineDriver public void ThreadMain() { - DebugPrintFormat("CreateTable {0} from {1} to {2}", id, start, end); + DebugPrintFormat("CreateTable {0} from {1} to {2}\n", id, start, end); StringBuilder sql = new StringBuilder(); @@ -728,9 +753,16 @@ namespace TDengineDriver { sql.Clear(); sql = sql.Append("CREATE TABLE IF NOT EXISTS "). - Append(this.dbName).Append(".").Append(this.tablePrefix).Append(tableId). 
- Append(" USING ").Append(this.dbName).Append(".").Append(this.stableName). - Append(" TAGS(").Append(tableId).Append(")"); + Append(this.dbName).Append(".").Append(this.tablePrefix).Append(tableId); + if (useStable == true) + { + sql = sql.Append(" USING ").Append(this.dbName).Append(".").Append(this.stableName). + Append(" TAGS(").Append(tableId).Append(")"); + } + else + { + sql = sql.Append("(ts timestamp, v1 bool, v2 tinyint, v3 smallint, v4 int, v5 bigint, v6 float, v7 double, v8 binary(10), v9 nchar(10))"); + } IntPtr res = TDengine.Query(this.conn, sql.ToString()); if (res != IntPtr.Zero) { diff --git a/tests/examples/JDBC/JDBCDemo/pom.xml b/tests/examples/JDBC/JDBCDemo/pom.xml index 46729b851244cb767f16f69b03c1235e69bf21bc..d075fc8f2ad480535075b79efc15c55d9bb799a5 100644 --- a/tests/examples/JDBC/JDBCDemo/pom.xml +++ b/tests/examples/JDBC/JDBCDemo/pom.xml @@ -48,4 +48,12 @@ + + + com.taosdata.jdbc + taos-jdbcdriver + 2.0.15 + + + diff --git a/tests/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/JDBCDemo.java b/tests/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/JDBCDemo.java index b5012c215fa4b4ab16d8ff5252a9518e2998e39a..e569de10cf7894aa04fc3cb5bdb8354b581d5a93 100644 --- a/tests/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/JDBCDemo.java +++ b/tests/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/JDBCDemo.java @@ -5,7 +5,7 @@ import java.util.Properties; public class JDBCDemo { private static String host; - private static String driverType; + private static String driverType = "jni"; private static final String dbName = "test"; private static final String tbName = "weather"; private Connection connection; @@ -21,7 +21,7 @@ public class JDBCDemo { } } - if (host == null || driverType == null) { + if (host == null) { printHelp(); } diff --git a/tests/pytest/test.py b/tests/pytest/test.py index 6be86fe3fdc4f0cd781d7749b9011a72ad687571..c7781f20873066b02416049e8c472e07c0c3e328 100644 --- a/tests/pytest/test.py 
+++ b/tests/pytest/test.py @@ -118,8 +118,11 @@ if __name__ == "__main__": tdDnodes.stopAll() is_test_framework = 0 key_word = 'tdCases.addLinux' - if key_word in open(fileName).read(): - is_test_framework = 1 + try: + if key_word in open(fileName).read(): + is_test_framework = 1 + except: + pass if is_test_framework: moduleName = fileName.replace(".py", "").replace("/", ".") uModule = importlib.import_module(moduleName) diff --git a/tests/script/general/connection/sim.tar.gz b/tests/script/general/connection/sim.tar.gz new file mode 100644 index 0000000000000000000000000000000000000000..10bc1a6bace1c8b6796a98c53e3aa3c15e0bfd7c Binary files /dev/null and b/tests/script/general/connection/sim.tar.gz differ diff --git a/tests/script/general/connection/test_old_data.sim b/tests/script/general/connection/test_old_data.sim new file mode 100644 index 0000000000000000000000000000000000000000..83df850f0bb93fd216520bbea4065400aa334091 --- /dev/null +++ b/tests/script/general/connection/test_old_data.sim @@ -0,0 +1,32 @@ +system sh/stop_dnodes.sh +system sh/mv_old_data.sh + +print ============== deploy + +system sh/exec.sh -n dnode1 -s start +system sh/exec.sh -n dnode2 -s start +system sh/exec.sh -n dnode3 -s start + +print =============== step1 + +sql use test +sql select * from m1 + +print $rows points data are retrieved +if $rows != 7 then + return -1 +endi + +print =============== step 2 + +sql select * from t1 + +print $rows points data are retrieved +if $rows != 7 then + return -1 +endi + + +system sh/exec.sh -n dnode1 -s stop -x SIGINT +system sh/exec.sh -n dnode2 -s stop -x SIGINT +system sh/exec.sh -n dnode3 -s stop -x SIGINT \ No newline at end of file diff --git a/tests/script/general/parser/select_with_tags.sim b/tests/script/general/parser/select_with_tags.sim index dab76f60044c7b587e6c98bb256f895aeedb656e..c254d31ffc20693918ae32db0e9afb036a7fd44f 100644 --- a/tests/script/general/parser/select_with_tags.sim +++ 
b/tests/script/general/parser/select_with_tags.sim @@ -813,8 +813,6 @@ sql_error select first(ts), first(c1),tbname from select_tags_mt0; sql_error select first(ts), last(ts), tbname from select_tags_mt0; sql_error select last_row(*), first(ts), tbname, t1, t2 from select_tags_mt0; sql_error select tbname, last_row(*), t1, first(ts) from select_tags_mt0; -sql_error select first(ts), tbname from select_tags_tb0; -sql_error select last_row(*), t1 from select_tags_tb0; sql_error select count(*), tbname from select_tags_mt0; sql_error select sum(c2), tbname from select_tags_mt0; sql_error select avg(c3), tbname from select_tags_mt0; diff --git a/tests/script/jenkins/basic.txt b/tests/script/jenkins/basic.txt index efe437fc1d319796dbf6179d570a38107618bc69..733b01f89500c63201d01ff5d13bc66c92efe109 100644 --- a/tests/script/jenkins/basic.txt +++ b/tests/script/jenkins/basic.txt @@ -385,3 +385,8 @@ cd ../../../debug; make ./test.sh -f unique/migrate/mn2_vn2_repl2_rmMnodeVnodeDir.sim ./test.sh -f unique/migrate/mn2_vn2_repl2_rmMnodeVnodeDir_stopAll_starAll.sim ./test.sh -f unique/migrate/mn2_vn2_repl2_rmVnodeDir.sim + +./test.sh -f general/connection/test_old_data.sim +./test.sh -f unique/dnode/datatrans_3node.sim +./test.sh -f unique/dnode/datatrans_3node_2.sim + diff --git a/tests/script/jenkins/basic_1.txt b/tests/script/jenkins/basic_1.txt index 9820a00f40b60263537f98cafb098b34b57032ff..c124b60f806ff2e082289fa7e98b896a77e9d3b9 100644 --- a/tests/script/jenkins/basic_1.txt +++ b/tests/script/jenkins/basic_1.txt @@ -1,26 +1,3 @@ -./test.sh -f general/alter/cached_schema_after_alter.sim -./test.sh -f general/alter/count.sim -./test.sh -f general/alter/dnode.sim -./test.sh -f general/alter/import.sim -./test.sh -f general/alter/insert1.sim -./test.sh -f general/alter/insert2.sim -./test.sh -f general/alter/metrics.sim -./test.sh -f general/alter/table.sim - -./test.sh -f general/cache/new_metrics.sim -./test.sh -f general/cache/restart_metrics.sim -./test.sh -f 
general/cache/restart_table.sim - -./test.sh -f general/connection/connection.sim - -./test.sh -f general/column/commit.sim -./test.sh -f general/column/metrics.sim -./test.sh -f general/column/table.sim - -./test.sh -f general/compress/commitlog.sim -./test.sh -f general/compress/compress.sim -./test.sh -f general/compress/compress2.sim -./test.sh -f general/compress/uncompress.sim ./test.sh -f general/compute/avg.sim ./test.sh -f general/compute/bottom.sim @@ -153,14 +130,6 @@ ./test.sh -f general/db/nosuchfile.sim ./test.sh -f general/parser/function.sim -./test.sh -f general/stable/disk.sim -./test.sh -f general/stable/dnode3.sim -./test.sh -f general/stable/metrics.sim -./test.sh -f general/stable/refcount.sim -./test.sh -f general/stable/show.sim -./test.sh -f general/stable/values.sim -./test.sh -f general/stable/vnode3.sim - ./test.sh -f general/table/autocreate.sim ./test.sh -f general/table/basic1.sim ./test.sh -f general/table/basic2.sim @@ -188,20 +157,4 @@ ./test.sh -f general/table/table.sim ./test.sh -f general/table/tinyint.sim ./test.sh -f general/table/vgroup.sim -./test.sh -f unique/dnode/alternativeRole.sim -./test.sh -f unique/dnode/balance1.sim -./test.sh -f unique/dnode/balance2.sim -./test.sh -f unique/dnode/balance3.sim -./test.sh -f unique/dnode/balancex.sim -./test.sh -f unique/dnode/offline1.sim -./test.sh -f unique/dnode/offline2.sim -./test.sh -f unique/dnode/reason.sim -./test.sh -f unique/dnode/remove1.sim -./test.sh -f unique/dnode/remove2.sim -./test.sh -f unique/dnode/vnode_clean.sim - -./test.sh -f unique/http/admin.sim -./test.sh -f unique/http/opentsdb.sim -./test.sh -f unique/import/replica2.sim -./test.sh -f unique/import/replica3.sim diff --git a/tests/script/jenkins/basic_3.txt b/tests/script/jenkins/basic_3.txt index b44a2c6d44611da0360fe0a2c4266fd92f99aee2..25bfde28f0b724335e248933f01e72c3c35fd3dc 100644 --- a/tests/script/jenkins/basic_3.txt +++ b/tests/script/jenkins/basic_3.txt @@ -73,3 +73,7 @@ ./test.sh -f 
general/stream/stream_restart.sim ./test.sh -f general/stream/table_del.sim ./test.sh -f general/stream/table_replica1_vnoden.sim + +./test.sh -f general/connection/test_old_data.sim +./test.sh -f unique/dnode/datatrans_3node.sim +./test.sh -f unique/dnode/datatrans_3node_2.sim \ No newline at end of file diff --git a/tests/script/jenkins/basic_4.txt b/tests/script/jenkins/basic_4.txt new file mode 100644 index 0000000000000000000000000000000000000000..895281f218717af7dac3b8e1f890ce3e03b81609 --- /dev/null +++ b/tests/script/jenkins/basic_4.txt @@ -0,0 +1,49 @@ +./test.sh -f unique/dnode/alternativeRole.sim +./test.sh -f unique/dnode/balance1.sim +./test.sh -f unique/dnode/balance2.sim +./test.sh -f unique/dnode/balance3.sim +./test.sh -f unique/dnode/balancex.sim +./test.sh -f unique/dnode/offline1.sim +./test.sh -f unique/dnode/offline2.sim +./test.sh -f unique/dnode/reason.sim +./test.sh -f unique/dnode/remove1.sim +./test.sh -f unique/dnode/remove2.sim +./test.sh -f unique/dnode/vnode_clean.sim + +./test.sh -f unique/http/admin.sim +./test.sh -f unique/http/opentsdb.sim + +./test.sh -f unique/import/replica2.sim +./test.sh -f unique/import/replica3.sim + +./test.sh -f general/alter/cached_schema_after_alter.sim +./test.sh -f general/alter/count.sim +./test.sh -f general/alter/dnode.sim +./test.sh -f general/alter/import.sim +./test.sh -f general/alter/insert1.sim +./test.sh -f general/alter/insert2.sim +./test.sh -f general/alter/metrics.sim +./test.sh -f general/alter/table.sim + +./test.sh -f general/cache/new_metrics.sim +./test.sh -f general/cache/restart_metrics.sim +./test.sh -f general/cache/restart_table.sim + +./test.sh -f general/connection/connection.sim + +./test.sh -f general/column/commit.sim +./test.sh -f general/column/metrics.sim +./test.sh -f general/column/table.sim + +./test.sh -f general/compress/commitlog.sim +./test.sh -f general/compress/compress.sim +./test.sh -f general/compress/compress2.sim +./test.sh -f 
general/compress/uncompress.sim + +./test.sh -f general/stable/disk.sim +./test.sh -f general/stable/dnode3.sim +./test.sh -f general/stable/metrics.sim +./test.sh -f general/stable/refcount.sim +./test.sh -f general/stable/show.sim +./test.sh -f general/stable/values.sim +./test.sh -f general/stable/vnode3.sim \ No newline at end of file diff --git a/tests/script/sh/move_dnode.sh b/tests/script/sh/move_dnode.sh new file mode 100755 index 0000000000000000000000000000000000000000..d6dc4bc3eb24fe094067cffcb51a7c335a512a94 --- /dev/null +++ b/tests/script/sh/move_dnode.sh @@ -0,0 +1,47 @@ +#!/bin/bash + +echo "Executing move_dnode.sh" + +SCRIPT_DIR=`dirname $0` +cd $SCRIPT_DIR/../ +SCRIPT_DIR=`pwd` +echo "SCRIPT_DIR: $SCRIPT_DIR" + +IN_TDINTERNAL="community" +if [[ "$SCRIPT_DIR" == *"$IN_TDINTERNAL"* ]]; then + cd ../../.. +else + cd ../../ +fi + +TAOS_DIR=`pwd` +TAOSD_DIR=`find . -name "taosd"|grep bin|head -n1` + +if [[ "$TAOSD_DIR" == *"$IN_TDINTERNAL"* ]]; then + BIN_DIR=`find . -name "taosd"|grep bin|head -n1|cut -d '/' --fields=2,3` +else + BIN_DIR=`find . 
-name "taosd"|grep bin|head -n1|cut -d '/' --fields=2` +fi + +BUILD_DIR=$TAOS_DIR/$BIN_DIR/build + +SIM_DIR=$TAOS_DIR/sim + +NODE_DIR=$SIM_DIR/$NODE_NAME + +if [ -d "$SIM_DIR/$2" ];then + rm -rf $SIM_DIR/$2 +fi +mv $SIM_DIR/$1 $SIM_DIR/$2 + +if [[ $2 =~ "dnode2" ]];then + sed -i 's/serverPort 7100/serverPort 7200/g' $SIM_DIR/$2/cfg/taos.cfg + sed -i 's/dnode1/dnode2/g' $SIM_DIR/$2/cfg/taos.cfg + sed -i 's/7100/7200/g' $SIM_DIR/$2/data/dnode/dnodeEps.json +elif [[ $2 =~ "dnode4" ]];then + sed -i 's/serverPort 7100/serverPort 7400/g' $SIM_DIR/$2/cfg/taos.cfg + sed -i 's/dnode1/dnode4/g' $SIM_DIR/$2/cfg/taos.cfg + sed -i 's/7100/7400/g' $SIM_DIR/dnode2/data/dnode/dnodeEps.json + sed -i 's/7100/7400/g' $SIM_DIR/dnode3/data/dnode/dnodeEps.json + sed -i 's/7100/7400/g' $SIM_DIR/$2/data/dnode/dnodeEps.json +fi diff --git a/tests/script/sh/mv_old_data.sh b/tests/script/sh/mv_old_data.sh new file mode 100755 index 0000000000000000000000000000000000000000..112e9760d637d9698b87729c06ba7b2bde9651f8 --- /dev/null +++ b/tests/script/sh/mv_old_data.sh @@ -0,0 +1,37 @@ +#!/bin/bash + +echo "Executing mv_old_data.sh" + +SCRIPT_DIR=`dirname $0` +cd $SCRIPT_DIR/../ +SCRIPT_DIR=`pwd` +echo "SCRIPT_DIR: $SCRIPT_DIR" + +IN_TDINTERNAL="community" +if [[ "$SCRIPT_DIR" == *"$IN_TDINTERNAL"* ]]; then + cd ../../.. +else + cd ../../ +fi + +TAOS_DIR=`pwd` +TAOSD_DIR=`find . -name "taosd"|grep bin|head -n1` + +if [[ "$TAOSD_DIR" == *"$IN_TDINTERNAL"* ]]; then + BIN_DIR=`find . -name "taosd"|grep bin|head -n1|cut -d '/' --fields=2,3` +else + BIN_DIR=`find . 
-name "taosd"|grep bin|head -n1|cut -d '/' --fields=2` +fi + +BUILD_DIR=$TAOS_DIR/$BIN_DIR/build + +SIM_DIR=$TAOS_DIR/sim + +NODE_DIR=$SIM_DIR/$NODE_NAME + +rm -rf $SIM_DIR/dnode1 +rm -rf $SIM_DIR/dnode2 +rm -rf $SIM_DIR/dnode3 +rm -rf $SIM_DIR/tsim + +tar zxf $SCRIPT_DIR/general/connection/sim.tar.gz -C $SIM_DIR/../ diff --git a/tests/script/unique/db/replica_reduce31.sim b/tests/script/unique/db/replica_reduce31.sim index 2313cbd85e05813a29bce15f96ac0094a3eb1664..5350bcc78c327ae9eb35f24e6d01901cebfb7a07 100644 --- a/tests/script/unique/db/replica_reduce31.sim +++ b/tests/script/unique/db/replica_reduce31.sim @@ -286,27 +286,27 @@ system sh/exec.sh -n dnode2 -s stop -x SIGINT sql reset query cache sleep 100 -sql insert into d1.t1 values(now, 4) -x step1 -step1: -sql insert into d2.t2 values(now, 4) -x step2 -step2: -sql insert into d3.t3 values(now, 4) -x step3 -step3: -sql insert into d4.t4 values(now, 4) -x step4 -step4: +#sql insert into d1.t1 values(now, 4) -x step1 +#step1: +#sql insert into d2.t2 values(now, 4) -x step2 +#step2: +#sql insert into d3.t3 values(now, 4) -x step3 +#step3: +#sql insert into d4.t4 values(now, 4) -x step4 +#step4: print ========= step5 system sh/exec.sh -n dnode2 -s start system sh/exec.sh -n dnode3 -s stop -x SIGINT -sql insert into d1.t1 values(now, 5) -x step5 -step5: -sql insert into d2.t2 values(now, 5) -x step6 -step6: -sql insert into d3.t3 values(now, 5) -x step7 -step7: -sql insert into d4.t4 values(now, 5) -x step8 -step8: +#sql insert into d1.t1 values(now, 5) -x step5 +#step5: +#sql insert into d2.t2 values(now, 5) -x step6 +#step6: +#sql insert into d3.t3 values(now, 5) -x step7 +#step7: +#sql insert into d4.t4 values(now, 5) -x step8 +#step8: print ========= step6 system sh/exec.sh -n dnode3 -s start diff --git a/tests/script/unique/dnode/datatrans_1node.sim b/tests/script/unique/dnode/datatrans_1node.sim new file mode 100644 index 0000000000000000000000000000000000000000..bc38bfaf2df172dfb752eaf94549995ed513ba74 --- 
/dev/null +++ b/tests/script/unique/dnode/datatrans_1node.sim @@ -0,0 +1,53 @@ + +system sh/stop_dnodes.sh + +system sh/deploy.sh -n dnode1 -i 1 +system sh/cfg.sh -n dnode1 -c wallevel -v 2 +system sh/exec.sh -n dnode1 -s start + +sleep 3000 +sql connect + +print =============== step1 +sql drop database -x step1 +step1: +sql create database db +sql use db +sql create table m1 (ts timestamp, speed int) + +print =============== step 2 +$x = 0 +while $x < 10 + $cc = $x * 60000 + $ms = 1601481600000 + $cc + + sql insert into m1 values ($ms , $x ) + $x = $x + 1 +endw + +sql select * from m1 + +print $rows points data are retrieved +if $rows != 10 then + return -1 +endi + +system sh/exec.sh -n dnode1 -s stop -x SIGINT + +print =============== step 3 +system sh/move_dnode.sh dnode1 dnode2 +system sh/exec.sh -n dnode2 -s start + + +print =============== step 4 +sleep 3000 +sql connect + +sql select * from db.m1 + +print $rows points data are retrieved +if $rows != 10 then + return -1 +endi + +system sh/exec.sh -n dnode2 -s stop -x SIGINT \ No newline at end of file diff --git a/tests/script/unique/dnode/datatrans_3node.sim b/tests/script/unique/dnode/datatrans_3node.sim new file mode 100644 index 0000000000000000000000000000000000000000..7c3708c1110df2714db25a59b6a5bc37b5f3818c --- /dev/null +++ b/tests/script/unique/dnode/datatrans_3node.sim @@ -0,0 +1,91 @@ +system sh/stop_dnodes.sh +system sh/deploy.sh -n dnode1 -i 1 +system sh/deploy.sh -n dnode2 -i 2 +system sh/deploy.sh -n dnode3 -i 3 + + +system sh/cfg.sh -n dnode1 -c numOfMnodes -v 1 +system sh/cfg.sh -n dnode2 -c numOfMnodes -v 1 +system sh/cfg.sh -n dnode3 -c numOfMnodes -v 1 + + +system sh/cfg.sh -n dnode1 -c walLevel -v 2 +system sh/cfg.sh -n dnode2 -c walLevel -v 2 +system sh/cfg.sh -n dnode3 -c walLevel -v 2 + +system sh/cfg.sh -n dnode1 -c balanceInterval -v 10 +system sh/cfg.sh -n dnode2 -c balanceInterval -v 10 +system sh/cfg.sh -n dnode3 -c balanceInterval -v 10 + +system sh/cfg.sh -n dnode1 -c role -v 2 
+system sh/cfg.sh -n dnode2 -c role -v 2 +system sh/cfg.sh -n dnode3 -c role -v 2 + + +system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 4 +system sh/cfg.sh -n dnode2 -c maxtablesPerVnode -v 4 +system sh/cfg.sh -n dnode3 -c maxtablesPerVnode -v 4 + + + + +print ============== step1: start dnode1 +system sh/exec.sh -n dnode1 -s start +sleep 3000 +sql connect + +print ============== step2: start dnode2/dnode3 and add into cluster , then create database with replica 2, and create table, insert data +system sh/exec.sh -n dnode2 -s start +system sh/exec.sh -n dnode3 -s start +sql create dnode $hostname2 +sql create dnode $hostname3 +sleep 3000 + +# create table +sql drop database -x step1 +step1: +sql create database db +sql use db +sql create table m1 (ts timestamp, speed int) + +# insert data +$x = 0 +while $x < 10 + $cc = $x * 60000 + $ms = 1601481600000 + $cc + + sql insert into m1 values ($ms , $x ) + $x = $x + 1 +endw + +sql select * from m1 + +print $rows points data are retrieved +if $rows != 10 then + return -1 +endi + +print ============== step3: stop cluster , then move_dnode1 ,start cluster +system sh/exec.sh -n dnode1 -s stop -x SIGINT +system sh/exec.sh -n dnode2 -s stop -x SIGINT +system sh/exec.sh -n dnode3 -s stop -x SIGINT + +system sh/move_dnode.sh dnode1 dnode4 +system sh/exec.sh -n dnode2 -s start +system sh/exec.sh -n dnode3 -s start +system sh/exec.sh -n dnode4 -s start + +print =============== step 4 +sleep 3000 +sql connect + +sql select * from db.m1 + +print $rows points data are retrieved +if $rows != 10 then + return -1 +endi + +system sh/exec.sh -n dnode2 -s stop -x SIGINT +system sh/exec.sh -n dnode3 -s stop -x SIGINT +system sh/exec.sh -n dnode4 -s stop -x SIGINT \ No newline at end of file diff --git a/tests/script/unique/dnode/datatrans_3node_2.sim b/tests/script/unique/dnode/datatrans_3node_2.sim new file mode 100644 index 0000000000000000000000000000000000000000..4fb3b4535fb640ac7257af33b651941462a56896 --- /dev/null +++ 
b/tests/script/unique/dnode/datatrans_3node_2.sim @@ -0,0 +1,91 @@ +system sh/stop_dnodes.sh +system sh/deploy.sh -n dnode1 -i 1 +system sh/deploy.sh -n dnode2 -i 2 +system sh/deploy.sh -n dnode3 -i 3 + + +system sh/cfg.sh -n dnode1 -c numOfMnodes -v 1 +system sh/cfg.sh -n dnode2 -c numOfMnodes -v 1 +system sh/cfg.sh -n dnode3 -c numOfMnodes -v 1 + + +system sh/cfg.sh -n dnode1 -c walLevel -v 2 +system sh/cfg.sh -n dnode2 -c walLevel -v 2 +system sh/cfg.sh -n dnode3 -c walLevel -v 2 + +system sh/cfg.sh -n dnode1 -c balanceInterval -v 10 +system sh/cfg.sh -n dnode2 -c balanceInterval -v 10 +system sh/cfg.sh -n dnode3 -c balanceInterval -v 10 + +system sh/cfg.sh -n dnode1 -c role -v 2 +system sh/cfg.sh -n dnode2 -c role -v 2 +system sh/cfg.sh -n dnode3 -c role -v 2 + + +system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 4 +system sh/cfg.sh -n dnode2 -c maxtablesPerVnode -v 4 +system sh/cfg.sh -n dnode3 -c maxtablesPerVnode -v 4 + + + + +print ============== step1: start dnode1 +system sh/exec.sh -n dnode1 -s start +sleep 3000 +sql connect + +print ============== step2: start dnode2/dnode3 and add into cluster , then create database with replica 2, and create table, insert data +system sh/exec.sh -n dnode2 -s start +system sh/exec.sh -n dnode3 -s start +sql create dnode $hostname2 +sql create dnode $hostname3 +sleep 3000 + +# create table +sql drop database -x step1 +step1: +sql create database db replica 2 +sql use db +sql create table m1 (ts timestamp, speed int) + +# insert data +$x = 0 +while $x < 10 + $cc = $x * 60000 + $ms = 1601481600000 + $cc + + sql insert into m1 values ($ms , $x ) + $x = $x + 1 +endw + +sql select * from m1 + +print $rows points data are retrieved +if $rows != 10 then + return -1 +endi + +print ============== step3: stop cluster , then move_dnode1 ,start cluster +system sh/exec.sh -n dnode1 -s stop -x SIGINT +system sh/exec.sh -n dnode2 -s stop -x SIGINT +system sh/exec.sh -n dnode3 -s stop -x SIGINT + +system sh/move_dnode.sh dnode1 dnode4 
+system sh/exec.sh -n dnode2 -s start +system sh/exec.sh -n dnode3 -s start +system sh/exec.sh -n dnode4 -s start + +print =============== step 4 +sleep 3000 +sql connect + +sql select * from db.m1 + +print $rows points data are retrieved +if $rows != 10 then + return -1 +endi + +system sh/exec.sh -n dnode2 -s stop -x SIGINT +system sh/exec.sh -n dnode3 -s stop -x SIGINT +system sh/exec.sh -n dnode4 -s stop -x SIGINT \ No newline at end of file diff --git a/tests/test-all.sh b/tests/test-all.sh index 19d78032554682d533c89bc4e4bd473689364d87..0c1f55f5f019b102fb35ad800de7627a72cc2d9a 100755 --- a/tests/test-all.sh +++ b/tests/test-all.sh @@ -138,6 +138,7 @@ if [ "$2" != "python" ]; then elif [ "$1" == "b1" ]; then echo "### run TSIM b1 test ###" runSimCaseOneByOne jenkins/basic_1.txt + runSimCaseOneByOne jenkins/basic_4.txt elif [ "$1" == "b2" ]; then echo "### run TSIM b2 test ###" runSimCaseOneByOne jenkins/basic_2.txt @@ -153,6 +154,9 @@ if [ "$2" != "python" ]; then elif [ "$1" == "b3fq" ]; then echo "### run TSIM b3 test ###" runSimCaseOneByOnefq jenkins/basic_3.txt + elif [ "$1" == "b4fq" ]; then + echo "### run TSIM b4 test ###" + runSimCaseOneByOnefq jenkins/basic_4.txt elif [ "$1" == "smoke" ] || [ -z "$1" ]; then echo "### run TSIM smoke test ###" runSimCaseOneByOne basicSuite.sim